From 95af9fc017182284cfc1844f6f85520ffcafb175 Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Mon, 13 Feb 2023 13:15:45 +0000 Subject: [PATCH 01/33] WIP --- .github/workflows/publish.yaml | 3 +- CHANGELOG.md | 27 +++ LocalPost.sln | 39 ++++- README.md | 10 ++ .../AmazonSqsApp.csproj} | 0 .../Controllers/WeatherForecastController.cs | 2 +- .../{SampleWebApp => AmazonSqsApp}/Program.cs | 2 +- .../Properties/launchSettings.json | 0 .../WeatherForecast.cs | 2 +- .../appsettings.Development.json | 0 .../appsettings.json | 0 samples/AzureQueueApp/AzureQueueApp.csproj | 19 ++ .../Controllers/WeatherForecastController.cs | 32 ++++ samples/AzureQueueApp/Program.cs | 36 ++++ .../Properties/launchSettings.json | 31 ++++ samples/AzureQueueApp/WeatherForecast.cs | 12 ++ .../appsettings.Development.json | 8 + samples/AzureQueueApp/appsettings.json | 9 + .../Controllers/WeatherForecastController.cs | 32 ++++ .../KafkaConsumerApp/KafkaConsumerApp.csproj | 13 ++ samples/KafkaConsumerApp/Program.cs | 25 +++ .../Properties/launchSettings.json | 31 ++++ samples/KafkaConsumerApp/WeatherForecast.cs | 12 ++ .../appsettings.Development.json | 8 + samples/KafkaConsumerApp/appsettings.json | 9 + .../AzureQueues.cs | 20 +++ .../ConsumerOptions.cs | 18 ++ .../DependencyInjection/HealthChecks.cs | 23 +++ .../ServiceRegistration.cs | 37 ++++ .../IMessageHandler.cs | 7 + .../LocalPost.Azure.QueueConsumer.csproj | 55 ++++++ .../MessagePuller.cs | 65 +++++++ src/LocalPost.KafkaConsumer/ConsumeContext.cs | 10 ++ src/LocalPost.KafkaConsumer/Consumer.cs | 94 ++++++++++ .../ConsumerBuilder.cs | 89 ++++++++++ .../ConsumerOptions.cs | 17 ++ .../ServiceCollectionExtensions.cs | 25 +++ src/LocalPost.KafkaConsumer/HealthCheck.cs | 22 +++ .../IMessageHandler.cs | 7 + .../LocalPost.KafkaConsumer.csproj | 55 ++++++ .../LocalPost.RabbitMqConsumer.csproj | 55 ++++++ .../ServiceCollectionExtensions.cs | 4 +- src/LocalPost.SnsPublisher/Sender.cs | 12 +- src/LocalPost.SqsConsumer/ConsumeContext.cs | 10 ++ 
src/LocalPost.SqsConsumer/Consumer.cs | 57 ++++++ src/LocalPost.SqsConsumer/ConsumerBuilder.cs | 39 +++++ .../ConsumerMiddleware.cs | 28 +++ src/LocalPost.SqsConsumer/ConsumerOptions.cs | 67 +++++++ .../DependencyInjection/HealthChecks.cs | 23 +++ .../ServiceCollectionExtensions.cs | 83 --------- .../ServiceRegistration.cs | 79 +++++++++ ...qsMessageHandler.cs => IMessageHandler.cs} | 2 +- .../LocalPost.SqsConsumer.csproj | 2 +- .../ProcessedMessages.cs | 47 ----- src/LocalPost.SqsConsumer/QueueClient.cs | 110 ++++++++++++ .../SqsConsumerOptions.cs | 106 ----------- .../SqsDeleteBatchBuilder.cs | 30 ---- src/LocalPost.SqsConsumer/SqsPuller.cs | 83 --------- src/LocalPost/BackgroundQueue.cs | 21 ++- src/LocalPost/BackgroundQueueBuilder.cs | 45 +++++ src/LocalPost/BackgroundQueueConsumer.cs | 76 +++----- src/LocalPost/BackgroundServiceSupervisor.cs | 165 ++++++++++++++++++ ...gurationExtensions.cs => Configuration.cs} | 3 +- .../CustomQueueRegistrationExtensions.cs | 37 ---- .../DependencyInjection/HealthChecks.cs | 40 +++++ .../DependencyInjection/INamedService.cs | 6 + ...nExtensions.cs => JobQueueRegistration.cs} | 5 +- .../DependencyInjection/QueueRegistration.cs | 52 ++++++ .../QueueRegistrationExtensions.cs | 23 --- .../ServiceProviderLookups.cs | 15 ++ src/LocalPost/LocalPost.csproj | 7 + src/LocalPost/MiddlewareStack.cs | 26 +++ src/LocalPost/MiddlewareStackBuilder.cs | 52 ++++++ src/LocalPost/RecordsSupport.cs | 10 -- 74 files changed, 1836 insertions(+), 490 deletions(-) create mode 100644 CHANGELOG.md rename samples/{SampleWebApp/SampleWebApp.csproj => AmazonSqsApp/AmazonSqsApp.csproj} (100%) rename samples/{SampleWebApp => AmazonSqsApp}/Controllers/WeatherForecastController.cs (97%) rename samples/{SampleWebApp => AmazonSqsApp}/Program.cs (98%) rename samples/{SampleWebApp => AmazonSqsApp}/Properties/launchSettings.json (100%) rename samples/{SampleWebApp => AmazonSqsApp}/WeatherForecast.cs (90%) rename samples/{SampleWebApp => 
AmazonSqsApp}/appsettings.Development.json (100%) rename samples/{SampleWebApp => AmazonSqsApp}/appsettings.json (100%) create mode 100644 samples/AzureQueueApp/AzureQueueApp.csproj create mode 100644 samples/AzureQueueApp/Controllers/WeatherForecastController.cs create mode 100644 samples/AzureQueueApp/Program.cs create mode 100644 samples/AzureQueueApp/Properties/launchSettings.json create mode 100644 samples/AzureQueueApp/WeatherForecast.cs create mode 100644 samples/AzureQueueApp/appsettings.Development.json create mode 100644 samples/AzureQueueApp/appsettings.json create mode 100644 samples/KafkaConsumerApp/Controllers/WeatherForecastController.cs create mode 100644 samples/KafkaConsumerApp/KafkaConsumerApp.csproj create mode 100644 samples/KafkaConsumerApp/Program.cs create mode 100644 samples/KafkaConsumerApp/Properties/launchSettings.json create mode 100644 samples/KafkaConsumerApp/WeatherForecast.cs create mode 100644 samples/KafkaConsumerApp/appsettings.Development.json create mode 100644 samples/KafkaConsumerApp/appsettings.json create mode 100644 src/LocalPost.Azure.QueueConsumer/AzureQueues.cs create mode 100644 src/LocalPost.Azure.QueueConsumer/ConsumerOptions.cs create mode 100644 src/LocalPost.Azure.QueueConsumer/DependencyInjection/HealthChecks.cs create mode 100644 src/LocalPost.Azure.QueueConsumer/DependencyInjection/ServiceRegistration.cs create mode 100644 src/LocalPost.Azure.QueueConsumer/IMessageHandler.cs create mode 100644 src/LocalPost.Azure.QueueConsumer/LocalPost.Azure.QueueConsumer.csproj create mode 100644 src/LocalPost.Azure.QueueConsumer/MessagePuller.cs create mode 100644 src/LocalPost.KafkaConsumer/ConsumeContext.cs create mode 100644 src/LocalPost.KafkaConsumer/Consumer.cs create mode 100644 src/LocalPost.KafkaConsumer/ConsumerBuilder.cs create mode 100644 src/LocalPost.KafkaConsumer/ConsumerOptions.cs create mode 100644 src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionExtensions.cs create mode 100644 
src/LocalPost.KafkaConsumer/HealthCheck.cs create mode 100644 src/LocalPost.KafkaConsumer/IMessageHandler.cs create mode 100644 src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj create mode 100644 src/LocalPost.RabbitMqConsumer/LocalPost.RabbitMqConsumer.csproj create mode 100644 src/LocalPost.SqsConsumer/ConsumeContext.cs create mode 100644 src/LocalPost.SqsConsumer/Consumer.cs create mode 100644 src/LocalPost.SqsConsumer/ConsumerBuilder.cs create mode 100644 src/LocalPost.SqsConsumer/ConsumerMiddleware.cs create mode 100644 src/LocalPost.SqsConsumer/ConsumerOptions.cs create mode 100644 src/LocalPost.SqsConsumer/DependencyInjection/HealthChecks.cs delete mode 100644 src/LocalPost.SqsConsumer/DependencyInjection/ServiceCollectionExtensions.cs create mode 100644 src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs rename src/LocalPost.SqsConsumer/{ISqsMessageHandler.cs => IMessageHandler.cs} (55%) delete mode 100644 src/LocalPost.SqsConsumer/ProcessedMessages.cs create mode 100644 src/LocalPost.SqsConsumer/QueueClient.cs delete mode 100644 src/LocalPost.SqsConsumer/SqsConsumerOptions.cs delete mode 100644 src/LocalPost.SqsConsumer/SqsDeleteBatchBuilder.cs delete mode 100644 src/LocalPost.SqsConsumer/SqsPuller.cs create mode 100644 src/LocalPost/BackgroundQueueBuilder.cs create mode 100644 src/LocalPost/BackgroundServiceSupervisor.cs rename src/LocalPost/DependencyInjection/{ConfigurationExtensions.cs => Configuration.cs} (85%) delete mode 100644 src/LocalPost/DependencyInjection/CustomQueueRegistrationExtensions.cs create mode 100644 src/LocalPost/DependencyInjection/HealthChecks.cs create mode 100644 src/LocalPost/DependencyInjection/INamedService.cs rename src/LocalPost/DependencyInjection/{JobQueueRegistrationExtensions.cs => JobQueueRegistration.cs} (69%) create mode 100644 src/LocalPost/DependencyInjection/QueueRegistration.cs delete mode 100644 src/LocalPost/DependencyInjection/QueueRegistrationExtensions.cs create mode 100644 
src/LocalPost/DependencyInjection/ServiceProviderLookups.cs create mode 100644 src/LocalPost/MiddlewareStack.cs create mode 100644 src/LocalPost/MiddlewareStackBuilder.cs delete mode 100644 src/LocalPost/RecordsSupport.cs diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml index bfce883..dcbc521 100644 --- a/.github/workflows/publish.yaml +++ b/.github/workflows/publish.yaml @@ -35,8 +35,7 @@ jobs: fetch-depth: 0 - uses: actions/setup-dotnet@v3 with: - dotnet-version: | - 7.0.x + dotnet-version: 7.0.x - run: dotnet pack -c Release - name: Publish run: | diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..93cc0d6 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,27 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +### Added + +### Fixed + +## [0.2.0] - 2023-01-23 + +### Added + +- Kafka consumer + +### Changed + +- MessageHandler → Handler +- + +## [1.0.0] - 2017-06-20 + +### Added diff --git a/LocalPost.sln b/LocalPost.sln index 2c7c844..304488f 100644 --- a/LocalPost.sln +++ b/LocalPost.sln @@ -2,7 +2,7 @@ Microsoft Visual Studio Solution File, Format Version 12.00 Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost", "src\LocalPost\LocalPost.csproj", "{474D2C1A-5557-4ED9-AF20-FE195D4C1AF7}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SampleWebApp", "samples\SampleWebApp\SampleWebApp.csproj", "{46FC61E6-D0FB-4D7D-A81B-20EF8D8D1F4E}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AmazonSqsApp", "samples\AmazonSqsApp\AmazonSqsApp.csproj", "{46FC61E6-D0FB-4D7D-A81B-20EF8D8D1F4E}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.SnsPublisher", "src\LocalPost.SnsPublisher\LocalPost.SnsPublisher.csproj", 
"{D256C568-2B42-4DCC-AB54-15B512A99C44}" EndProject @@ -14,6 +14,18 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.SqsConsumer", "sr EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.SqsConsumer.Tests", "tests\LocalPost.SqsConsumer.Tests\LocalPost.SqsConsumer.Tests.csproj", "{2F61DCD7-E4CB-4ECC-B24E-A663D12D9C03}" EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.Azure.QueueConsumer", "src\LocalPost.Azure.QueueConsumer\LocalPost.Azure.QueueConsumer.csproj", "{3F9454C4-9C0D-4FB4-9476-F32224182C7B}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.RabbitMqConsumer", "src\LocalPost.RabbitMqConsumer\LocalPost.RabbitMqConsumer.csproj", "{3387EE63-AFD4-4792-8FD3-C1C8D2C5900D}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.KafkaConsumer", "src\LocalPost.KafkaConsumer\LocalPost.KafkaConsumer.csproj", "{D9139C53-5B9F-49E7-80DF-41C995C37E2F}" +EndProject +Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Examples", "Examples", "{405721DC-F290-4191-B638-9907D5EB042B}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "KafkaConsumerApp", "samples\KafkaConsumerApp\KafkaConsumerApp.csproj", "{CB4358CA-D6AD-4106-A7B5-FA4AC8C7E55B}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AzureQueueApp", "samples\AzureQueueApp\AzureQueueApp.csproj", "{7C21BB9A-9C68-4750-84AA-272F201878A1}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -48,5 +60,30 @@ Global {2F61DCD7-E4CB-4ECC-B24E-A663D12D9C03}.Debug|Any CPU.Build.0 = Debug|Any CPU {2F61DCD7-E4CB-4ECC-B24E-A663D12D9C03}.Release|Any CPU.ActiveCfg = Release|Any CPU {2F61DCD7-E4CB-4ECC-B24E-A663D12D9C03}.Release|Any CPU.Build.0 = Release|Any CPU + {3F9454C4-9C0D-4FB4-9476-F32224182C7B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {3F9454C4-9C0D-4FB4-9476-F32224182C7B}.Debug|Any CPU.Build.0 = Debug|Any CPU + 
{3F9454C4-9C0D-4FB4-9476-F32224182C7B}.Release|Any CPU.ActiveCfg = Release|Any CPU + {3F9454C4-9C0D-4FB4-9476-F32224182C7B}.Release|Any CPU.Build.0 = Release|Any CPU + {3387EE63-AFD4-4792-8FD3-C1C8D2C5900D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {3387EE63-AFD4-4792-8FD3-C1C8D2C5900D}.Debug|Any CPU.Build.0 = Debug|Any CPU + {3387EE63-AFD4-4792-8FD3-C1C8D2C5900D}.Release|Any CPU.ActiveCfg = Release|Any CPU + {3387EE63-AFD4-4792-8FD3-C1C8D2C5900D}.Release|Any CPU.Build.0 = Release|Any CPU + {D9139C53-5B9F-49E7-80DF-41C995C37E2F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {D9139C53-5B9F-49E7-80DF-41C995C37E2F}.Debug|Any CPU.Build.0 = Debug|Any CPU + {D9139C53-5B9F-49E7-80DF-41C995C37E2F}.Release|Any CPU.ActiveCfg = Release|Any CPU + {D9139C53-5B9F-49E7-80DF-41C995C37E2F}.Release|Any CPU.Build.0 = Release|Any CPU + {CB4358CA-D6AD-4106-A7B5-FA4AC8C7E55B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {CB4358CA-D6AD-4106-A7B5-FA4AC8C7E55B}.Debug|Any CPU.Build.0 = Debug|Any CPU + {CB4358CA-D6AD-4106-A7B5-FA4AC8C7E55B}.Release|Any CPU.ActiveCfg = Release|Any CPU + {CB4358CA-D6AD-4106-A7B5-FA4AC8C7E55B}.Release|Any CPU.Build.0 = Release|Any CPU + {7C21BB9A-9C68-4750-84AA-272F201878A1}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {7C21BB9A-9C68-4750-84AA-272F201878A1}.Debug|Any CPU.Build.0 = Debug|Any CPU + {7C21BB9A-9C68-4750-84AA-272F201878A1}.Release|Any CPU.ActiveCfg = Release|Any CPU + {7C21BB9A-9C68-4750-84AA-272F201878A1}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(NestedProjects) = preSolution + {46FC61E6-D0FB-4D7D-A81B-20EF8D8D1F4E} = {405721DC-F290-4191-B638-9907D5EB042B} + {CB4358CA-D6AD-4106-A7B5-FA4AC8C7E55B} = {405721DC-F290-4191-B638-9907D5EB042B} + {7C21BB9A-9C68-4750-84AA-272F201878A1} = {405721DC-F290-4191-B638-9907D5EB042B} EndGlobalSection EndGlobal diff --git a/README.md b/README.md index 7ecd402..b4997a3 100644 --- a/README.md +++ b/README.md @@ -6,3 +6,13 @@ Simple .NET in-memory background queue 
([System.Threading.Channels](https://lear - [Coravel queue](https://docs.coravel.net/Queuing/)/event broadcasting — only invocable queueing, event broadcasting is different from consuming a queue - [Hangfire](https://www.hangfire.io/) — for persistent queues (means payload serialisation), LocalPost is completely about in-memory ones + +## Amazon SQS Consumer + +### Permissions + +To operate on a queue below [permissions](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-api-permissions-reference.html) are required: +- sqs:GetQueueUrl +- sqs:GetQueueAttributes +- sqs:ReceiveMessage +- sqs:ChangeMessageVisibility diff --git a/samples/SampleWebApp/SampleWebApp.csproj b/samples/AmazonSqsApp/AmazonSqsApp.csproj similarity index 100% rename from samples/SampleWebApp/SampleWebApp.csproj rename to samples/AmazonSqsApp/AmazonSqsApp.csproj diff --git a/samples/SampleWebApp/Controllers/WeatherForecastController.cs b/samples/AmazonSqsApp/Controllers/WeatherForecastController.cs similarity index 97% rename from samples/SampleWebApp/Controllers/WeatherForecastController.cs rename to samples/AmazonSqsApp/Controllers/WeatherForecastController.cs index 05c8458..562c19f 100644 --- a/samples/SampleWebApp/Controllers/WeatherForecastController.cs +++ b/samples/AmazonSqsApp/Controllers/WeatherForecastController.cs @@ -3,7 +3,7 @@ using LocalPost.SnsPublisher; using Microsoft.AspNetCore.Mvc; -namespace SampleWebApp.Controllers; +namespace AmazonSqsApp.Controllers; [ApiController] [Route("[controller]")] diff --git a/samples/SampleWebApp/Program.cs b/samples/AmazonSqsApp/Program.cs similarity index 98% rename from samples/SampleWebApp/Program.cs rename to samples/AmazonSqsApp/Program.cs index 2b58949..eed557c 100644 --- a/samples/SampleWebApp/Program.cs +++ b/samples/AmazonSqsApp/Program.cs @@ -2,7 +2,7 @@ using Amazon.SQS; using LocalPost.SnsPublisher.DependencyInjection; using LocalPost.DependencyInjection; -using SampleWebApp; +using AmazonSqsApp; using 
LocalPost.SqsConsumer.DependencyInjection; var builder = WebApplication.CreateBuilder(args); diff --git a/samples/SampleWebApp/Properties/launchSettings.json b/samples/AmazonSqsApp/Properties/launchSettings.json similarity index 100% rename from samples/SampleWebApp/Properties/launchSettings.json rename to samples/AmazonSqsApp/Properties/launchSettings.json diff --git a/samples/SampleWebApp/WeatherForecast.cs b/samples/AmazonSqsApp/WeatherForecast.cs similarity index 90% rename from samples/SampleWebApp/WeatherForecast.cs rename to samples/AmazonSqsApp/WeatherForecast.cs index 72eee54..6c1a09b 100644 --- a/samples/SampleWebApp/WeatherForecast.cs +++ b/samples/AmazonSqsApp/WeatherForecast.cs @@ -1,4 +1,4 @@ -namespace SampleWebApp; +namespace AmazonSqsApp; public class WeatherForecast { diff --git a/samples/SampleWebApp/appsettings.Development.json b/samples/AmazonSqsApp/appsettings.Development.json similarity index 100% rename from samples/SampleWebApp/appsettings.Development.json rename to samples/AmazonSqsApp/appsettings.Development.json diff --git a/samples/SampleWebApp/appsettings.json b/samples/AmazonSqsApp/appsettings.json similarity index 100% rename from samples/SampleWebApp/appsettings.json rename to samples/AmazonSqsApp/appsettings.json diff --git a/samples/AzureQueueApp/AzureQueueApp.csproj b/samples/AzureQueueApp/AzureQueueApp.csproj new file mode 100644 index 0000000..2be9b2b --- /dev/null +++ b/samples/AzureQueueApp/AzureQueueApp.csproj @@ -0,0 +1,19 @@ + + + + net7 + enable + + + + + + + + + + + + + + diff --git a/samples/AzureQueueApp/Controllers/WeatherForecastController.cs b/samples/AzureQueueApp/Controllers/WeatherForecastController.cs new file mode 100644 index 0000000..c76fc47 --- /dev/null +++ b/samples/AzureQueueApp/Controllers/WeatherForecastController.cs @@ -0,0 +1,32 @@ +using Microsoft.AspNetCore.Mvc; + +namespace AzureQueueApp.Controllers; + +[ApiController] +[Route("[controller]")] +public class WeatherForecastController : 
ControllerBase +{ + private static readonly string[] Summaries = new[] + { + "Freezing", "Bracing", "Chilly", "Cool", "Mild", "Warm", "Balmy", "Hot", "Sweltering", "Scorching" + }; + + private readonly ILogger _logger; + + public WeatherForecastController(ILogger logger) + { + _logger = logger; + } + + [HttpGet(Name = "GetWeatherForecast")] + public IEnumerable Get() + { + return Enumerable.Range(1, 5).Select(index => new WeatherForecast + { + Date = DateTime.Now.AddDays(index), + TemperatureC = Random.Shared.Next(-20, 55), + Summary = Summaries[Random.Shared.Next(Summaries.Length)] + }) + .ToArray(); + } +} diff --git a/samples/AzureQueueApp/Program.cs b/samples/AzureQueueApp/Program.cs new file mode 100644 index 0000000..1f194e4 --- /dev/null +++ b/samples/AzureQueueApp/Program.cs @@ -0,0 +1,36 @@ +using Azure.Identity; +using Microsoft.Extensions.Azure; + +var builder = WebApplication.CreateBuilder(args); + +// Add services to the container. + +builder.Services.AddControllers(); +// Learn more about configuring Swagger/OpenAPI at https://aka.ms/aspnetcore/swashbuckle +builder.Services.AddEndpointsApiExplorer(); +builder.Services.AddSwaggerGen(); + +builder.Services.AddAzureClients(clientBuilder => +{ + clientBuilder.AddQueueServiceClient(); + // Use DefaultAzureCredential by default + clientBuilder.UseCredential(new DefaultAzureCredential()); +}); + + +var app = builder.Build(); + +// Configure the HTTP request pipeline. 
+if (app.Environment.IsDevelopment()) +{ + app.UseSwagger(); + app.UseSwaggerUI(); +} + +app.UseHttpsRedirection(); + +app.UseAuthorization(); + +app.MapControllers(); + +app.Run(); diff --git a/samples/AzureQueueApp/Properties/launchSettings.json b/samples/AzureQueueApp/Properties/launchSettings.json new file mode 100644 index 0000000..6964d33 --- /dev/null +++ b/samples/AzureQueueApp/Properties/launchSettings.json @@ -0,0 +1,31 @@ +{ + "$schema": "https://json.schemastore.org/launchsettings.json", + "iisSettings": { + "windowsAuthentication": false, + "anonymousAuthentication": true, + "iisExpress": { + "applicationUrl": "http://localhost:41855", + "sslPort": 44346 + } + }, + "profiles": { + "AzureQueueApp": { + "commandName": "Project", + "dotnetRunMessages": true, + "launchBrowser": true, + "launchUrl": "swagger", + "applicationUrl": "https://localhost:7088;http://localhost:5207", + "environmentVariables": { + "ASPNETCORE_ENVIRONMENT": "Development" + } + }, + "IIS Express": { + "commandName": "IISExpress", + "launchBrowser": true, + "launchUrl": "swagger", + "environmentVariables": { + "ASPNETCORE_ENVIRONMENT": "Development" + } + } + } +} diff --git a/samples/AzureQueueApp/WeatherForecast.cs b/samples/AzureQueueApp/WeatherForecast.cs new file mode 100644 index 0000000..507bcdc --- /dev/null +++ b/samples/AzureQueueApp/WeatherForecast.cs @@ -0,0 +1,12 @@ +namespace AzureQueueApp; + +public class WeatherForecast +{ + public DateTime Date { get; set; } + + public int TemperatureC { get; set; } + + public int TemperatureF => 32 + (int) (TemperatureC / 0.5556); + + public string? 
Summary { get; set; } +} diff --git a/samples/AzureQueueApp/appsettings.Development.json b/samples/AzureQueueApp/appsettings.Development.json new file mode 100644 index 0000000..0c208ae --- /dev/null +++ b/samples/AzureQueueApp/appsettings.Development.json @@ -0,0 +1,8 @@ +{ + "Logging": { + "LogLevel": { + "Default": "Information", + "Microsoft.AspNetCore": "Warning" + } + } +} diff --git a/samples/AzureQueueApp/appsettings.json b/samples/AzureQueueApp/appsettings.json new file mode 100644 index 0000000..10f68b8 --- /dev/null +++ b/samples/AzureQueueApp/appsettings.json @@ -0,0 +1,9 @@ +{ + "Logging": { + "LogLevel": { + "Default": "Information", + "Microsoft.AspNetCore": "Warning" + } + }, + "AllowedHosts": "*" +} diff --git a/samples/KafkaConsumerApp/Controllers/WeatherForecastController.cs b/samples/KafkaConsumerApp/Controllers/WeatherForecastController.cs new file mode 100644 index 0000000..26a207e --- /dev/null +++ b/samples/KafkaConsumerApp/Controllers/WeatherForecastController.cs @@ -0,0 +1,32 @@ +using Microsoft.AspNetCore.Mvc; + +namespace KafkaConsumerApp.Controllers; + +[ApiController] +[Route("[controller]")] +public class WeatherForecastController : ControllerBase +{ + private static readonly string[] Summaries = new[] + { + "Freezing", "Bracing", "Chilly", "Cool", "Mild", "Warm", "Balmy", "Hot", "Sweltering", "Scorching" + }; + + private readonly ILogger _logger; + + public WeatherForecastController(ILogger logger) + { + _logger = logger; + } + + [HttpGet(Name = "GetWeatherForecast")] + public IEnumerable Get() + { + return Enumerable.Range(1, 5).Select(index => new WeatherForecast + { + Date = DateTime.Now.AddDays(index), + TemperatureC = Random.Shared.Next(-20, 55), + Summary = Summaries[Random.Shared.Next(Summaries.Length)] + }) + .ToArray(); + } +} diff --git a/samples/KafkaConsumerApp/KafkaConsumerApp.csproj b/samples/KafkaConsumerApp/KafkaConsumerApp.csproj new file mode 100644 index 0000000..3b880d8 --- /dev/null +++ 
b/samples/KafkaConsumerApp/KafkaConsumerApp.csproj @@ -0,0 +1,13 @@ + + + + net6.0 + enable + enable + + + + + + + diff --git a/samples/KafkaConsumerApp/Program.cs b/samples/KafkaConsumerApp/Program.cs new file mode 100644 index 0000000..48863a6 --- /dev/null +++ b/samples/KafkaConsumerApp/Program.cs @@ -0,0 +1,25 @@ +var builder = WebApplication.CreateBuilder(args); + +// Add services to the container. + +builder.Services.AddControllers(); +// Learn more about configuring Swagger/OpenAPI at https://aka.ms/aspnetcore/swashbuckle +builder.Services.AddEndpointsApiExplorer(); +builder.Services.AddSwaggerGen(); + +var app = builder.Build(); + +// Configure the HTTP request pipeline. +if (app.Environment.IsDevelopment()) +{ + app.UseSwagger(); + app.UseSwaggerUI(); +} + +app.UseHttpsRedirection(); + +app.UseAuthorization(); + +app.MapControllers(); + +app.Run(); diff --git a/samples/KafkaConsumerApp/Properties/launchSettings.json b/samples/KafkaConsumerApp/Properties/launchSettings.json new file mode 100644 index 0000000..e82fbd4 --- /dev/null +++ b/samples/KafkaConsumerApp/Properties/launchSettings.json @@ -0,0 +1,31 @@ +{ + "$schema": "https://json.schemastore.org/launchsettings.json", + "iisSettings": { + "windowsAuthentication": false, + "anonymousAuthentication": true, + "iisExpress": { + "applicationUrl": "http://localhost:13538", + "sslPort": 44379 + } + }, + "profiles": { + "KafkaConsumerApp": { + "commandName": "Project", + "dotnetRunMessages": true, + "launchBrowser": true, + "launchUrl": "swagger", + "applicationUrl": "https://localhost:7104;http://localhost:5164", + "environmentVariables": { + "ASPNETCORE_ENVIRONMENT": "Development" + } + }, + "IIS Express": { + "commandName": "IISExpress", + "launchBrowser": true, + "launchUrl": "swagger", + "environmentVariables": { + "ASPNETCORE_ENVIRONMENT": "Development" + } + } + } +} diff --git a/samples/KafkaConsumerApp/WeatherForecast.cs b/samples/KafkaConsumerApp/WeatherForecast.cs new file mode 100644 index 
0000000..af7b542 --- /dev/null +++ b/samples/KafkaConsumerApp/WeatherForecast.cs @@ -0,0 +1,12 @@ +namespace KafkaConsumerApp; + +public class WeatherForecast +{ + public DateTime Date { get; set; } + + public int TemperatureC { get; set; } + + public int TemperatureF => 32 + (int) (TemperatureC / 0.5556); + + public string? Summary { get; set; } +} diff --git a/samples/KafkaConsumerApp/appsettings.Development.json b/samples/KafkaConsumerApp/appsettings.Development.json new file mode 100644 index 0000000..0c208ae --- /dev/null +++ b/samples/KafkaConsumerApp/appsettings.Development.json @@ -0,0 +1,8 @@ +{ + "Logging": { + "LogLevel": { + "Default": "Information", + "Microsoft.AspNetCore": "Warning" + } + } +} diff --git a/samples/KafkaConsumerApp/appsettings.json b/samples/KafkaConsumerApp/appsettings.json new file mode 100644 index 0000000..10f68b8 --- /dev/null +++ b/samples/KafkaConsumerApp/appsettings.json @@ -0,0 +1,9 @@ +{ + "Logging": { + "LogLevel": { + "Default": "Information", + "Microsoft.AspNetCore": "Warning" + } + }, + "AllowedHosts": "*" +} diff --git a/src/LocalPost.Azure.QueueConsumer/AzureQueues.cs b/src/LocalPost.Azure.QueueConsumer/AzureQueues.cs new file mode 100644 index 0000000..9dc1b60 --- /dev/null +++ b/src/LocalPost.Azure.QueueConsumer/AzureQueues.cs @@ -0,0 +1,20 @@ +using Azure.Storage.Queues; + +namespace LocalPost.Azure.QueueConsumer; + +internal interface IAzureQueues +{ + QueueClient Get(string name); +} + +internal sealed class AzureQueues : IAzureQueues +{ + private readonly QueueServiceClient _client; + + public AzureQueues(QueueServiceClient client) + { + _client = client; + } + + public QueueClient Get(string name) => _client.GetQueueClient(name); +} diff --git a/src/LocalPost.Azure.QueueConsumer/ConsumerOptions.cs b/src/LocalPost.Azure.QueueConsumer/ConsumerOptions.cs new file mode 100644 index 0000000..7765394 --- /dev/null +++ b/src/LocalPost.Azure.QueueConsumer/ConsumerOptions.cs @@ -0,0 +1,18 @@ +using 
System.ComponentModel.DataAnnotations; + +namespace LocalPost.Azure.QueueConsumer; + +/// +/// General Azure Storage Queue consumer settings +/// +public sealed record ConsumerOptions +{ + /// + /// How many messages to process in parallel. + /// + [Required] public ushort MaxConcurrency { get; set; } = 10; + + [Required] public string QueueName { get; set; } = null!; + + [Range(1, 32)] public byte BufferSize { get; set; } = 10; +} diff --git a/src/LocalPost.Azure.QueueConsumer/DependencyInjection/HealthChecks.cs b/src/LocalPost.Azure.QueueConsumer/DependencyInjection/HealthChecks.cs new file mode 100644 index 0000000..5cb5e4f --- /dev/null +++ b/src/LocalPost.Azure.QueueConsumer/DependencyInjection/HealthChecks.cs @@ -0,0 +1,23 @@ +using System.Diagnostics.CodeAnalysis; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; + +namespace LocalPost.Azure.QueueConsumer.DependencyInjection; + +public static class HealthChecks +{ + [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] + public static IHealthChecksBuilder AddAmazonSqsConsumerReadinessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, + TimeSpan? timeout = default) => builder + .AddBackgroundServiceReadinessCheck(name, failureStatus, tags, timeout) + .AddBackgroundQueueConsumerReadinessCheck(name, failureStatus, tags, timeout); + + [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] + public static IHealthChecksBuilder AddAmazonSqsConsumerLivenessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, + TimeSpan? 
timeout = default) => builder + .AddBackgroundServiceReadinessCheck(name, failureStatus, tags, timeout) + .AddBackgroundQueueConsumerReadinessCheck(name, failureStatus, tags, timeout); +} diff --git a/src/LocalPost.Azure.QueueConsumer/DependencyInjection/ServiceRegistration.cs b/src/LocalPost.Azure.QueueConsumer/DependencyInjection/ServiceRegistration.cs new file mode 100644 index 0000000..65f8e5a --- /dev/null +++ b/src/LocalPost.Azure.QueueConsumer/DependencyInjection/ServiceRegistration.cs @@ -0,0 +1,37 @@ +using Azure.Storage.Queues.Models; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.DependencyInjection.Extensions; +using Microsoft.Extensions.Options; + +namespace LocalPost.Azure.QueueConsumer.DependencyInjection; + +public static class ServiceRegistration +{ + public static OptionsBuilder AddAzureQueueConsumer(this IServiceCollection services, + string name) where THandler : IHandler => + services + .AddAzureQueueConsumer(name, provider => provider.GetRequiredService().InvokeAsync); + + public static OptionsBuilder AddAzureQueueConsumer(this IServiceCollection services, + string name, Func> handlerFactory) + { + // Expect Azure QueueServiceClient to be registered in the DI container using the usual way, + // see https://learn.microsoft.com/en-us/dotnet/azure/sdk/dependency-injection#register-client + services.TryAddSingleton(); + + services.TryAddSingleton(); + services.AddSingleton(provider => ActivatorUtilities.CreateInstance(provider, name)); + + services + .AddCustomBackgroundQueue($"AzureQueue/{name}", + provider => provider.GetQueue(name), + provider => provider.GetQueue(name).Wrap(handlerFactory(provider))) + .Configure>( + (options, consumerOptions) => { options.MaxConcurrency = consumerOptions.Get(name).MaxConcurrency; }); + + // TODO Health check, metrics + + return services.AddOptions(name).Configure(options => options.QueueName = name); + } +} diff --git 
a/src/LocalPost.Azure.QueueConsumer/IMessageHandler.cs b/src/LocalPost.Azure.QueueConsumer/IMessageHandler.cs new file mode 100644 index 0000000..3aceb1b --- /dev/null +++ b/src/LocalPost.Azure.QueueConsumer/IMessageHandler.cs @@ -0,0 +1,7 @@ +using Azure.Storage.Queues.Models; + +namespace LocalPost.Azure.QueueConsumer; + +public interface IMessageHandler : IHandler +{ +} diff --git a/src/LocalPost.Azure.QueueConsumer/LocalPost.Azure.QueueConsumer.csproj b/src/LocalPost.Azure.QueueConsumer/LocalPost.Azure.QueueConsumer.csproj new file mode 100644 index 0000000..d08b6f3 --- /dev/null +++ b/src/LocalPost.Azure.QueueConsumer/LocalPost.Azure.QueueConsumer.csproj @@ -0,0 +1,55 @@ + + + + netstandard2.0 + true + + false + + LocalPost.Azure.QueueConsumer + background;task;queue;azure;sqs + Local (in-process) background queue for sending to Amazon SNS. + Alexey Shokov + + README.md + MIT + https://github.com/alexeyshockov/LocalPost + git + true + + + + + + + + + + true + + + + true + true + true + true + snupkg + + + true + + + + + + + + + + + + + + + + diff --git a/src/LocalPost.Azure.QueueConsumer/MessagePuller.cs b/src/LocalPost.Azure.QueueConsumer/MessagePuller.cs new file mode 100644 index 0000000..5d338d8 --- /dev/null +++ b/src/LocalPost.Azure.QueueConsumer/MessagePuller.cs @@ -0,0 +1,65 @@ +using System.Diagnostics; +using Azure; +using Azure.Storage.Queues; +using Azure.Storage.Queues.Models; +using Microsoft.Extensions.Options; + +namespace LocalPost.Azure.QueueConsumer; + +internal sealed class MessagePuller : IAsyncEnumerable +{ + private static readonly ActivitySource Tracer = new(typeof(MessagePuller).Namespace); + + private readonly QueueClient _queue; + private readonly ConsumerOptions _options; + + public MessagePuller(IAzureQueues queues, string name, IOptionsMonitor options) + { + _options = options.Get(name); + var queueName = _options.QueueName ?? 
throw new ArgumentNullException(nameof(options), "Queue name is required"); + + Name = name; + + _queue = queues.Get(queueName); + } + + public string Name { get; } + + public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) + { + while (!ct.IsCancellationRequested) + { + var messages = await PullMessagesAsync(ct); + + foreach (var message in messages) + yield return message; + } + } + + private async Task> PullMessagesAsync(CancellationToken ct) + { + using var span = Tracer.StartActivity(); + + // Azure SDK handles network failures + var response = await _queue.ReceiveMessagesAsync(_options.BufferSize, null, ct); + + return response.Value; + } + + public Handler Wrap(Handler handler) => async (message, ct) => + { + await handler(message, ct); + + // Won't be deleted in case of an exception in the handler + await DeleteMessageAsync(message, ct); + }; + + private async Task DeleteMessageAsync(QueueMessage message, CancellationToken ct) + { + using var span = Tracer.StartActivity(); + + await _queue.DeleteMessageAsync(message.MessageId, message.PopReceipt, ct); + + // TODO Log failures?.. 
+ } +} diff --git a/src/LocalPost.KafkaConsumer/ConsumeContext.cs b/src/LocalPost.KafkaConsumer/ConsumeContext.cs new file mode 100644 index 0000000..413201b --- /dev/null +++ b/src/LocalPost.KafkaConsumer/ConsumeContext.cs @@ -0,0 +1,10 @@ +using Confluent.Kafka; + +namespace LocalPost.KafkaConsumer; + +public readonly record struct ConsumeContext +{ + public required IConsumer Client { get; init; } + + public required ConsumeResult Result { get; init; } +} diff --git a/src/LocalPost.KafkaConsumer/Consumer.cs b/src/LocalPost.KafkaConsumer/Consumer.cs new file mode 100644 index 0000000..27e09b7 --- /dev/null +++ b/src/LocalPost.KafkaConsumer/Consumer.cs @@ -0,0 +1,94 @@ +using System.Threading.Channels; +using Confluent.Kafka; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; + +namespace LocalPost.KafkaConsumer; + +public static partial class Consumer +{ + internal sealed class Service : BackgroundService + { + private readonly ILogger _logger; + private readonly ConsumerOptions _options; + + private readonly IConsumer _kafka; + private readonly ChannelWriter> _queue; + private readonly Handler _errorHandler; + + public Service(ILogger logger, ConsumerOptions options, string name, IConsumer kafka, + ChannelWriter> queue, Handler errorHandler) + { + _logger = logger; + _options = options; + _kafka = kafka; + _queue = queue; + _errorHandler = errorHandler; + Name = name; + } + + public string Name { get; } + + public bool Closed { get; private set; } + + protected override Task ExecuteAsync(CancellationToken stoppingToken) => + Task.Run(() => Run(stoppingToken), stoppingToken); + + private async Task Run(CancellationToken stoppingToken = default) + { + _kafka.Subscribe(_options.TopicName); + + while (!stoppingToken.IsCancellationRequested) + { + await Task.Yield(); + + try + { + await _queue.WaitToWriteAsync(stoppingToken); // Wait for the buffer capacity + + await Consume(stoppingToken); + } + catch (OperationCanceledException e) when 
(e.CancellationToken == stoppingToken) + { + // Just complete the method normally... + } + } + + _logger.LogInformation("Stopping Kafka {Topic} consumer...", _options.TopicName); + Closed = true; + _kafka.Close(); + _queue.Complete(); + } + + private async Task Consume(CancellationToken stoppingToken) + { + // TODO Transaction activity... + try + { + var consumeResult = _kafka.Consume(stoppingToken); + + if (consumeResult is null || consumeResult.IsPartitionEOF || consumeResult.Message is null) + return; // Continue the loop + + await _queue.WriteAsync(new ConsumeContext + { + Client = _kafka, + Result = consumeResult, + }, stoppingToken); + } + catch (ConsumeException e) + { + _logger.LogError(e, "Kafka {Topic} consumer error, help link: {HelpLink}", + _options.TopicName, e.HelpLink); + + await _errorHandler(e, stoppingToken); // TODO exit the app if configured... + } + } + + public override void Dispose() + { + base.Dispose(); + _kafka.Dispose(); + } + } +} diff --git a/src/LocalPost.KafkaConsumer/ConsumerBuilder.cs b/src/LocalPost.KafkaConsumer/ConsumerBuilder.cs new file mode 100644 index 0000000..76c7e7c --- /dev/null +++ b/src/LocalPost.KafkaConsumer/ConsumerBuilder.cs @@ -0,0 +1,89 @@ +using System.Threading.Channels; +using Confluent.Kafka; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Options; + +namespace LocalPost.KafkaConsumer; + +public static partial class Consumer +{ + public sealed class Builder + { + private Action> _configure = (cb) => { }; + + private Handler _errorHandler = (e, ct) => Task.CompletedTask; + + private Handler> _handler = (c, ct) => Task.CompletedTask; + + private readonly List>> _middlewares = new(); + + private readonly Channel> _queue = + // Kafka client (librdkafka) is optimised to prefetch messages, so there is no need to maintain our own + // buffer + Channel.CreateBounded>(new BoundedChannelOptions(1) + { + SingleWriter = true, + SingleReader = true + // TODO AllowSynchronousContinuations?.. 
+ }); + + public required string Name { get; init; } + + public Builder ConfigureKafkaClient(Action> configure) + { + _configure = configure; + + return this; + } + + // TODO Remove + public Builder SetErrorHandler(Handler handler) + { + _errorHandler = handler; + + return this; + } + + public Builder SetMessageHandler(Handler> handler) + { + _handler = (c, ct) => handler(c.Result.Message, ct); + + return this; + } + + // FIXME Take from the container... +// public Builder SetMessageHandler(THandler handler) +// where THandler : IMessageHandler> => +// SetMessageHandler(handler.Process); + + public Builder SetMessageHandler(Handler handler) => + SetMessageHandler((m, ct) => handler(m.Value, ct)); + + public Builder AddMiddleware(MiddlewareFactory> factory) + { + _middlewares.Add(factory); + + return this; + } + + public Builder AddMiddleware(Middleware> middleware) => + AddMiddleware(_ => middleware); + + internal HandlerFactory> BuildHandlerFactory() => + new MiddlewareStack>(_handler, _middlewares).Resolve; + + internal IAsyncEnumerable> Messages => _queue.Reader.ReadAllAsync(); + + internal Service Build(IServiceProvider provider) + { + var clientConfig = provider.GetRequiredService>().Get(Name); + + var clientBuilder = new ConsumerBuilder(clientConfig); + _configure(clientBuilder); + + var kafkaClient = clientBuilder.Build(); + + return ActivatorUtilities.CreateInstance(provider, clientConfig, Name, kafkaClient, _errorHandler); + } + } +} diff --git a/src/LocalPost.KafkaConsumer/ConsumerOptions.cs b/src/LocalPost.KafkaConsumer/ConsumerOptions.cs new file mode 100644 index 0000000..c18218d --- /dev/null +++ b/src/LocalPost.KafkaConsumer/ConsumerOptions.cs @@ -0,0 +1,17 @@ +using System.ComponentModel.DataAnnotations; +using Confluent.Kafka; + +namespace LocalPost.KafkaConsumer; + +/// +/// General Azure Storage Queue consumer settings +/// +public sealed class ConsumerOptions : ConsumerConfig +{ + /// + /// How many messages to process in parallel. 
+ /// + [Required] public ushort MaxConcurrency { get; set; } = 10; + + [Required] public string TopicName { get; set; } = null!; +} diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionExtensions.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionExtensions.cs new file mode 100644 index 0000000..a5f66e9 --- /dev/null +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionExtensions.cs @@ -0,0 +1,25 @@ +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Options; + +namespace LocalPost.KafkaConsumer.DependencyInjection; + +public static class ServiceCollectionExtensions +{ + public static OptionsBuilder AddKafkaConsumer(this IServiceCollection services, + string name, Action.Builder> configure) + { + var builder = new Consumer.Builder { Name = name }; + configure(builder); + + services.AddHostedService(provider => builder.Build(provider)); + services + .AddCustomBackgroundQueue($"Kafka/{name}", _ => builder.Messages, builder.BuildHandlerFactory()) + .Configure>( + (options, consumerOptions) => { options.MaxConcurrency = consumerOptions.Get(name).MaxConcurrency; }); + + // TODO Health check, metrics (with all topics for this consumer... 
(it can more than 1)) + + return services.AddOptions(name).Configure(options => options.TopicName = name); + } +} diff --git a/src/LocalPost.KafkaConsumer/HealthCheck.cs b/src/LocalPost.KafkaConsumer/HealthCheck.cs new file mode 100644 index 0000000..b88e111 --- /dev/null +++ b/src/LocalPost.KafkaConsumer/HealthCheck.cs @@ -0,0 +1,22 @@ +using Microsoft.Extensions.Diagnostics.HealthChecks; +using static Microsoft.Extensions.Diagnostics.HealthChecks.HealthCheckResult; + +namespace LocalPost.KafkaConsumer; + +internal class HealthCheck : IHealthCheck +{ + private readonly Consumer.Service _consumer; + + + public Task CheckHealthAsync(HealthCheckContext context, + CancellationToken cancellationToken = default) => Task.FromResult(_currentRates.UpdatedAt switch + { + not null => Healthy("Thresholds loaded", new Dictionary + { + ["LastUpdated"] = _currentRates.UpdatedAt, + ["RulesThreshold"] = _currentRates.Rules, + ["WorkflowThreshold"] = _currentRates.Workflows + }), + _ => Unhealthy("Thresholds not loaded") + }); +} diff --git a/src/LocalPost.KafkaConsumer/IMessageHandler.cs b/src/LocalPost.KafkaConsumer/IMessageHandler.cs new file mode 100644 index 0000000..6cadbf6 --- /dev/null +++ b/src/LocalPost.KafkaConsumer/IMessageHandler.cs @@ -0,0 +1,7 @@ +using Confluent.Kafka; + +namespace LocalPost.KafkaConsumer; + +public interface IMessageHandler : LocalPost.IHandler> +{ +} diff --git a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj new file mode 100644 index 0000000..70d8948 --- /dev/null +++ b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj @@ -0,0 +1,55 @@ + + + + netstandard2.0 + true + + false + + LocalPost.KafkaConsumer + background;task;queue;kafka + Local (in-process) background queue for sending to Amazon SNS. 
+ Alexey Shokov + + README.md + MIT + https://github.com/alexeyshockov/LocalPost + git + true + + + + + + + + + + true + + + + true + true + true + true + snupkg + + + true + + + + + + + + + + + + + + + + diff --git a/src/LocalPost.RabbitMqConsumer/LocalPost.RabbitMqConsumer.csproj b/src/LocalPost.RabbitMqConsumer/LocalPost.RabbitMqConsumer.csproj new file mode 100644 index 0000000..b2a3faf --- /dev/null +++ b/src/LocalPost.RabbitMqConsumer/LocalPost.RabbitMqConsumer.csproj @@ -0,0 +1,55 @@ + + + + netstandard2.0 + true + + false + + LocalPost.RabbitMqConsumer + background;task;queue;rabbitmq + Local (in-process) background queue for sending to Amazon SNS. + Alexey Shokov + + README.md + MIT + https://github.com/alexeyshockov/LocalPost + git + true + + + + + + + + + + true + + + + true + true + true + true + snupkg + + + true + + + + + + + + + + + + + + + + diff --git a/src/LocalPost.SnsPublisher/DependencyInjection/ServiceCollectionExtensions.cs b/src/LocalPost.SnsPublisher/DependencyInjection/ServiceCollectionExtensions.cs index f44a234..b367fc7 100644 --- a/src/LocalPost.SnsPublisher/DependencyInjection/ServiceCollectionExtensions.cs +++ b/src/LocalPost.SnsPublisher/DependencyInjection/ServiceCollectionExtensions.cs @@ -13,11 +13,11 @@ public static OptionsBuilder AddAmazonSnsBatchPublisher(this IServ services.TryAddSingleton(); return services - .AddAmazonSnsBatchPublisher(provider => provider.GetRequiredService().Send); + .AddAmazonSnsBatchPublisher(provider => provider.GetRequiredService().SendAsync); } public static OptionsBuilder AddAmazonSnsBatchPublisher(this IServiceCollection services, - Func> handlerFactory) + HandlerFactory handlerFactory) { services.TryAddSingleton(); services.TryAddSingleton(provider => provider.GetRequiredService()); diff --git a/src/LocalPost.SnsPublisher/Sender.cs b/src/LocalPost.SnsPublisher/Sender.cs index da477cc..0b328be 100644 --- a/src/LocalPost.SnsPublisher/Sender.cs +++ b/src/LocalPost.SnsPublisher/Sender.cs @@ -1,14 
+1,14 @@ +using System.Diagnostics; using Amazon.SimpleNotificationService; using Amazon.SimpleNotificationService.Model; using Microsoft.Extensions.Logging; namespace LocalPost.SnsPublisher; -/// -/// Default implementation -/// internal sealed class Sender { + private static readonly ActivitySource Tracer = new(typeof(Sender).Namespace); + private readonly ILogger _logger; private readonly IAmazonSimpleNotificationService _sns; @@ -18,9 +18,11 @@ public Sender(ILogger logger, IAmazonSimpleNotificationService sns) _sns = sns; } - public async Task Send(PublishBatchRequest payload, CancellationToken ct) + public async Task SendAsync(PublishBatchRequest payload, CancellationToken ct) { - _logger.LogTrace("Sending a batch of {Amount} publish request(s) to SNS...", payload.PublishBatchRequestEntries.Count); + using var span = Tracer.StartActivity(); + + _logger.LogTrace("Sending a batch of {Amount} message(s) to SNS...", payload.PublishBatchRequestEntries.Count); var batchResponse = await _sns.PublishBatchAsync(payload, ct); if (batchResponse.Failed.Any()) diff --git a/src/LocalPost.SqsConsumer/ConsumeContext.cs b/src/LocalPost.SqsConsumer/ConsumeContext.cs new file mode 100644 index 0000000..a8cdd10 --- /dev/null +++ b/src/LocalPost.SqsConsumer/ConsumeContext.cs @@ -0,0 +1,10 @@ +using Amazon.SQS.Model; + +namespace LocalPost.SqsConsumer; + +public readonly record struct ConsumeContext(string QueueName, string QueueUrl, Message Message) +{ + public readonly DateTimeOffset ReceivedAt = DateTimeOffset.Now; + + public bool IsStale => false; // TODO Check the visibility timeout +} diff --git a/src/LocalPost.SqsConsumer/Consumer.cs b/src/LocalPost.SqsConsumer/Consumer.cs new file mode 100644 index 0000000..de680ef --- /dev/null +++ b/src/LocalPost.SqsConsumer/Consumer.cs @@ -0,0 +1,57 @@ +using System.Threading.Channels; +using Microsoft.Extensions.Options; + +namespace LocalPost.SqsConsumer; + +public static partial class Consumer +{ + internal sealed class Service : 
IBackgroundService + { + private readonly QueueClient _client; + private readonly Channel _queue; + + public Service(string name, QueueClient client, IOptionsMonitor options) + { + var config = options.Get(name); + + _client = client; + _queue = Channel.CreateBounded(new BoundedChannelOptions(config.BufferSize) + { + SingleWriter = true, + SingleReader = true + // TODO AllowSynchronousContinuations?.. + }); + + Name = name; + } + + public string Name { get; } + + public IAsyncEnumerable Messages => _queue.Reader.ReadAllAsync(); + + public async Task StartAsync(CancellationToken ct) + { + await _client.ConnectAsync(ct); + } + + public async Task ExecuteAsync(CancellationToken ct) + { + while (!ct.IsCancellationRequested) + { + await _queue.Writer.WaitToWriteAsync(ct); // Wait for the buffer capacity + + await Consume(ct); + } + } + + public Task StopAsync(CancellationToken ct) => Task.CompletedTask; + + private async Task Consume(CancellationToken stoppingToken) + { + var messages = await _client.PullMessagesAsync(stoppingToken); + + foreach (var message in messages) + await _queue.Writer.WriteAsync(message, stoppingToken); + } + } +} diff --git a/src/LocalPost.SqsConsumer/ConsumerBuilder.cs b/src/LocalPost.SqsConsumer/ConsumerBuilder.cs new file mode 100644 index 0000000..3a8780c --- /dev/null +++ b/src/LocalPost.SqsConsumer/ConsumerBuilder.cs @@ -0,0 +1,39 @@ +using System.Collections.Immutable; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; + +namespace LocalPost.SqsConsumer; + +public static partial class Consumer +{ + public sealed class Builder + { + public Builder(string name) + { + Name = name; + MiddlewareStackBuilder.Append(); + } + + public string Name { get; } + + // TODO Use... 
+ public Handler ErrorHandler { get; set; } = (m, ct) => Task.CompletedTask; + + public MiddlewareStackBuilder MiddlewareStackBuilder { get; } = new(); + + internal HandlerFactory BuildHandlerFactory() => + MiddlewareStackBuilder.Build().Resolve; + + internal IHostedService Build(IServiceProvider provider) + { + var client = ActivatorUtilities.CreateInstance(provider, Name); + var consumer = ActivatorUtilities.CreateInstance(provider, Name, client); + + var consumerSupervisor = ActivatorUtilities.CreateInstance>(provider, + Name, consumer); + + return consumerSupervisor; + } + } +} diff --git a/src/LocalPost.SqsConsumer/ConsumerMiddleware.cs b/src/LocalPost.SqsConsumer/ConsumerMiddleware.cs new file mode 100644 index 0000000..298732d --- /dev/null +++ b/src/LocalPost.SqsConsumer/ConsumerMiddleware.cs @@ -0,0 +1,28 @@ +using Amazon.SQS; + +namespace LocalPost.SqsConsumer; + +public static partial class Consumer +{ + internal sealed class Middleware : IMiddleware + { + private readonly IAmazonSQS _sqs; + + public Middleware(IAmazonSQS sqs) + { + _sqs = sqs; + } + + public Handler Invoke(Handler next) => async (context, ct) => + { + if (context.IsStale) + return; + + // TODO Processing timeout from the visibility timeout + await next(context, ct); // Extend message's VisibilityTimeout in case of long processing?.. 
+ + // Won't be deleted in case of an exception in the handler + await _sqs.DeleteMessageAsync(context.QueueUrl, context.Message.ReceiptHandle, ct); + }; + } +} diff --git a/src/LocalPost.SqsConsumer/ConsumerOptions.cs b/src/LocalPost.SqsConsumer/ConsumerOptions.cs new file mode 100644 index 0000000..71372ac --- /dev/null +++ b/src/LocalPost.SqsConsumer/ConsumerOptions.cs @@ -0,0 +1,67 @@ +using System.Collections.Immutable; +using System.ComponentModel.DataAnnotations; +using Amazon.SQS; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; + +namespace LocalPost.SqsConsumer; + +/// +/// General SQS consumer settings +/// +public sealed record ConsumerOptions +{ + internal static readonly List AllAttributes = new() { "All" }; + internal static readonly List AllMessageAttributes = new() { "All" }; + + public const int DefaultTimeout = 30; + + /// + /// How many messages to process in parallel. + /// + [Required] public ushort MaxConcurrency { get; set; } = 10; + + [Required] public string QueueName { get; set; } = null!; + + private string? _queueUrl; + /// + /// If not set, IAmazonSQS.GetQueueUrlAsync(QueueName) will be used once, to get the actual URL of the queue. + /// + [Url] public string? QueueUrl + { + get => _queueUrl; + set + { + _queueUrl = value; + + // Extract name (MyQueue) from an URL (https://sqs.us-east-2.amazonaws.com/123456789012/MyQueue) + if (Uri.TryCreate(value, UriKind.Absolute, out var url) && url.Segments.Length >= 3) + QueueName = url.Segments[2]; + } + } + + /// + /// Time to wait for available messages in the queue. 0 is short pooling, where 1..20 activates long pooling. 
+ /// + /// + /// Amazon SQS short and long polling + /// + /// + /// Setting up long polling + /// + [Range(0, 20)] public byte WaitTimeSeconds { get; set; } = 20; + + [Range(1, 10)] public byte MaxNumberOfMessages { get; set; } = 10; + + [Range(1, uint.MaxValue)] public byte BufferSize { get; set; } = 1; + + /// + /// Message processing timeout, in seconds. If not set, IAmazonSQS.GetQueueAttributesAsync() will be used once, to get + /// VisibilityTimeout for the queue. + /// + /// + /// Amazon SQS visibility timeout + /// + [Range(1, 43200)] + public int Timeout { get; set; } = DefaultTimeout; +} diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecks.cs b/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecks.cs new file mode 100644 index 0000000..24f4e6c --- /dev/null +++ b/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecks.cs @@ -0,0 +1,23 @@ +using System.Diagnostics.CodeAnalysis; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; + +namespace LocalPost.SqsConsumer.DependencyInjection; + +public static class HealthChecks +{ + [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] + public static IHealthChecksBuilder AddAmazonSqsConsumerReadinessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, + TimeSpan? timeout = default) => builder + .AddBackgroundServiceReadinessCheck(name, failureStatus, tags, timeout) + .AddBackgroundQueueConsumerReadinessCheck(name, failureStatus, tags, timeout); + + [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] + public static IHealthChecksBuilder AddAmazonSqsConsumerLivenessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, + TimeSpan? 
timeout = default) => builder + .AddBackgroundServiceReadinessCheck(name, failureStatus, tags, timeout) + .AddBackgroundQueueConsumerReadinessCheck(name, failureStatus, tags, timeout); +} diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceCollectionExtensions.cs b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceCollectionExtensions.cs deleted file mode 100644 index 71c0961..0000000 --- a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceCollectionExtensions.cs +++ /dev/null @@ -1,83 +0,0 @@ -using Amazon.SQS.Model; -using LocalPost; -using LocalPost.DependencyInjection; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.DependencyInjection.Extensions; -using Microsoft.Extensions.Options; - -namespace LocalPost.SqsConsumer.DependencyInjection; - -public static class ServiceCollectionExtensions -{ - public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, - string name, MessageHandler handler) => - services.AddAmazonSqsConsumer(name, _ => handler); - - public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, - string name, Func handler) where TDep1 : notnull => - services.AddAmazonSqsConsumer(name, provider => (context, ct) => - { - var dep1 = provider.GetRequiredService(); - - return handler(dep1, context, ct); - }); - - public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, - string name, Func handler) - where TDep1 : notnull - where TDep2 : notnull => - services.AddAmazonSqsConsumer(name, provider => (context, ct) => - { - var dep1 = provider.GetRequiredService(); - var dep2 = provider.GetRequiredService(); - - return handler(dep1, dep2, context, ct); - }); - - public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, - string name, Func handler) - where TDep1 : notnull - where TDep2 : notnull - where TDep3 : notnull => - services.AddAmazonSqsConsumer(name, provider => (context, 
ct) => - { - var dep1 = provider.GetRequiredService(); - var dep2 = provider.GetRequiredService(); - var dep3 = provider.GetRequiredService(); - - return handler(dep1, dep2, dep3, context, ct); - }); - - public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, - string name) where THandler : IMessageHandler => - services - .AddAmazonSqsConsumer(name, provider => provider.GetRequiredService().Process); - - public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, - string name, Func> handlerFactory) - { - services.TryAddSingleton, SqsConsumerOptionsResolver>(); - - services.TryAddSingleton(); - services.AddSingleton(provider => ActivatorUtilities.CreateInstance(provider, name)); - - services - .AddCustomBackgroundQueue($"SQS/{name}", - provider => provider.GetSqs(name), - provider => provider.GetSqs(name).Handler(handlerFactory(provider))) - .Configure>( - (options, sqsOptions) => { options.MaxConcurrency = sqsOptions.Get(name).MaxConcurrency; }); - - services.TryAddSingleton(); - services - .AddCustomBackgroundQueue($"SQS/{name}/ProcessedMessages", - provider => provider.GetSqs(name).ProcessedMessages, - provider => provider.GetRequiredService().Process) - .Configure>( - (options, sqsOptions) => { options.MaxConcurrency = sqsOptions.Get(name).MaxConcurrency; }); - - // TODO Health check, metrics - - return services.AddOptions(name).Configure(options => options.QueueName = name); - } -} diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs new file mode 100644 index 0000000..c604676 --- /dev/null +++ b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs @@ -0,0 +1,79 @@ +using Amazon.SQS.Model; +using LocalPost; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.DependencyInjection.Extensions; +using Microsoft.Extensions.Options; + 
+namespace LocalPost.SqsConsumer.DependencyInjection; + +public static class ServiceRegistration +{ +// public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, +// string name, Handler handler) => +// services.AddAmazonSqsConsumer(name, _ => handler); +// +// public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, +// string name, Func handler) where TDep1 : notnull => +// services.AddAmazonSqsConsumer(name, provider => (context, ct) => +// { +// var dep1 = provider.GetRequiredService(); +// +// return handler(dep1, context, ct); +// }); +// +// public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, +// string name, Func handler) +// where TDep1 : notnull +// where TDep2 : notnull => +// services.AddAmazonSqsConsumer(name, provider => (context, ct) => +// { +// var dep1 = provider.GetRequiredService(); +// var dep2 = provider.GetRequiredService(); +// +// return handler(dep1, dep2, context, ct); +// }); +// +// public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, +// string name, Func handler) +// where TDep1 : notnull +// where TDep2 : notnull +// where TDep3 : notnull => +// services.AddAmazonSqsConsumer(name, provider => (context, ct) => +// { +// var dep1 = provider.GetRequiredService(); +// var dep2 = provider.GetRequiredService(); +// var dep3 = provider.GetRequiredService(); +// +// return handler(dep1, dep2, dep3, context, ct); +// }); + +// public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, +// string name) where THandler : IMessageHandler => +// services +// .AddAmazonSqsConsumer(name, provider => provider.GetRequiredService().Process); + + public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, + string name, Action configure) + { + var builder = new Consumer.Builder(name); + configure(builder); + services.AddHostedService(builder.Build); + + 
services.TryAddSingleton(); + + services + .AddBackgroundQueueConsumer(name, b => b + .SetReaderFactory(provider => provider.GetRequiredService(name).Messages) + .MiddlewareStackBuilder.SetHandler(builder.BuildHandlerFactory())) + .Configure>( + (options, consumerOptions) => { options.MaxConcurrency = consumerOptions.Get(name).MaxConcurrency; }); + + return services.AddOptions(name).Configure(options => options.QueueName = name); + } + +// public static IHealthChecksBuilder AddAmazonSqsConsumerHealthCheck(this IHealthChecksBuilder builder) +// { +// // TODO Add a global one... +// } +} diff --git a/src/LocalPost.SqsConsumer/ISqsMessageHandler.cs b/src/LocalPost.SqsConsumer/IMessageHandler.cs similarity index 55% rename from src/LocalPost.SqsConsumer/ISqsMessageHandler.cs rename to src/LocalPost.SqsConsumer/IMessageHandler.cs index 0b454a0..5cceb8d 100644 --- a/src/LocalPost.SqsConsumer/ISqsMessageHandler.cs +++ b/src/LocalPost.SqsConsumer/IMessageHandler.cs @@ -3,6 +3,6 @@ namespace LocalPost.SqsConsumer; -public interface ISqsMessageHandler : IMessageHandler +public interface IMessageHandler : IHandler { } diff --git a/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj b/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj index 7feb6cf..55ecf76 100644 --- a/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj +++ b/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj @@ -7,7 +7,7 @@ false LocalPost.SqsConsumer - background;task;queue;amazon;sns;aws + background;task;queue;amazon;sqs;aws Local (in-process) background queue for sending to Amazon SNS. 
Alexey Shokov diff --git a/src/LocalPost.SqsConsumer/ProcessedMessages.cs b/src/LocalPost.SqsConsumer/ProcessedMessages.cs deleted file mode 100644 index a5a4117..0000000 --- a/src/LocalPost.SqsConsumer/ProcessedMessages.cs +++ /dev/null @@ -1,47 +0,0 @@ -using System.Threading.Channels; -using Amazon.SQS; -using Amazon.SQS.Model; -using LocalPost; - -namespace LocalPost.SqsConsumer; - -internal sealed class ProcessedMessages : IBackgroundQueue, IAsyncEnumerable -{ - private readonly string _queueUrl; - private readonly Channel _messages = - Channel.CreateUnbounded(new UnboundedChannelOptions - { - SingleReader = true, - SingleWriter = false, - }); - - public ProcessedMessages(string queueUrl) - { - _queueUrl = queueUrl; - } - - public ValueTask Enqueue(Message item, CancellationToken ct = default) => _messages.Writer.WriteAsync( - new DeleteMessageBatchRequestEntry(Guid.NewGuid().ToString(), item.ReceiptHandle), ct); - - public IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) => - _messages.Reader.ReadAllAsync(ct).Batch(() => new SqsDeleteBatchBuilder(_queueUrl)).GetAsyncEnumerator(ct); -} - -internal sealed class ProcessedMessagesHandler : IMessageHandler -{ - private readonly IAmazonSQS _sqs; - - public ProcessedMessagesHandler(IAmazonSQS sqs) - { - _sqs = sqs; - } - - public async Task Process(DeleteMessageBatchRequest payload, CancellationToken ct) - { - var response = await _sqs.DeleteMessageBatchAsync(payload, ct); - if (response.Failed.Any()) - { - // TODO Log failures - } - } -} diff --git a/src/LocalPost.SqsConsumer/QueueClient.cs b/src/LocalPost.SqsConsumer/QueueClient.cs new file mode 100644 index 0000000..3e31fbb --- /dev/null +++ b/src/LocalPost.SqsConsumer/QueueClient.cs @@ -0,0 +1,110 @@ +using System.Diagnostics; +using Amazon.SQS; +using Amazon.SQS.Model; +using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; + +namespace LocalPost.SqsConsumer; + +internal sealed class QueueClient +{ + private static readonly 
ActivitySource Tracer = new(typeof(QueueClient).Namespace); + + private readonly ILogger _logger; + private readonly IAmazonSQS _sqs; + private readonly ConsumerOptions _options; + + public QueueClient(ILogger logger, string name, IAmazonSQS sqs, IOptionsMonitor options) + { + _logger = logger; + _sqs = sqs; + _options = options.Get(name); + } + + private GetQueueAttributesResponse? _queueAttributes; + + // TODO Use + public int MessageVisibilityTimeout => _queueAttributes?.VisibilityTimeout switch + { + > 0 => _queueAttributes.VisibilityTimeout, + _ => _options.Timeout + }; + + private string? _queueUrl; + private string QueueUrl => _queueUrl ?? throw new InvalidOperationException("SQS queue client is not connected"); + + public async Task ConnectAsync(CancellationToken ct) + { + if (string.IsNullOrEmpty(_options.QueueUrl)) + // Checking for a possible error in the response would be also good... + _queueUrl = (await _sqs.GetQueueUrlAsync(_options.QueueName, ct)).QueueUrl; + + await FetchQueueAttributesAsync(ct); + } + + private async Task FetchQueueAttributesAsync(CancellationToken ct) + { + try + { + // Checking for a possible error in the response would be also good... + _queueAttributes = await _sqs.GetQueueAttributesAsync(QueueUrl, ConsumerOptions.AllAttributes, ct); + } + catch (OperationCanceledException e) when (e.CancellationToken == ct) + { + throw; + } + catch (Exception e) + { + _logger.LogWarning(e, "Cannot fetch attributes for SQS {Queue}", _options.QueueName); + } + } + + public async Task DeleteMessageAsync(Message message, CancellationToken ct) + { + using var span = Tracer.StartActivity(); + + var response = await _sqs.DeleteMessageAsync(QueueUrl, message.ReceiptHandle, ct); + + // TODO Log failures?.. 
+ } + + public async Task> PullMessagesAsync(CancellationToken ct) + { + using var span = Tracer.StartActivity(); + + var attributeNames = ConsumerOptions.AllAttributes; // TODO Configurable + var messageAttributeNames = ConsumerOptions.AllMessageAttributes; // TODO Configurable + + try + { + // AWS SDK handles network failures, see + // https://docs.aws.amazon.com/sdkref/latest/guide/feature-retry-behavior.html + var response = await _sqs.ReceiveMessageAsync(new ReceiveMessageRequest + { + QueueUrl = QueueUrl, + WaitTimeSeconds = _options.WaitTimeSeconds, + MaxNumberOfMessages = _options.MaxNumberOfMessages, + AttributeNames = attributeNames, + MessageAttributeNames = messageAttributeNames, + }, ct); + + // TODO Add number of received messages to the diagnostics span + return response.Messages + .Select(message => new ConsumeContext(_options.QueueName, QueueUrl, message)).ToArray(); + } +// catch (OverLimitException) +// { +// // TODO Handle +// } + catch (OperationCanceledException e) when (e.CancellationToken == ct) + { + throw; + } + catch (Exception e) + { + // FIXME Error handler + } + + return Array.Empty(); + } +} diff --git a/src/LocalPost.SqsConsumer/SqsConsumerOptions.cs b/src/LocalPost.SqsConsumer/SqsConsumerOptions.cs deleted file mode 100644 index 352b734..0000000 --- a/src/LocalPost.SqsConsumer/SqsConsumerOptions.cs +++ /dev/null @@ -1,106 +0,0 @@ -using System.Collections.Immutable; -using System.ComponentModel.DataAnnotations; -using Amazon.SQS; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Options; - -namespace LocalPost.SqsConsumer; - -/// -/// General SQS consumer settings -/// -public sealed record SqsConsumerOptions -{ - public static readonly ImmutableArray AllAttributes = ImmutableArray.Create("All"); - public static readonly ImmutableArray AllMessageAttributes = ImmutableArray.Create("All"); - - public const ushort DefaultTimeout = 30; - - /// - /// How many messages to process in parallel. 
- /// - [Required] public ushort MaxConcurrency { get; set; } = 10; - - [Required] public string QueueName { get; set; } = null!; - - /// - /// If not set, IAmazonSQS.GetQueueUrlAsync(QueueName) will be used once, to get the actual URL of the queue. - /// - [Url] public string? QueueUrl { get; set; } - - /// - /// Time to wait for available messages in the queue. - /// - /// - /// Amazon SQS short and long polling - /// - [Range(0, 60)] public byte WaitTimeSeconds { get; set; } = 20; - - [Range(1, 10)] public byte MaxNumberOfMessages { get; set; } = 10; - - /// - /// Message processing timeout, in seconds. If not set, IAmazonSQS.GetQueueAttributesAsync() will be used once, to get - /// VisibilityTimeout for the queue. - /// - /// - /// Amazon SQS visibility timeout - /// - [Range(1, 43200)] - public ushort Timeout { get; set; } -} - -internal sealed class SqsConsumerOptionsResolver : IPostConfigureOptions -{ - private readonly ILogger _logger; - private readonly IAmazonSQS _sqs; - - public SqsConsumerOptionsResolver(ILogger logger, IAmazonSQS sqs) - { - _logger = logger; - _sqs = sqs; - } - - public void PostConfigure(string name, SqsConsumerOptions options) - { - if (string.IsNullOrEmpty(options.QueueUrl)) - FetchQueueUrl(options).Wait(); - - if (options.Timeout == 0) // Try to get it from the SQS settings if not set - FetchVisibilityTimeout(options).Wait(); - } - - private async Task FetchQueueUrl(SqsConsumerOptions options) - { - try - { - // Checking possible errors in the response would be good - options.QueueUrl = (await _sqs.GetQueueUrlAsync(options.QueueName)).QueueUrl; - } - catch (Exception e) - { - // TODO Wrap in our own exception - throw new ArgumentException($"Cannot fetch SQS URL for {options.QueueName}", nameof(options), e); - } - } - - private async Task FetchVisibilityTimeout(SqsConsumerOptions options) - { - try - { - // Checking possible errors in the response would be good - var queueAttributes = await _sqs - 
.GetQueueAttributesAsync(options.QueueUrl, SqsConsumerOptions.AllAttributes.ToList()); - - options.Timeout = queueAttributes.VisibilityTimeout switch - { - > 0 => (ushort) queueAttributes.VisibilityTimeout, - _ => SqsConsumerOptions.DefaultTimeout - }; - } - catch (Exception e) - { - _logger.LogWarning(e, "Cannot fetch SQS attributes for {Queue}", options.QueueName); - options.Timeout = SqsConsumerOptions.DefaultTimeout; - } - } -} diff --git a/src/LocalPost.SqsConsumer/SqsDeleteBatchBuilder.cs b/src/LocalPost.SqsConsumer/SqsDeleteBatchBuilder.cs deleted file mode 100644 index d33294d..0000000 --- a/src/LocalPost.SqsConsumer/SqsDeleteBatchBuilder.cs +++ /dev/null @@ -1,30 +0,0 @@ -using Amazon.SQS.Model; - -namespace LocalPost.SqsConsumer; - -internal sealed class SqsDeleteBatchBuilder : BatchBuilder -{ - private readonly DeleteMessageBatchRequest _batchRequest; - - public SqsDeleteBatchBuilder(string queueUrl) : base(TimeSpan.FromSeconds(1)) // TODO Configurable - { - _batchRequest = new DeleteMessageBatchRequest { QueueUrl = queueUrl }; - } - - public override bool IsEmpty => _batchRequest.Entries.Count == 0; - - private bool CanFit(DeleteMessageBatchRequestEntry entry) => _batchRequest.Entries.Count <= 10; - - public override bool TryAdd(DeleteMessageBatchRequestEntry entry) - { - var canFit = CanFit(entry); - if (!canFit) - return false; - - _batchRequest.Entries.Add(entry); - - return true; - } - - public override DeleteMessageBatchRequest Build() => _batchRequest; -} diff --git a/src/LocalPost.SqsConsumer/SqsPuller.cs b/src/LocalPost.SqsConsumer/SqsPuller.cs deleted file mode 100644 index a8c10e9..0000000 --- a/src/LocalPost.SqsConsumer/SqsPuller.cs +++ /dev/null @@ -1,83 +0,0 @@ -using System.Collections.Immutable; -using Amazon.SQS; -using Amazon.SQS.Model; -using LocalPost; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Options; - -namespace LocalPost.SqsConsumer; - -internal static partial class ServiceProviderExtensions -{ - 
public static SqsPuller GetSqs(this IServiceProvider provider, string name) => - provider.GetRequiredService()[name]; -} - -internal sealed class SqsAccessor -{ - private readonly IReadOnlyDictionary _registry; - - public SqsAccessor(IEnumerable registry) - { - _registry = registry.ToImmutableDictionary(x => x.Name, x => x); - } - - public SqsPuller this[string name] => _registry[name]; -} - -internal sealed class SqsPuller : IAsyncEnumerable -{ - private readonly IAmazonSQS _sqs; - private readonly SqsConsumerOptions _options; - - private readonly IBackgroundQueue _processedMessages; - - public SqsPuller(IAmazonSQS sqs, string name, IOptionsMonitor options) - { - _sqs = sqs; - _options = options.Get(name); - - var processedMessages = new ProcessedMessages(_options.QueueUrl); - _processedMessages = processedMessages; - ProcessedMessages = processedMessages; - - Name = name; - } - - public string Name { get; } - - public IAsyncEnumerable ProcessedMessages { get; } - - public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) - { - var attributeNames = SqsConsumerOptions.AllAttributes.ToList(); // TODO Configurable - var messageAttributeNames = SqsConsumerOptions.AllMessageAttributes.ToList(); // TODO Configurable - - while (!ct.IsCancellationRequested) - { - // AWS SDK handles network failures, see - // https://docs.aws.amazon.com/sdkref/latest/guide/feature-retry-behavior.html - var receiveMessageResponse = await _sqs.ReceiveMessageAsync(new ReceiveMessageRequest - { - QueueUrl = _options.QueueUrl, - WaitTimeSeconds = _options.WaitTimeSeconds, - MaxNumberOfMessages = _options.MaxNumberOfMessages, - AttributeNames = attributeNames, - MessageAttributeNames = messageAttributeNames, - }, ct); - - foreach (var message in receiveMessageResponse.Messages) - yield return message; - } - } - - public MessageHandler Handler(MessageHandler handler) => async (payload, ct) => - { - await handler(payload, ct); - - // Won't be deleted in case of an exception 
in the handler - await _processedMessages.Enqueue(payload, ct); - - // Extend message's VisibilityTimeout in case of long processing?.. - }; -} diff --git a/src/LocalPost/BackgroundQueue.cs b/src/LocalPost/BackgroundQueue.cs index 2100f38..cd708a3 100644 --- a/src/LocalPost/BackgroundQueue.cs +++ b/src/LocalPost/BackgroundQueue.cs @@ -12,18 +12,31 @@ public interface IBackgroundQueueReader public ChannelReader Reader { get; } } -public interface IMessageHandler +public interface IHandler { - Task Process(TOut payload, CancellationToken ct); + Task InvokeAsync(TOut payload, CancellationToken ct); } -public delegate Task MessageHandler(T context, CancellationToken ct); +public interface IMiddleware +{ + Handler Invoke(Handler next); +} + +public delegate Task Handler(T context, CancellationToken ct); + +public delegate Handler HandlerFactory(IServiceProvider provider); + +public delegate Handler Middleware(Handler next); +//public delegate Task Middleware(T context, Handler next, CancellationToken ct); + +public delegate Middleware MiddlewareFactory(IServiceProvider provider); // Simplest background queue -public sealed class BackgroundQueue : IBackgroundQueue, IAsyncEnumerable +public sealed partial class BackgroundQueue : IBackgroundQueue, IAsyncEnumerable { + // TODO Bounded version (1000 by default), overflow should be dropped with a log message private readonly Channel _messages = Channel.CreateUnbounded(new UnboundedChannelOptions { SingleReader = false, diff --git a/src/LocalPost/BackgroundQueueBuilder.cs b/src/LocalPost/BackgroundQueueBuilder.cs new file mode 100644 index 0000000..4698580 --- /dev/null +++ b/src/LocalPost/BackgroundQueueBuilder.cs @@ -0,0 +1,45 @@ +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; + +namespace LocalPost; + +public sealed partial class BackgroundQueue +{ + public sealed class Builder + { + private Func>? 
_readerFactory; + + public Builder(string name) + { + Name = name; + } + + public string Name { get; } + + public MiddlewareStackBuilder MiddlewareStackBuilder { get; } = new(); + + public Builder SetReaderFactory(Func> factory) + { + _readerFactory = factory; + + return this; + } + + private HandlerFactory BuildHandlerFactory() => + MiddlewareStackBuilder.Build().Resolve; + + internal IHostedService Build(IServiceProvider provider) + { + // TODO Custom exception + var readerFactory = _readerFactory ?? throw new Exception($"Reader factory is required"); + + var executor = ActivatorUtilities.CreateInstance(provider, Name); + var consumer = ActivatorUtilities.CreateInstance>(provider, Name, + readerFactory(provider), executor, BuildHandlerFactory()); + var consumerSupervisor = ActivatorUtilities.CreateInstance>>(provider, Name, + consumer); + + return consumerSupervisor; + } + } +} diff --git a/src/LocalPost/BackgroundQueueConsumer.cs b/src/LocalPost/BackgroundQueueConsumer.cs index 6b645f9..8054328 100644 --- a/src/LocalPost/BackgroundQueueConsumer.cs +++ b/src/LocalPost/BackgroundQueueConsumer.cs @@ -1,91 +1,71 @@ using System.Threading.Channels; using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Logging; namespace LocalPost; -internal sealed class BackgroundQueueConsumer : BackgroundService +internal sealed class BackgroundQueueConsumer : IBackgroundService { private readonly ILogger> _logger; - private readonly string _name; private readonly IServiceScopeFactory _scopeFactory; private readonly IAsyncEnumerable _reader; private readonly IExecutor _executor; - private readonly Func> _handlerFactory; + private readonly Func> _handlerFactory; public BackgroundQueueConsumer(string name, ILogger> logger, IServiceScopeFactory scopeFactory, - IExecutor executor, IAsyncEnumerable reader, Func> handlerFactory) + IExecutor executor, IAsyncEnumerable reader, Func> handlerFactory) { + Name = name; _logger = logger; 
- _name = name; _scopeFactory = scopeFactory; _reader = reader; _executor = executor; _handlerFactory = handlerFactory; } - protected override async Task ExecuteAsync(CancellationToken stoppingToken) - { - _logger.LogInformation("Starting {Name} background queue...", _name); + public string Name { get; } + + public Task StartAsync(CancellationToken ct) => Task.CompletedTask; + public async Task ExecuteAsync(CancellationToken ct) + { try { - await foreach (var message in _reader.WithCancellation(stoppingToken)) - await _executor.StartAsync(() => Process(message, stoppingToken), stoppingToken); - } - catch (OperationCanceledException e) when (e.CancellationToken == stoppingToken) - { - // The rest of the queue will be processed in StopAsync() below - _logger.LogInformation("Application exit has been requested, stopping {Name} background queue...", _name); + await foreach (var message in _reader.WithCancellation(ct)) + await _executor.StartAsync(() => Process(message, ct), ct); } catch (ChannelClosedException e) { - _logger.LogWarning(e, "{Name} queue has been closed, stop listening", _name); + // TODO isRunning above... + _logger.LogWarning(e, "{Name} queue has been closed, stop listening", Name); // The rest of the queue will be processed in StopAsync() below } - catch (Exception e) - { - // Custom error handler?.. 
- _logger.LogCritical(e, "Unhandled exception, stop listening"); - } } - public override async Task StopAsync(CancellationToken forceExitToken) + public async Task StopAsync(CancellationToken forceExitToken) { - await base.StopAsync(forceExitToken); - - var enumerator = _reader.GetAsyncEnumerator(forceExitToken); - var move = enumerator.MoveNextAsync(); - var completed = false; - do + try { - // Suck out all the _available_ messages - while (move.IsCompleted) - { - completed = !await move; - if (completed) - break; - - await _executor.StartAsync(() => Process(enumerator.Current, forceExitToken), forceExitToken); - - move = enumerator.MoveNextAsync(); - } - - if (_executor.IsEmpty) - // It means that nothing has been started (no messages read), so we are finally done - break; + // TODO An option to NOT process the rest of the messages... + await foreach (var message in _reader.WithCancellation(forceExitToken)) + await _executor.StartAsync(() => Process(message, forceExitToken), forceExitToken); + } + catch (ChannelClosedException e) + { + // TODO Do something? + } - // Wait until all currently running tasks are finished - await _executor.WaitAsync(forceExitToken); - } while (!completed); + // Wait until all currently running tasks are finished + await _executor.WaitAsync(forceExitToken); } private async Task Process(T message, CancellationToken ct) { + // TODO Tracing... + using var scope = _scopeFactory.CreateScope(); // Make it specific for this queue somehow?.. 
@@ -102,7 +82,7 @@ private async Task Process(T message, CancellationToken ct) } catch (Exception e) { - _logger.LogError(e, "{Name} queue: unhandled exception while processing a message", _name); + _logger.LogError(e, "{Queue}: unhandled exception while processing a message", Name); } } } diff --git a/src/LocalPost/BackgroundServiceSupervisor.cs b/src/LocalPost/BackgroundServiceSupervisor.cs new file mode 100644 index 0000000..5a6d27a --- /dev/null +++ b/src/LocalPost/BackgroundServiceSupervisor.cs @@ -0,0 +1,165 @@ +using System.Diagnostics.CodeAnalysis; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; +using static Microsoft.Extensions.Diagnostics.HealthChecks.HealthCheckResult; + +namespace LocalPost; + +internal interface IBackgroundService : INamedService +{ + Task StartAsync(CancellationToken ct); + + Task ExecuteAsync(CancellationToken ct); + + Task StopAsync(CancellationToken ct); +} + +internal sealed class BackgroundServiceSupervisor : IHostedService, INamedService, IDisposable + where T : class, IBackgroundService +{ + public sealed class LivenessCheck : IHealthCheck + { + private readonly BackgroundServiceSupervisor _supervisor; + + public LivenessCheck(BackgroundServiceSupervisor supervisor) + { + _supervisor = supervisor; + } + + public Task CheckHealthAsync(HealthCheckContext context, + CancellationToken cancellationToken = default) => Task.FromResult(CheckHealth(context)); + + private HealthCheckResult CheckHealth(HealthCheckContext _) + { + if (_supervisor.Crashed) + return Unhealthy($"{_supervisor.Name} has crashed", _supervisor.Exception); + + if (_supervisor is { Started: true, Running: false }) + return Unhealthy($"{_supervisor.Name} is not running"); + + // Starting or running + return Healthy($"{_supervisor.Name} is alive"); + } + } + + public sealed class ReadinessCheck : IHealthCheck + { + private readonly 
BackgroundServiceSupervisor _supervisor; + + public ReadinessCheck(BackgroundServiceSupervisor supervisor) + { + _supervisor = supervisor; + } + + public Task CheckHealthAsync(HealthCheckContext context, + CancellationToken cancellationToken = default) => Task.FromResult(CheckHealth(context)); + + private HealthCheckResult CheckHealth(HealthCheckContext context) + { + if (!_supervisor.Started) + return Unhealthy($"{_supervisor.Name} has not been started yet", _supervisor.Exception); + + if (_supervisor.Crashed) + return Unhealthy($"{_supervisor.Name} has crashed", _supervisor.Exception); + + return Healthy($"{_supervisor.Name} is running"); + } + } + + private readonly ILogger> _logger; + + private CancellationTokenSource? _executionCts; + private Task? _execution; + + public BackgroundServiceSupervisor(ILogger> logger, T service) + { + _logger = logger; + Service = service; + } + + public T Service { get; } + + public string Name => Service.Name; + + public bool Started => _executionCts is not null && _execution is not null; + + public bool Running => _execution is not null && _execution.IsCompleted; + + [MemberNotNullWhen(true, nameof(Exception))] + public bool Crashed => Exception is not null; + + public Exception? 
Exception { get; private set; } + + public async Task StartAsync(CancellationToken ct) + { + if (_executionCts is not null) + throw new InvalidOperationException("Service has been already started"); + + _executionCts = new CancellationTokenSource(); + + try + { + await Service.StartAsync(ct).ConfigureAwait(false); + + // Store the task we're executing + _execution = ExecuteAsync(_executionCts.Token); + } + catch (OperationCanceledException e) when (e.CancellationToken == ct) + { + _logger.LogInformation("{Name} start has been aborted", Name); + } + catch (Exception e) + { + Exception = e; + _logger.LogCritical(e, "Unhandled exception while starting {Name} background queue", Name); + } + } + + private async Task ExecuteAsync(CancellationToken stoppingToken) + { + // In case stop has been already requested + if (stoppingToken.IsCancellationRequested) + return; + + try + { + await Service.ExecuteAsync(stoppingToken); + } + catch (OperationCanceledException e) when (e.CancellationToken == stoppingToken) + { + // The rest of the queue will be processed in StopAsync() below + _logger.LogInformation("Application exit has been requested, stopping {Name} background queue...", Name); + } + catch (Exception e) + { + Exception = e; + _logger.LogCritical(e, "Unhandled exception in {Name} background queue", Name); + } + } + + public async Task StopAsync(CancellationToken forceExitToken) + { + try + { + // Signal cancellation to the executing method + _executionCts?.Cancel(); + } + finally + { + if (_execution is not null) + // Wait until the execution completes or the app is forced to exit + await Task.WhenAny(_execution, Task.Delay(Timeout.Infinite, forceExitToken)).ConfigureAwait(false); + } + + await Service.StopAsync(forceExitToken).ConfigureAwait(false); + } + + public void Dispose() + { + _executionCts?.Cancel(); + if (Service is IDisposable disposableService) + disposableService.Dispose(); + } +} diff --git a/src/LocalPost/DependencyInjection/ConfigurationExtensions.cs 
b/src/LocalPost/DependencyInjection/Configuration.cs similarity index 85% rename from src/LocalPost/DependencyInjection/ConfigurationExtensions.cs rename to src/LocalPost/DependencyInjection/Configuration.cs index b3925d2..0bc12a8 100644 --- a/src/LocalPost/DependencyInjection/ConfigurationExtensions.cs +++ b/src/LocalPost/DependencyInjection/Configuration.cs @@ -1,11 +1,10 @@ -using Microsoft.Extensions.Configuration; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Options; namespace LocalPost.DependencyInjection; // TODO Open to public later -internal static class ConfigurationExtensions +internal static class Configuration { public static OptionsBuilder AddBackgroundQueueOptions(this IServiceCollection services) => services.AddOptions(Reflection.FriendlyNameOf>()); diff --git a/src/LocalPost/DependencyInjection/CustomQueueRegistrationExtensions.cs b/src/LocalPost/DependencyInjection/CustomQueueRegistrationExtensions.cs deleted file mode 100644 index 0378885..0000000 --- a/src/LocalPost/DependencyInjection/CustomQueueRegistrationExtensions.cs +++ /dev/null @@ -1,37 +0,0 @@ -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Options; - -namespace LocalPost.DependencyInjection; - -public static class CustomQueueRegistrationExtensions -{ - // TReader & THandler have to be registered by the user - public static OptionsBuilder AddCustomBackgroundQueue( - this IServiceCollection services) where TReader : IAsyncEnumerable where THandler : IMessageHandler => - services.AddCustomBackgroundQueue(provider => provider.GetRequiredService().Process); - - // TReader has to be registered by the user - public static OptionsBuilder AddCustomBackgroundQueue(this IServiceCollection services, - Func> handlerFactory) - where TReader : IAsyncEnumerable => - services.AddCustomBackgroundQueue(Reflection.FriendlyNameOf(), - provider => provider.GetRequiredService(), handlerFactory); - - public static OptionsBuilder 
AddCustomBackgroundQueue(this IServiceCollection services, string name, - Func> readerFactory, - Func> handlerFactory) - { - // TODO Try...() version of this one, to be gentle with multiple registrations of the same queue?.. - services.AddHostedService(provider => - { - var executor = ActivatorUtilities.CreateInstance(provider, name); - - return ActivatorUtilities.CreateInstance>(provider, name, - readerFactory(provider), executor, handlerFactory); - }); - - // TODO Health check, metrics - - return services.AddOptions(name);; - } -} diff --git a/src/LocalPost/DependencyInjection/HealthChecks.cs b/src/LocalPost/DependencyInjection/HealthChecks.cs new file mode 100644 index 0000000..580134c --- /dev/null +++ b/src/LocalPost/DependencyInjection/HealthChecks.cs @@ -0,0 +1,40 @@ +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; + +namespace LocalPost.DependencyInjection; + +public static class HealthChecks +{ + public static IHealthChecksBuilder AddBackgroundQueueConsumerReadinessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, + TimeSpan? timeout = default) => + builder.AddBackgroundServiceReadinessCheck>(name, failureStatus, tags, timeout); + + public static IHealthChecksBuilder AddBackgroundQueueConsumerLivenessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, + TimeSpan? timeout = default) => + builder.AddBackgroundServiceLivenessCheck>(name, failureStatus, tags, timeout); + + internal static IHealthChecksBuilder AddBackgroundServiceReadinessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, + TimeSpan? 
timeout = default) where T : class, IBackgroundService => + builder.Add(new HealthCheckRegistration( + name, + provider => ActivatorUtilities.CreateInstance.ReadinessCheck>(provider, + provider.GetSupervisor(name)), + failureStatus, + tags, + timeout)); + + internal static IHealthChecksBuilder AddBackgroundServiceLivenessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, + TimeSpan? timeout = default) where T : class, IBackgroundService => + builder.Add(new HealthCheckRegistration( + name, + provider => ActivatorUtilities.CreateInstance.LivenessCheck>(provider, + provider.GetSupervisor(name)), + failureStatus, + tags, + timeout)); +} diff --git a/src/LocalPost/DependencyInjection/INamedService.cs b/src/LocalPost/DependencyInjection/INamedService.cs new file mode 100644 index 0000000..59c6dd2 --- /dev/null +++ b/src/LocalPost/DependencyInjection/INamedService.cs @@ -0,0 +1,6 @@ +namespace LocalPost.DependencyInjection; + +internal interface INamedService +{ + string Name { get; } +} diff --git a/src/LocalPost/DependencyInjection/JobQueueRegistrationExtensions.cs b/src/LocalPost/DependencyInjection/JobQueueRegistration.cs similarity index 69% rename from src/LocalPost/DependencyInjection/JobQueueRegistrationExtensions.cs rename to src/LocalPost/DependencyInjection/JobQueueRegistration.cs index a9eb008..48f79fe 100644 --- a/src/LocalPost/DependencyInjection/JobQueueRegistrationExtensions.cs +++ b/src/LocalPost/DependencyInjection/JobQueueRegistration.cs @@ -4,13 +4,14 @@ namespace LocalPost.DependencyInjection; -public static class JobQueueRegistrationExtensions +public static class JobQueueRegistration { public static OptionsBuilder AddBackgroundJobQueue(this IServiceCollection services) { services.TryAddSingleton(); services.TryAddSingleton(provider => provider.GetRequiredService()); - return services.AddCustomBackgroundQueue(_ => (job, ct) => job(ct)); + return 
services.AddBackgroundQueueConsumer(builder => + builder.MiddlewareStackBuilder.SetHandler((job, ct) => job(ct))); } } diff --git a/src/LocalPost/DependencyInjection/QueueRegistration.cs b/src/LocalPost/DependencyInjection/QueueRegistration.cs new file mode 100644 index 0000000..758e75b --- /dev/null +++ b/src/LocalPost/DependencyInjection/QueueRegistration.cs @@ -0,0 +1,52 @@ +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.DependencyInjection.Extensions; +using Microsoft.Extensions.Options; + +namespace LocalPost.DependencyInjection; + +public static class QueueRegistration +{ + // THandler has to be registered by the user + public static OptionsBuilder AddBackgroundQueue(this IServiceCollection services, + Action.Builder>? configure = null) where THandler : IHandler => + services.AddBackgroundQueue(builder => builder.MiddlewareStackBuilder.SetHandler()); + + public static OptionsBuilder AddBackgroundQueue(this IServiceCollection services, + Action.Builder> configure) + { + services.TryAddSingleton>(); + services.TryAddSingleton>(provider => provider.GetRequiredService>()); + + return services.AddBackgroundQueueConsumer>(configure); + } + + + +// // TReader & THandler have to be registered by the user +// public static OptionsBuilder AddBackgroundQueueConsumer( +// this IServiceCollection services) where TReader : IAsyncEnumerable where THandler : IHandler => +// services.AddCustomBackgroundQueue(provider => provider.GetRequiredService().InvokeAsync); + + // TReader has to be registered by the user + public static OptionsBuilder AddBackgroundQueueConsumer(this IServiceCollection services, + Action.Builder> configure) where TReader : IAsyncEnumerable => + services.AddBackgroundQueueConsumer(Reflection.FriendlyNameOf(), builder => configure( + builder.SetReaderFactory(provider => provider.GetRequiredService()))); + + public static OptionsBuilder AddBackgroundQueueConsumer(this IServiceCollection services, + Action.Builder> configure) => 
+ services.AddBackgroundQueueConsumer(Reflection.FriendlyNameOf(), configure); + + public static OptionsBuilder AddBackgroundQueueConsumer(this IServiceCollection services, + string name, Action.Builder> configure) + { + var builder = new BackgroundQueue.Builder(name); + configure(builder); + + // TODO Try...() version of this one, to be gentle with multiple registrations of the same queue + // (extend ServiceDescriptor, add name to it and search using it) + services.AddHostedService(builder.Build); + + return services.AddOptions(name); + } +} diff --git a/src/LocalPost/DependencyInjection/QueueRegistrationExtensions.cs b/src/LocalPost/DependencyInjection/QueueRegistrationExtensions.cs deleted file mode 100644 index 567a90a..0000000 --- a/src/LocalPost/DependencyInjection/QueueRegistrationExtensions.cs +++ /dev/null @@ -1,23 +0,0 @@ -using Microsoft.Extensions.Configuration; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.DependencyInjection.Extensions; -using Microsoft.Extensions.Options; - -namespace LocalPost.DependencyInjection; - -public static class QueueRegistrationExtensions -{ - // THandler has to be registered by the user - public static OptionsBuilder AddBackgroundQueue(this IServiceCollection services) - where THandler : IMessageHandler => - services.AddBackgroundQueue(provider => provider.GetRequiredService().Process); - - public static OptionsBuilder AddBackgroundQueue(this IServiceCollection services, - Func> handlerFactory) - { - services.TryAddSingleton>(); - services.TryAddSingleton>(provider => provider.GetRequiredService>()); - - return services.AddCustomBackgroundQueue>(handlerFactory); - } -} diff --git a/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs b/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs new file mode 100644 index 0000000..22ba9f1 --- /dev/null +++ b/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs @@ -0,0 +1,15 @@ +using Microsoft.Extensions.DependencyInjection; + 
+namespace LocalPost.DependencyInjection; + +internal static class ServiceProviderLookups +{ + public static T GetRequiredService(this IServiceProvider provider, string name) + where T : INamedService + { + return provider.GetRequiredService>().First(x => x.Name == name); + } + + public static BackgroundServiceSupervisor GetSupervisor(this IServiceProvider provider, string name) + where T : class, IBackgroundService => provider.GetRequiredService>(name); +} diff --git a/src/LocalPost/LocalPost.csproj b/src/LocalPost/LocalPost.csproj index ceabac6..8bb797d 100644 --- a/src/LocalPost/LocalPost.csproj +++ b/src/LocalPost/LocalPost.csproj @@ -43,14 +43,18 @@ + + + + @@ -62,6 +66,9 @@ <_Parameter1>$(MSBuildProjectName).SqsConsumer + + <_Parameter1>$(MSBuildProjectName).KafkaConsumer + diff --git a/src/LocalPost/MiddlewareStack.cs b/src/LocalPost/MiddlewareStack.cs new file mode 100644 index 0000000..29f8c0e --- /dev/null +++ b/src/LocalPost/MiddlewareStack.cs @@ -0,0 +1,26 @@ +using System.Collections.Immutable; + +namespace LocalPost; + +public sealed class MiddlewareStack +{ + private readonly HandlerFactory _handlerFactory; + private readonly ImmutableArray> _middlewares; + + public MiddlewareStack(HandlerFactory handlerFactory, IEnumerable>? middlewares = null) + { + _handlerFactory = handlerFactory; + _middlewares = middlewares?.ToImmutableArray() ?? ImmutableArray>.Empty; + } + + public Handler Resolve(IServiceProvider provider) + { + var middlewares = _middlewares.Select(factory => factory(provider)); + + var handler = _handlerFactory(provider); + foreach (var middleware in middlewares) // TODO Reverse? 
+ handler = middleware(handler); + + return handler; + } +} diff --git a/src/LocalPost/MiddlewareStackBuilder.cs b/src/LocalPost/MiddlewareStackBuilder.cs new file mode 100644 index 0000000..e54a5bc --- /dev/null +++ b/src/LocalPost/MiddlewareStackBuilder.cs @@ -0,0 +1,52 @@ +using Microsoft.Extensions.DependencyInjection; + +namespace LocalPost; + +public sealed class MiddlewareStackBuilder : MiddlewareStackBuilder> +{ +} + +public abstract class MiddlewareStackBuilder + where TBuilder : MiddlewareStackBuilder +{ + protected readonly List> Middlewares = new(); + protected HandlerFactory HandlerFactory = _ => (c, ct) => Task.CompletedTask; + + public TBuilder SetHandler(Handler handler) => SetHandler(_ => handler); + + public TBuilder SetHandler() where THandler : IHandler => + SetHandler(provider => provider.GetRequiredService().InvokeAsync); + + public TBuilder SetHandler(HandlerFactory factory) + { + HandlerFactory = factory; + + return (TBuilder) this; + } + + // public TBuilder Append() where TMiddleware : IHandler +// { +// Middlewares.Add(provider => next => ActivatorUtilities.CreateInstance(provider, next).InvokeAsync); +// +// return (TBuilder) this; +// } + + public TBuilder Append(Middleware middleware) => + Append(_ => middleware); + + public TBuilder Append() where TMiddleware : IMiddleware + { + Middlewares.Add(provider => provider.GetRequiredService().Invoke); + + return (TBuilder) this; + } + + public TBuilder Append(MiddlewareFactory factory) + { + Middlewares.Add(factory); + + return (TBuilder) this; + } + + internal MiddlewareStack Build() => new(HandlerFactory, Middlewares); +} diff --git a/src/LocalPost/RecordsSupport.cs b/src/LocalPost/RecordsSupport.cs deleted file mode 100644 index 0bc7e05..0000000 --- a/src/LocalPost/RecordsSupport.cs +++ /dev/null @@ -1,10 +0,0 @@ -// ReSharper disable once CheckNamespace -namespace System.Runtime.CompilerServices; - -using ComponentModel; - -// See https://bit.ly/3xSzC0Q 
-[EditorBrowsable(EditorBrowsableState.Never)] -internal static class IsExternalInit -{ -} From 4f6eb8b29f535dbfe273450ffc956f234889ebf2 Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Sun, 14 May 2023 17:43:38 +0000 Subject: [PATCH 02/33] chore: let's replace Executor with multiple channel consumers, again --- .editorconfig | 12 +++ samples/AmazonSqsApp/Program.cs | 28 +++--- .../LocalPost.Azure.QueueConsumer.csproj | 2 +- src/LocalPost.KafkaConsumer/Builder.cs | 69 ++++++++++++++ src/LocalPost.KafkaConsumer/ConsumeContext.cs | 1 + .../ConsumerBuilder.cs | 89 ------------------- .../ConsumerOptions.cs | 17 ---- .../DependencyInjection/HealthChecks.cs | 23 +++++ .../ServiceCollectionExtensions.cs | 25 ------ .../ServiceRegistration.cs | 25 ++++++ src/LocalPost.KafkaConsumer/HealthCheck.cs | 22 ----- .../LocalPost.KafkaConsumer.csproj | 2 +- .../{Consumer.cs => MessageSource.cs} | 39 ++++---- src/LocalPost.KafkaConsumer/Options.cs | 13 +++ .../DependencyInjection/HealthChecks.cs | 26 ++++++ .../ServiceCollectionExtensions.cs | 27 ------ .../ServiceRegistration.cs | 34 +++++++ ...sions.cs => PublishBatchRequestEntryEx.cs} | 2 +- src/LocalPost.SnsPublisher/Publisher.cs | 65 +++++++------- .../PublisherOptions.cs | 4 + src/LocalPost.SnsPublisher/Sender.cs | 4 +- .../TopicPublishRequests.cs | 35 ++++++++ .../{ConsumerBuilder.cs => Builder.cs} | 23 ++--- .../ConsumerMiddleware.cs | 4 +- .../DependencyInjection/HealthChecks.cs | 6 +- .../ServiceRegistration.cs | 49 ++++++---- .../{Consumer.cs => MessageSource.cs} | 17 ++-- .../{ConsumerOptions.cs => Options.cs} | 2 +- src/LocalPost.SqsConsumer/QueueClient.cs | 27 +++--- ...syncEnumerable.cs => AsyncEnumerableEx.cs} | 0 src/LocalPost/BackgroundJobQueue.cs | 23 ++--- src/LocalPost/BackgroundQueue.cs | 74 ++++++++++++--- src/LocalPost/BackgroundQueueBuilder.cs | 17 ++-- src/LocalPost/BackgroundQueueConsumer.cs | 18 ++-- src/LocalPost/BackgroundQueueSupervisor.cs | 30 +++++++ src/LocalPost/BackgroundService.cs | 12 +++ 
src/LocalPost/BackgroundServiceSupervisor.cs | 45 +++++----- .../DependencyInjection/HealthChecks.cs | 27 ++++-- .../JobQueueRegistration.cs | 4 +- .../DependencyInjection/QueueRegistration.cs | 54 ++++++----- src/LocalPost/Executor.cs | 3 +- src/LocalPost/OptionsEx.cs | 9 ++ src/LocalPost/QueueOptions.cs | 23 ++++- .../LocalPost.SqsConsumer.Tests.csproj | 2 + 44 files changed, 632 insertions(+), 401 deletions(-) create mode 100644 src/LocalPost.KafkaConsumer/Builder.cs delete mode 100644 src/LocalPost.KafkaConsumer/ConsumerBuilder.cs delete mode 100644 src/LocalPost.KafkaConsumer/ConsumerOptions.cs create mode 100644 src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecks.cs delete mode 100644 src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionExtensions.cs create mode 100644 src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs delete mode 100644 src/LocalPost.KafkaConsumer/HealthCheck.cs rename src/LocalPost.KafkaConsumer/{Consumer.cs => MessageSource.cs} (69%) create mode 100644 src/LocalPost.KafkaConsumer/Options.cs create mode 100644 src/LocalPost.SnsPublisher/DependencyInjection/HealthChecks.cs delete mode 100644 src/LocalPost.SnsPublisher/DependencyInjection/ServiceCollectionExtensions.cs create mode 100644 src/LocalPost.SnsPublisher/DependencyInjection/ServiceRegistration.cs rename src/LocalPost.SnsPublisher/{PublishBatchRequestEntryExtensions.cs => PublishBatchRequestEntryEx.cs} (82%) create mode 100644 src/LocalPost.SnsPublisher/TopicPublishRequests.cs rename src/LocalPost.SqsConsumer/{ConsumerBuilder.cs => Builder.cs} (56%) rename src/LocalPost.SqsConsumer/{Consumer.cs => MessageSource.cs} (75%) rename src/LocalPost.SqsConsumer/{ConsumerOptions.cs => Options.cs} (98%) rename src/LocalPost/{AsyncEnumerable.cs => AsyncEnumerableEx.cs} (100%) create mode 100644 src/LocalPost/BackgroundQueueSupervisor.cs create mode 100644 src/LocalPost/BackgroundService.cs create mode 100644 src/LocalPost/OptionsEx.cs diff --git 
a/.editorconfig b/.editorconfig index b10163a..f48bc7e 100644 --- a/.editorconfig +++ b/.editorconfig @@ -33,3 +33,15 @@ indent_size = 4 [*.md] trim_trailing_whitespace = true insert_final_newline = true + +# See https://github.com/dotnet/aspnetcore/blob/main/.editorconfig +[*.{cs,vb}] + +# CA2007: Consider calling ConfigureAwait on the awaited task +#dotnet_diagnostic.CA2007.severity = warning + +# CA2012: Use ValueTask correctly +dotnet_diagnostic.CA2012.severity = warning + +# CA2013: Do not use ReferenceEquals with value types +dotnet_diagnostic.CA2013.severity = warning diff --git a/samples/AmazonSqsApp/Program.cs b/samples/AmazonSqsApp/Program.cs index eed557c..26c2c48 100644 --- a/samples/AmazonSqsApp/Program.cs +++ b/samples/AmazonSqsApp/Program.cs @@ -8,14 +8,14 @@ var builder = WebApplication.CreateBuilder(args); - // A background queue with an inline handler -builder.Services.AddBackgroundQueue(_ => async (w, ct) => -{ - await Task.Delay(TimeSpan.FromSeconds(2), ct); - Console.WriteLine(w.Summary); -}); - +builder.Services.AddBackgroundQueue( + // TODO Automatically add the health checks?.. 
+ async (weather, ct) => + { + await Task.Delay(TimeSpan.FromSeconds(2), ct); + Console.WriteLine(weather.Summary); + }); // An async Amazon SNS sender, buffers messages and sends them in batches in the background @@ -23,15 +23,14 @@ builder.Services.AddAmazonSnsBatchPublisher(); - // An Amazon SQS consumer builder.Services.AddAWSService(); -builder.Services.AddAmazonSqsMinimalConsumer("test", async (context, ct) => -{ - await Task.Delay(1_000, ct); - Console.WriteLine(context.Body); -}); - +builder.Services.AddAmazonSqsConsumer("test", + async (context, ct) => + { + await Task.Delay(1_000, ct); + Console.WriteLine(context.Message.Body); + }); builder.Services.AddControllers(); @@ -45,6 +44,7 @@ app.UseSwagger(); app.UseSwaggerUI(); } + app.UseHttpsRedirection(); app.UseAuthorization(); app.MapControllers(); diff --git a/src/LocalPost.Azure.QueueConsumer/LocalPost.Azure.QueueConsumer.csproj b/src/LocalPost.Azure.QueueConsumer/LocalPost.Azure.QueueConsumer.csproj index d08b6f3..0062328 100644 --- a/src/LocalPost.Azure.QueueConsumer/LocalPost.Azure.QueueConsumer.csproj +++ b/src/LocalPost.Azure.QueueConsumer/LocalPost.Azure.QueueConsumer.csproj @@ -40,7 +40,7 @@ - + diff --git a/src/LocalPost.KafkaConsumer/Builder.cs b/src/LocalPost.KafkaConsumer/Builder.cs new file mode 100644 index 0000000..4825c10 --- /dev/null +++ b/src/LocalPost.KafkaConsumer/Builder.cs @@ -0,0 +1,69 @@ +using System.Threading.Channels; +using Confluent.Kafka; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Options; + +namespace LocalPost.KafkaConsumer; + +public static partial class MessageSource +{ + public sealed class Builder + { + private Action> _configure = (cb) => { }; + + private readonly Channel> _queue = + // Kafka client (librdkafka) is optimised to prefetch messages, so there is no need to maintain our own + // buffer + Channel.CreateBounded>(new BoundedChannelOptions(1) + { + SingleWriter = true, + SingleReader = true + }); + + public Builder(string 
name) + { + Name = name; + } + + public string Name { get; } + + internal IAsyncEnumerable> Messages => _queue.Reader.ReadAllAsync(); + + public MiddlewareStackBuilder> MiddlewareStackBuilder { get; } = new(); + + // TODO Implement +// public Builder SetMessageHandler(Handler> handler) +// { +// _handler = (c, ct) => handler(c.Result.Message, ct); +// +// return this; +// } + + // Allows to configure message format (Avro/JSON/Protobuf/etc.) and other things + public Builder ConfigureKafkaClient(Action> configure) + { + _configure = configure; + + return this; + } + + internal HandlerFactory> BuildHandlerFactory() => + MiddlewareStackBuilder.Build().Resolve; + + internal BackgroundServiceSupervisor Build(IServiceProvider provider) + { + var clientConfig = provider.GetRequiredService>().Get(Name); + + var clientBuilder = new ConsumerBuilder(clientConfig.Kafka); + _configure(clientBuilder); + + var kafkaClient = clientBuilder.Build(); + var consumer = ActivatorUtilities.CreateInstance(provider, Name, clientConfig, kafkaClient); + + var supervisor = ActivatorUtilities + .CreateInstance>(provider, consumer); + + return supervisor; + } + } +} diff --git a/src/LocalPost.KafkaConsumer/ConsumeContext.cs b/src/LocalPost.KafkaConsumer/ConsumeContext.cs index 413201b..0b70d8e 100644 --- a/src/LocalPost.KafkaConsumer/ConsumeContext.cs +++ b/src/LocalPost.KafkaConsumer/ConsumeContext.cs @@ -4,6 +4,7 @@ namespace LocalPost.KafkaConsumer; public readonly record struct ConsumeContext { + // To commit the offset manually, for example. Do we really need to support these complex use cases?.. 
public required IConsumer Client { get; init; } public required ConsumeResult Result { get; init; } diff --git a/src/LocalPost.KafkaConsumer/ConsumerBuilder.cs b/src/LocalPost.KafkaConsumer/ConsumerBuilder.cs deleted file mode 100644 index 76c7e7c..0000000 --- a/src/LocalPost.KafkaConsumer/ConsumerBuilder.cs +++ /dev/null @@ -1,89 +0,0 @@ -using System.Threading.Channels; -using Confluent.Kafka; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Options; - -namespace LocalPost.KafkaConsumer; - -public static partial class Consumer -{ - public sealed class Builder - { - private Action> _configure = (cb) => { }; - - private Handler _errorHandler = (e, ct) => Task.CompletedTask; - - private Handler> _handler = (c, ct) => Task.CompletedTask; - - private readonly List>> _middlewares = new(); - - private readonly Channel> _queue = - // Kafka client (librdkafka) is optimised to prefetch messages, so there is no need to maintain our own - // buffer - Channel.CreateBounded>(new BoundedChannelOptions(1) - { - SingleWriter = true, - SingleReader = true - // TODO AllowSynchronousContinuations?.. - }); - - public required string Name { get; init; } - - public Builder ConfigureKafkaClient(Action> configure) - { - _configure = configure; - - return this; - } - - // TODO Remove - public Builder SetErrorHandler(Handler handler) - { - _errorHandler = handler; - - return this; - } - - public Builder SetMessageHandler(Handler> handler) - { - _handler = (c, ct) => handler(c.Result.Message, ct); - - return this; - } - - // FIXME Take from the container... 
-// public Builder SetMessageHandler(THandler handler) -// where THandler : IMessageHandler> => -// SetMessageHandler(handler.Process); - - public Builder SetMessageHandler(Handler handler) => - SetMessageHandler((m, ct) => handler(m.Value, ct)); - - public Builder AddMiddleware(MiddlewareFactory> factory) - { - _middlewares.Add(factory); - - return this; - } - - public Builder AddMiddleware(Middleware> middleware) => - AddMiddleware(_ => middleware); - - internal HandlerFactory> BuildHandlerFactory() => - new MiddlewareStack>(_handler, _middlewares).Resolve; - - internal IAsyncEnumerable> Messages => _queue.Reader.ReadAllAsync(); - - internal Service Build(IServiceProvider provider) - { - var clientConfig = provider.GetRequiredService>().Get(Name); - - var clientBuilder = new ConsumerBuilder(clientConfig); - _configure(clientBuilder); - - var kafkaClient = clientBuilder.Build(); - - return ActivatorUtilities.CreateInstance(provider, clientConfig, Name, kafkaClient, _errorHandler); - } - } -} diff --git a/src/LocalPost.KafkaConsumer/ConsumerOptions.cs b/src/LocalPost.KafkaConsumer/ConsumerOptions.cs deleted file mode 100644 index c18218d..0000000 --- a/src/LocalPost.KafkaConsumer/ConsumerOptions.cs +++ /dev/null @@ -1,17 +0,0 @@ -using System.ComponentModel.DataAnnotations; -using Confluent.Kafka; - -namespace LocalPost.KafkaConsumer; - -/// -/// General Azure Storage Queue consumer settings -/// -public sealed class ConsumerOptions : ConsumerConfig -{ - /// - /// How many messages to process in parallel. 
- /// - [Required] public ushort MaxConcurrency { get; set; } = 10; - - [Required] public string TopicName { get; set; } = null!; -} diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecks.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecks.cs new file mode 100644 index 0000000..e821eef --- /dev/null +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecks.cs @@ -0,0 +1,23 @@ +using System.Diagnostics.CodeAnalysis; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; + +namespace LocalPost.KafkaConsumer.DependencyInjection; + +public static class HealthChecks +{ + [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] + public static IHealthChecksBuilder AddKafkaConsumerReadinessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, + TimeSpan? timeout = default) => builder + .AddBackgroundServiceReadinessCheck.Service>(name, failureStatus, tags, timeout) + .AddBackgroundQueueConsumerReadinessCheck>(name, failureStatus, tags, timeout); + + [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] + public static IHealthChecksBuilder AddKafkaConsumerLivenessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, + TimeSpan? 
timeout = default) => builder + .AddBackgroundServiceLivenessCheck.Service>(name, failureStatus, tags, timeout) + .AddBackgroundQueueConsumerLivenessCheck>(name, failureStatus, tags, timeout); +} diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionExtensions.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionExtensions.cs deleted file mode 100644 index a5f66e9..0000000 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionExtensions.cs +++ /dev/null @@ -1,25 +0,0 @@ -using LocalPost.DependencyInjection; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Options; - -namespace LocalPost.KafkaConsumer.DependencyInjection; - -public static class ServiceCollectionExtensions -{ - public static OptionsBuilder AddKafkaConsumer(this IServiceCollection services, - string name, Action.Builder> configure) - { - var builder = new Consumer.Builder { Name = name }; - configure(builder); - - services.AddHostedService(provider => builder.Build(provider)); - services - .AddCustomBackgroundQueue($"Kafka/{name}", _ => builder.Messages, builder.BuildHandlerFactory()) - .Configure>( - (options, consumerOptions) => { options.MaxConcurrency = consumerOptions.Get(name).MaxConcurrency; }); - - // TODO Health check, metrics (with all topics for this consumer... 
(it can more than 1)) - - return services.AddOptions(name).Configure(options => options.TopicName = name); - } -} diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs new file mode 100644 index 0000000..4e5fa6c --- /dev/null +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs @@ -0,0 +1,25 @@ +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Options; + +namespace LocalPost.KafkaConsumer.DependencyInjection; + +public static class ServiceRegistration +{ + public static OptionsBuilder AddKafkaConsumer(this IServiceCollection services, + string name, Action.Builder> configure) + { + var builder = new MessageSource.Builder(name); + configure(builder); + services.AddHostedService(builder.Build); + + services + .AddBackgroundQueueConsumer>(name, b => b + .SetReaderFactory(_ => builder.Messages) + .MiddlewareStackBuilder.SetHandler(builder.BuildHandlerFactory())) + .Configure>( + (options, consumerOptions) => { options.MaxConcurrency = consumerOptions.Get(name).Consumer.MaxConcurrency; }); + + return services.AddOptions(name).Configure(options => options.TopicName = name); + } +} diff --git a/src/LocalPost.KafkaConsumer/HealthCheck.cs b/src/LocalPost.KafkaConsumer/HealthCheck.cs deleted file mode 100644 index b88e111..0000000 --- a/src/LocalPost.KafkaConsumer/HealthCheck.cs +++ /dev/null @@ -1,22 +0,0 @@ -using Microsoft.Extensions.Diagnostics.HealthChecks; -using static Microsoft.Extensions.Diagnostics.HealthChecks.HealthCheckResult; - -namespace LocalPost.KafkaConsumer; - -internal class HealthCheck : IHealthCheck -{ - private readonly Consumer.Service _consumer; - - - public Task CheckHealthAsync(HealthCheckContext context, - CancellationToken cancellationToken = default) => Task.FromResult(_currentRates.UpdatedAt switch - { - not null => Healthy("Thresholds loaded", new 
Dictionary - { - ["LastUpdated"] = _currentRates.UpdatedAt, - ["RulesThreshold"] = _currentRates.Rules, - ["WorkflowThreshold"] = _currentRates.Workflows - }), - _ => Unhealthy("Thresholds not loaded") - }); -} diff --git a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj index 70d8948..5863ea8 100644 --- a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj +++ b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj @@ -40,7 +40,7 @@ - + diff --git a/src/LocalPost.KafkaConsumer/Consumer.cs b/src/LocalPost.KafkaConsumer/MessageSource.cs similarity index 69% rename from src/LocalPost.KafkaConsumer/Consumer.cs rename to src/LocalPost.KafkaConsumer/MessageSource.cs index 27e09b7..641c513 100644 --- a/src/LocalPost.KafkaConsumer/Consumer.cs +++ b/src/LocalPost.KafkaConsumer/MessageSource.cs @@ -1,43 +1,37 @@ using System.Threading.Channels; using Confluent.Kafka; -using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Logging; namespace LocalPost.KafkaConsumer; -public static partial class Consumer +public static partial class MessageSource { - internal sealed class Service : BackgroundService + internal sealed class Service : IBackgroundService, IDisposable { private readonly ILogger _logger; - private readonly ConsumerOptions _options; + private readonly Options _options; private readonly IConsumer _kafka; private readonly ChannelWriter> _queue; - private readonly Handler _errorHandler; - public Service(ILogger logger, ConsumerOptions options, string name, IConsumer kafka, - ChannelWriter> queue, Handler errorHandler) + public Service(ILogger logger, string name, Options options, IConsumer kafka, + ChannelWriter> queue) { _logger = logger; _options = options; _kafka = kafka; _queue = queue; - _errorHandler = errorHandler; Name = name; } public string Name { get; } - public bool Closed { get; private set; } + public Task StartAsync(CancellationToken ct) => Task.Run(() => 
_kafka.Subscribe(_options.TopicName), ct); - protected override Task ExecuteAsync(CancellationToken stoppingToken) => - Task.Run(() => Run(stoppingToken), stoppingToken); + public Task ExecuteAsync(CancellationToken ct) => Task.Run(() => Run(ct), ct); private async Task Run(CancellationToken stoppingToken = default) { - _kafka.Subscribe(_options.TopicName); - while (!stoppingToken.IsCancellationRequested) { await Task.Yield(); @@ -53,11 +47,6 @@ private async Task Run(CancellationToken stoppingToken = default) // Just complete the method normally... } } - - _logger.LogInformation("Stopping Kafka {Topic} consumer...", _options.TopicName); - Closed = true; - _kafka.Close(); - _queue.Complete(); } private async Task Consume(CancellationToken stoppingToken) @@ -81,13 +70,21 @@ await _queue.WriteAsync(new ConsumeContext _logger.LogError(e, "Kafka {Topic} consumer error, help link: {HelpLink}", _options.TopicName, e.HelpLink); - await _errorHandler(e, stoppingToken); // TODO exit the app if configured... 
+ // Bubble up, so the supervisor can report the error and the whole app can be restarted (Kubernetes) + throw; } } - public override void Dispose() + public Task StopAsync(CancellationToken ct) => Task.Run(() => + { + _logger.LogInformation("Stopping Kafka {Topic} consumer...", _options.TopicName); + + _kafka.Close(); + _queue.Complete(); + }, ct); + + public void Dispose() { - base.Dispose(); _kafka.Dispose(); } } diff --git a/src/LocalPost.KafkaConsumer/Options.cs b/src/LocalPost.KafkaConsumer/Options.cs new file mode 100644 index 0000000..12a57f0 --- /dev/null +++ b/src/LocalPost.KafkaConsumer/Options.cs @@ -0,0 +1,13 @@ +using System.ComponentModel.DataAnnotations; +using Confluent.Kafka; + +namespace LocalPost.KafkaConsumer; + +public sealed class Options +{ + public ConsumerConfig Kafka { get; set; } = new(); + + [Required] public string TopicName { get; set; } = null!; + + public ConsumerOptions Consumer { get; set; } = new(); +} diff --git a/src/LocalPost.SnsPublisher/DependencyInjection/HealthChecks.cs b/src/LocalPost.SnsPublisher/DependencyInjection/HealthChecks.cs new file mode 100644 index 0000000..28371ad --- /dev/null +++ b/src/LocalPost.SnsPublisher/DependencyInjection/HealthChecks.cs @@ -0,0 +1,26 @@ +using System.Diagnostics.CodeAnalysis; +using Amazon.SimpleNotificationService.Model; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; + +namespace LocalPost.SnsPublisher.DependencyInjection; + +public static class HealthChecks +{ + [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] + public static IHealthChecksBuilder AddAmazonSnsBatchPublisherReadinessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, + TimeSpan? 
timeout = default) => builder + // FIXME Add queue supervisor + .AddBackgroundQueueConsumerReadinessCheck(name, failureStatus, tags, timeout); + + [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] + public static IHealthChecksBuilder AddAmazonSnsBatchPublisherLivenessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, + TimeSpan? timeout = default) => builder + // FIXME Add queue supervisor + .AddBackgroundQueueConsumerLivenessCheck(name, failureStatus, tags, timeout); + + // TODO Optional checks for the in-memory queues... Like if they are full or not +} diff --git a/src/LocalPost.SnsPublisher/DependencyInjection/ServiceCollectionExtensions.cs b/src/LocalPost.SnsPublisher/DependencyInjection/ServiceCollectionExtensions.cs deleted file mode 100644 index b367fc7..0000000 --- a/src/LocalPost.SnsPublisher/DependencyInjection/ServiceCollectionExtensions.cs +++ /dev/null @@ -1,27 +0,0 @@ -using Amazon.SimpleNotificationService.Model; -using LocalPost.DependencyInjection; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.DependencyInjection.Extensions; -using Microsoft.Extensions.Options; - -namespace LocalPost.SnsPublisher.DependencyInjection; - -public static class ServiceCollectionExtensions -{ - public static OptionsBuilder AddAmazonSnsBatchPublisher(this IServiceCollection services) - { - services.TryAddSingleton(); - - return services - .AddAmazonSnsBatchPublisher(provider => provider.GetRequiredService().SendAsync); - } - - public static OptionsBuilder AddAmazonSnsBatchPublisher(this IServiceCollection services, - HandlerFactory handlerFactory) - { - services.TryAddSingleton(); - services.TryAddSingleton(provider => provider.GetRequiredService()); - - return services.AddCustomBackgroundQueue(handlerFactory); - } -} diff --git a/src/LocalPost.SnsPublisher/DependencyInjection/ServiceRegistration.cs 
b/src/LocalPost.SnsPublisher/DependencyInjection/ServiceRegistration.cs new file mode 100644 index 0000000..d51281c --- /dev/null +++ b/src/LocalPost.SnsPublisher/DependencyInjection/ServiceRegistration.cs @@ -0,0 +1,34 @@ +using Amazon.SimpleNotificationService.Model; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.DependencyInjection.Extensions; +using Microsoft.Extensions.Options; + +namespace LocalPost.SnsPublisher.DependencyInjection; + +public static class ServiceRegistration +{ + public static OptionsBuilder AddAmazonSnsBatchPublisher(this IServiceCollection services) + { + services.TryAddSingleton(); + + return services.AddAmazonSnsBatchPublisher(builder => + builder.MiddlewareStackBuilder.SetHandler()); + } + + public static OptionsBuilder AddAmazonSnsBatchPublisher(this IServiceCollection services, + Action.ConsumerBuilder> configure) + { + services.TryAddSingleton(); + services.TryAddSingleton(provider => provider.GetRequiredService()); + + services + .AddBackgroundQueueConsumer(configure) + .Configure>( + (options, consumerOptions) => { options.MaxConcurrency = consumerOptions.Value.Sender.MaxConcurrency; }); + + return services.AddOptions(); + + + } +} diff --git a/src/LocalPost.SnsPublisher/PublishBatchRequestEntryExtensions.cs b/src/LocalPost.SnsPublisher/PublishBatchRequestEntryEx.cs similarity index 82% rename from src/LocalPost.SnsPublisher/PublishBatchRequestEntryExtensions.cs rename to src/LocalPost.SnsPublisher/PublishBatchRequestEntryEx.cs index 4abb6b6..f592f03 100644 --- a/src/LocalPost.SnsPublisher/PublishBatchRequestEntryExtensions.cs +++ b/src/LocalPost.SnsPublisher/PublishBatchRequestEntryEx.cs @@ -3,7 +3,7 @@ namespace LocalPost.SnsPublisher; -internal static class PublishBatchRequestEntryExtensions +internal static class PublishBatchRequestEntryEx { // Include attributes in the calculation later?.. 
public static int CalculateSize(this PublishBatchRequestEntry entry) => Encoding.UTF8.GetByteCount(entry.Message); diff --git a/src/LocalPost.SnsPublisher/Publisher.cs b/src/LocalPost.SnsPublisher/Publisher.cs index 6115d8d..527470d 100644 --- a/src/LocalPost.SnsPublisher/Publisher.cs +++ b/src/LocalPost.SnsPublisher/Publisher.cs @@ -1,4 +1,4 @@ -using System.Threading.Channels; +using System.Collections.Immutable; using Amazon.SimpleNotificationService.Model; namespace LocalPost.SnsPublisher; @@ -8,52 +8,47 @@ public interface ISnsPublisher IBackgroundQueue ForTopic(string arn); } -internal sealed class Publisher : ISnsPublisher, IAsyncEnumerable, IDisposable +internal sealed partial class Publisher : ISnsPublisher, IAsyncEnumerable, + IBackgroundQueueManager, IDisposable { - private sealed class TopicPublishingQueue : IBackgroundQueue - { - private readonly Channel _batchEntries; - - public TopicPublishingQueue(string arn) - { - _batchEntries = Channel.CreateUnbounded(new UnboundedChannelOptions - { - SingleReader = true, - SingleWriter = false - }); - Results = _batchEntries.Reader.ReadAllAsync().Batch(() => new SnsBatchBuilder(arn)); - } - - public IAsyncEnumerable Results { get; } - - public ValueTask Enqueue(PublishBatchRequestEntry item, CancellationToken ct = default) - { - if (item.CalculateSize() > PublisherOptions.RequestMaxSize) - throw new ArgumentOutOfRangeException(nameof(item), "Message is too big"); - - return _batchEntries.Writer.WriteAsync(item, ct); - } - } + private ImmutableDictionary _channels = + ImmutableDictionary.Empty; - private readonly Dictionary _channels = new(); private readonly AsyncEnumerableMerger _combinedReader = new(true); - private TopicPublishingQueue Create(string arn) - { - var q = _channels[arn] = new TopicPublishingQueue(arn); - _combinedReader.Add(q.Results); + private readonly PublisherOptions _options; - return q; + public Publisher(PublisherOptions options) + { + _options = options; } + public bool IsClosed { get; 
private set; } + public IBackgroundQueue ForTopic(string arn) => - _channels.TryGetValue(arn, out var queue) ? queue : Create(arn); + ImmutableInterlocked.GetOrAdd(ref _channels, arn, RegisterQueueFor); - public void Dispose() + private TopicPublishRequests RegisterQueueFor(string arn) { - _combinedReader.Dispose(); + var queue = new TopicPublishRequests(_options.PerTopic, arn); + _combinedReader.Add(queue); + + return queue; } public IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) => _combinedReader.GetAsyncEnumerator(ct); + + public async ValueTask CompleteAsync(CancellationToken ct = default) + { + // TODO Do not allow to register new topics, as they won't be processed here... + await Task.WhenAll(_channels.Values.Select(q => q.CompleteAsync(ct).AsTask())); + IsClosed = true; + } + + public void Dispose() + { + _combinedReader.Dispose(); + _channels = ImmutableDictionary.Empty; + } } diff --git a/src/LocalPost.SnsPublisher/PublisherOptions.cs b/src/LocalPost.SnsPublisher/PublisherOptions.cs index dd16e71..d201533 100644 --- a/src/LocalPost.SnsPublisher/PublisherOptions.cs +++ b/src/LocalPost.SnsPublisher/PublisherOptions.cs @@ -6,4 +6,8 @@ public sealed record PublisherOptions public const int RequestMaxSize = 262_144; public const int BatchMaxSize = 10; + + public ConsumerOptions Sender { get; init; } = new(); + + public QueueOptions PerTopic { get; init; } = new(); } diff --git a/src/LocalPost.SnsPublisher/Sender.cs b/src/LocalPost.SnsPublisher/Sender.cs index 0b328be..f27c520 100644 --- a/src/LocalPost.SnsPublisher/Sender.cs +++ b/src/LocalPost.SnsPublisher/Sender.cs @@ -5,7 +5,7 @@ namespace LocalPost.SnsPublisher; -internal sealed class Sender +internal sealed class Sender : IHandler { private static readonly ActivitySource Tracer = new(typeof(Sender).Namespace); @@ -18,7 +18,7 @@ public Sender(ILogger logger, IAmazonSimpleNotificationService sns) _sns = sns; } - public async Task SendAsync(PublishBatchRequest payload, CancellationToken 
ct) + public async Task InvokeAsync(PublishBatchRequest payload, CancellationToken ct) { using var span = Tracer.StartActivity(); diff --git a/src/LocalPost.SnsPublisher/TopicPublishRequests.cs b/src/LocalPost.SnsPublisher/TopicPublishRequests.cs new file mode 100644 index 0000000..76f51ba --- /dev/null +++ b/src/LocalPost.SnsPublisher/TopicPublishRequests.cs @@ -0,0 +1,35 @@ +using Amazon.SimpleNotificationService.Model; + +namespace LocalPost.SnsPublisher; + +internal sealed partial class Publisher +{ + private sealed class TopicPublishRequests : IBackgroundQueueManager, + IBackgroundQueue, IAsyncEnumerable + { + private readonly string _arn; + private readonly BackgroundQueue _queue; + + public TopicPublishRequests(QueueOptions options, string arn) + { + _arn = arn; + _queue = new BackgroundQueue(options); + } + + public IAsyncEnumerator GetAsyncEnumerator( + CancellationToken cancellationToken = default) => + _queue.Batch(() => new SnsBatchBuilder(_arn)).GetAsyncEnumerator(cancellationToken); + + public ValueTask Enqueue(PublishBatchRequestEntry item, CancellationToken ct = default) + { + if (item.CalculateSize() > PublisherOptions.RequestMaxSize) + throw new ArgumentOutOfRangeException(nameof(item), "Message is too big"); + + return _queue.Enqueue(item, ct); + } + + public bool IsClosed => _queue.IsClosed; + + public ValueTask CompleteAsync(CancellationToken ct = default) => _queue.CompleteAsync(ct); + } +} diff --git a/src/LocalPost.SqsConsumer/ConsumerBuilder.cs b/src/LocalPost.SqsConsumer/Builder.cs similarity index 56% rename from src/LocalPost.SqsConsumer/ConsumerBuilder.cs rename to src/LocalPost.SqsConsumer/Builder.cs index 3a8780c..c8795b3 100644 --- a/src/LocalPost.SqsConsumer/ConsumerBuilder.cs +++ b/src/LocalPost.SqsConsumer/Builder.cs @@ -1,14 +1,18 @@ -using System.Collections.Immutable; -using LocalPost.DependencyInjection; +using System.Threading.Channels; using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Hosting; 
namespace LocalPost.SqsConsumer; -public static partial class Consumer +public static partial class MessageSource { public sealed class Builder { + private Channel _queue = Channel.CreateBounded(new BoundedChannelOptions(config.BufferSize) + { + SingleWriter = true, + SingleReader = true + }); + public Builder(string name) { Name = name; @@ -17,23 +21,20 @@ public Builder(string name) public string Name { get; } - // TODO Use... - public Handler ErrorHandler { get; set; } = (m, ct) => Task.CompletedTask; - public MiddlewareStackBuilder MiddlewareStackBuilder { get; } = new(); internal HandlerFactory BuildHandlerFactory() => MiddlewareStackBuilder.Build().Resolve; - internal IHostedService Build(IServiceProvider provider) + internal BackgroundServiceSupervisor Build(IServiceProvider provider) { var client = ActivatorUtilities.CreateInstance(provider, Name); var consumer = ActivatorUtilities.CreateInstance(provider, Name, client); - var consumerSupervisor = ActivatorUtilities.CreateInstance>(provider, - Name, consumer); + var supervisor = ActivatorUtilities + .CreateInstance>(provider, consumer); - return consumerSupervisor; + return supervisor; } } } diff --git a/src/LocalPost.SqsConsumer/ConsumerMiddleware.cs b/src/LocalPost.SqsConsumer/ConsumerMiddleware.cs index 298732d..cb4824d 100644 --- a/src/LocalPost.SqsConsumer/ConsumerMiddleware.cs +++ b/src/LocalPost.SqsConsumer/ConsumerMiddleware.cs @@ -2,9 +2,9 @@ namespace LocalPost.SqsConsumer; -public static partial class Consumer +public static partial class MessageSource { - internal sealed class Middleware : IMiddleware + internal sealed class Middleware : IMiddleware // TODO Rename { private readonly IAmazonSQS _sqs; diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecks.cs b/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecks.cs index 24f4e6c..ad21ed0 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecks.cs +++ 
b/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecks.cs @@ -11,13 +11,13 @@ public static class HealthChecks public static IHealthChecksBuilder AddAmazonSqsConsumerReadinessCheck(this IHealthChecksBuilder builder, string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, TimeSpan? timeout = default) => builder - .AddBackgroundServiceReadinessCheck(name, failureStatus, tags, timeout) + .AddBackgroundServiceReadinessCheck(name, failureStatus, tags, timeout) .AddBackgroundQueueConsumerReadinessCheck(name, failureStatus, tags, timeout); [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] public static IHealthChecksBuilder AddAmazonSqsConsumerLivenessCheck(this IHealthChecksBuilder builder, string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, TimeSpan? timeout = default) => builder - .AddBackgroundServiceReadinessCheck(name, failureStatus, tags, timeout) - .AddBackgroundQueueConsumerReadinessCheck(name, failureStatus, tags, timeout); + .AddBackgroundServiceLivenessCheck(name, failureStatus, tags, timeout) + .AddBackgroundQueueConsumerLivenessCheck(name, failureStatus, tags, timeout); } diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs index c604676..f8022da 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs @@ -1,5 +1,3 @@ -using Amazon.SQS.Model; -using LocalPost; using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.DependencyInjection.Extensions; @@ -9,10 +7,30 @@ namespace LocalPost.SqsConsumer.DependencyInjection; public static class ServiceRegistration { -// public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, -// string name, Handler handler) => -// services.AddAmazonSqsConsumer(name, _ => 
handler); -// +// public static OptionsBuilder AddAmazonSqsJsonConsumer(this IServiceCollection services, +// string name, Action? configure = null) where THandler : IHandler => +// services.AddAmazonSqsConsumer(name, builder => +// { +// builder.MiddlewareStackBuilder.SetHandler(); +// configure?.Invoke(builder); +// }); + + public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, + string name, Action? configure = null) where THandler : IHandler => + services.AddAmazonSqsConsumer(name, builder => + { + builder.MiddlewareStackBuilder.SetHandler(); + configure?.Invoke(builder); + }); + + public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, + string name, Handler handler, Action? configure = null) => + services.AddAmazonSqsConsumer(name, builder => + { + builder.MiddlewareStackBuilder.SetHandler(handler); + configure?.Invoke(builder); + }); + // public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, // string name, Func handler) where TDep1 : notnull => // services.AddAmazonSqsConsumer(name, provider => (context, ct) => @@ -53,27 +71,22 @@ public static class ServiceRegistration // services // .AddAmazonSqsConsumer(name, provider => provider.GetRequiredService().Process); - public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, - string name, Action configure) + public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, + string name, Action configure) { - var builder = new Consumer.Builder(name); + var builder = new MessageSource.Builder(name); configure(builder); services.AddHostedService(builder.Build); - services.TryAddSingleton(); + services.TryAddSingleton(); services .AddBackgroundQueueConsumer(name, b => b - .SetReaderFactory(provider => provider.GetRequiredService(name).Messages) + .SetReaderFactory(provider => provider.GetRequiredService(name).Messages) 
.MiddlewareStackBuilder.SetHandler(builder.BuildHandlerFactory())) - .Configure>( + .Configure>( (options, consumerOptions) => { options.MaxConcurrency = consumerOptions.Get(name).MaxConcurrency; }); - return services.AddOptions(name).Configure(options => options.QueueName = name); + return services.AddOptions(name).Configure(options => options.QueueName = name); } - -// public static IHealthChecksBuilder AddAmazonSqsConsumerHealthCheck(this IHealthChecksBuilder builder) -// { -// // TODO Add a global one... -// } } diff --git a/src/LocalPost.SqsConsumer/Consumer.cs b/src/LocalPost.SqsConsumer/MessageSource.cs similarity index 75% rename from src/LocalPost.SqsConsumer/Consumer.cs rename to src/LocalPost.SqsConsumer/MessageSource.cs index de680ef..72d927c 100644 --- a/src/LocalPost.SqsConsumer/Consumer.cs +++ b/src/LocalPost.SqsConsumer/MessageSource.cs @@ -3,24 +3,18 @@ namespace LocalPost.SqsConsumer; -public static partial class Consumer +public static partial class MessageSource { internal sealed class Service : IBackgroundService { private readonly QueueClient _client; private readonly Channel _queue; - public Service(string name, QueueClient client, IOptionsMonitor options) + public Service(string name, QueueClient client, IOptionsMonitor options) { var config = options.Get(name); _client = client; - _queue = Channel.CreateBounded(new BoundedChannelOptions(config.BufferSize) - { - SingleWriter = true, - SingleReader = true - // TODO AllowSynchronousContinuations?.. 
- }); Name = name; } @@ -44,7 +38,12 @@ public async Task ExecuteAsync(CancellationToken ct) } } - public Task StopAsync(CancellationToken ct) => Task.CompletedTask; + public Task StopAsync(CancellationToken ct) + { + _queue.Writer.Complete(); + + return Task.CompletedTask; + } private async Task Consume(CancellationToken stoppingToken) { diff --git a/src/LocalPost.SqsConsumer/ConsumerOptions.cs b/src/LocalPost.SqsConsumer/Options.cs similarity index 98% rename from src/LocalPost.SqsConsumer/ConsumerOptions.cs rename to src/LocalPost.SqsConsumer/Options.cs index 71372ac..43020ca 100644 --- a/src/LocalPost.SqsConsumer/ConsumerOptions.cs +++ b/src/LocalPost.SqsConsumer/Options.cs @@ -9,7 +9,7 @@ namespace LocalPost.SqsConsumer; /// /// General SQS consumer settings /// -public sealed record ConsumerOptions +public sealed record Options { internal static readonly List AllAttributes = new() { "All" }; internal static readonly List AllMessageAttributes = new() { "All" }; diff --git a/src/LocalPost.SqsConsumer/QueueClient.cs b/src/LocalPost.SqsConsumer/QueueClient.cs index 3e31fbb..48c8800 100644 --- a/src/LocalPost.SqsConsumer/QueueClient.cs +++ b/src/LocalPost.SqsConsumer/QueueClient.cs @@ -8,17 +8,25 @@ namespace LocalPost.SqsConsumer; internal sealed class QueueClient { + // TODO Add more details + // See https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlActivitySource.cs#LL61C31-L61C49 + // See https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlCommand.cs#L1639-L1644 private static readonly ActivitySource Tracer = new(typeof(QueueClient).Namespace); private readonly ILogger _logger; private readonly IAmazonSQS _sqs; - private readonly ConsumerOptions _options; + private readonly Options _options; - public QueueClient(ILogger logger, string name, IAmazonSQS sqs, IOptionsMonitor options) + public QueueClient(ILogger logger, string name, IOptionsMonitor options, IAmazonSQS sqs) : + this(logger, options.Get(name), sqs) + { + } + + public 
QueueClient(ILogger logger, Options options, IAmazonSQS sqs) { _logger = logger; _sqs = sqs; - _options = options.Get(name); + _options = options; } private GetQueueAttributesResponse? _queueAttributes; @@ -47,7 +55,7 @@ private async Task FetchQueueAttributesAsync(CancellationToken ct) try { // Checking for a possible error in the response would be also good... - _queueAttributes = await _sqs.GetQueueAttributesAsync(QueueUrl, ConsumerOptions.AllAttributes, ct); + _queueAttributes = await _sqs.GetQueueAttributesAsync(QueueUrl, Options.AllAttributes, ct); } catch (OperationCanceledException e) when (e.CancellationToken == ct) { @@ -72,8 +80,8 @@ public async Task> PullMessagesAsync(CancellationTok { using var span = Tracer.StartActivity(); - var attributeNames = ConsumerOptions.AllAttributes; // TODO Configurable - var messageAttributeNames = ConsumerOptions.AllMessageAttributes; // TODO Configurable + var attributeNames = Options.AllAttributes; // TODO Configurable + var messageAttributeNames = Options.AllMessageAttributes; // TODO Configurable try { @@ -100,11 +108,10 @@ public async Task> PullMessagesAsync(CancellationTok { throw; } - catch (Exception e) + catch (Exception) { - // FIXME Error handler + // Just bubble up, so the supervisor can report the error and the whole app can be restarted (Kubernetes) + throw; } - - return Array.Empty(); } } diff --git a/src/LocalPost/AsyncEnumerable.cs b/src/LocalPost/AsyncEnumerableEx.cs similarity index 100% rename from src/LocalPost/AsyncEnumerable.cs rename to src/LocalPost/AsyncEnumerableEx.cs diff --git a/src/LocalPost/BackgroundJobQueue.cs b/src/LocalPost/BackgroundJobQueue.cs index 0745202..8df07cf 100644 --- a/src/LocalPost/BackgroundJobQueue.cs +++ b/src/LocalPost/BackgroundJobQueue.cs @@ -1,23 +1,26 @@ -using System.Threading.Channels; - namespace LocalPost; public delegate Task Job(CancellationToken ct); +/// +/// Just a convenient alias for . 
+/// public interface IBackgroundJobQueue : IBackgroundQueue { } -internal sealed class BackgroundJobQueue : IBackgroundJobQueue, IAsyncEnumerable +internal sealed class BackgroundJobQueue : IBackgroundJobQueue, IBackgroundQueueManager { - private readonly Channel _messages = Channel.CreateUnbounded(new UnboundedChannelOptions + private readonly BackgroundQueue _queue; + + public BackgroundJobQueue(BackgroundQueue queue) { - SingleReader = true, - SingleWriter = false, - }); + _queue = queue; + } + + public bool IsClosed => _queue.IsClosed; - public ValueTask Enqueue(Job item, CancellationToken ct = default) => _messages.Writer.WriteAsync(item, ct); + public ValueTask Enqueue(Job item, CancellationToken ct = default) => _queue.Enqueue(item, ct); - public IAsyncEnumerator GetAsyncEnumerator(CancellationToken cancellationToken = default) => - _messages.Reader.ReadAllAsync(cancellationToken).GetAsyncEnumerator(cancellationToken); + public ValueTask CompleteAsync(CancellationToken ct = default) => _queue.CompleteAsync(ct); } diff --git a/src/LocalPost/BackgroundQueue.cs b/src/LocalPost/BackgroundQueue.cs index cd708a3..ac42e48 100644 --- a/src/LocalPost/BackgroundQueue.cs +++ b/src/LocalPost/BackgroundQueue.cs @@ -1,15 +1,23 @@ using System.Threading.Channels; +using Microsoft.Extensions.Options; namespace LocalPost; public interface IBackgroundQueue { + // TODO Custom exception when closed?.. Or just return true/false?.. 
ValueTask Enqueue(T item, CancellationToken ct = default); } -public interface IBackgroundQueueReader +// TODO Open to public later +internal interface IBackgroundQueueManager { - public ChannelReader Reader { get; } + // Implement later for a better health check +// bool IsFull { get; } + + bool IsClosed { get; } + + ValueTask CompleteAsync(CancellationToken ct = default); } public interface IHandler @@ -33,18 +41,60 @@ public interface IMiddleware -// Simplest background queue -public sealed partial class BackgroundQueue : IBackgroundQueue, IAsyncEnumerable +public sealed partial class BackgroundQueue : IBackgroundQueue, IBackgroundQueueManager, IAsyncEnumerable { - // TODO Bounded version (1000 by default), overflow should be dropped with a log message - private readonly Channel _messages = Channel.CreateUnbounded(new UnboundedChannelOptions + private readonly TimeSpan _completionTimeout; + + // For the DI container + public BackgroundQueue(IOptions> options) : this(options.Value.Queue) + { + } + + public BackgroundQueue(QueueOptions options) : this( + options.MaxSize switch + { + not null => Channel.CreateBounded(new BoundedChannelOptions(options.MaxSize.Value) + { + SingleReader = true, + SingleWriter = false, + }), + _ => Channel.CreateUnbounded(new UnboundedChannelOptions + { + SingleReader = true, + SingleWriter = false, + }) + }, + TimeSpan.FromMilliseconds(options.CompletionTimeout ?? 
0)) + { + } + + public BackgroundQueue(Channel messages, TimeSpan completionTimeout) + { + _completionTimeout = completionTimeout; + Messages = messages; + } + + protected Channel Messages { get; } + + public bool IsClosed { get; private set; } + + public ValueTask Enqueue(T item, CancellationToken ct = default) => Messages.Writer.WriteAsync(item, ct); + + public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) + { + while (await Messages.Reader.WaitToReadAsync(ct)) + while (Messages.Reader.TryRead(out var item)) + yield return item; + } + + public async ValueTask CompleteAsync(CancellationToken ct = default) { - SingleReader = false, - SingleWriter = false, - }); + if (IsClosed) + return; - public ValueTask Enqueue(T item, CancellationToken ct = default) => _messages.Writer.WriteAsync(item, ct); + await Task.Delay(_completionTimeout, ct); - public IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) => - _messages.Reader.ReadAllAsync(ct).GetAsyncEnumerator(ct); + Messages.Writer.Complete(); // TODO Handle exceptions + IsClosed = true; + } } diff --git a/src/LocalPost/BackgroundQueueBuilder.cs b/src/LocalPost/BackgroundQueueBuilder.cs index 4698580..3ee1886 100644 --- a/src/LocalPost/BackgroundQueueBuilder.cs +++ b/src/LocalPost/BackgroundQueueBuilder.cs @@ -1,15 +1,14 @@ using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Hosting; namespace LocalPost; public sealed partial class BackgroundQueue { - public sealed class Builder + public sealed class ConsumerBuilder { private Func>? 
_readerFactory; - public Builder(string name) + public ConsumerBuilder(string name) { Name = name; } @@ -18,7 +17,7 @@ public Builder(string name) public MiddlewareStackBuilder MiddlewareStackBuilder { get; } = new(); - public Builder SetReaderFactory(Func> factory) + public ConsumerBuilder SetReaderFactory(Func> factory) { _readerFactory = factory; @@ -28,18 +27,18 @@ public Builder SetReaderFactory(Func> fact private HandlerFactory BuildHandlerFactory() => MiddlewareStackBuilder.Build().Resolve; - internal IHostedService Build(IServiceProvider provider) + internal BackgroundServiceSupervisor Build(IServiceProvider provider) { // TODO Custom exception var readerFactory = _readerFactory ?? throw new Exception($"Reader factory is required"); var executor = ActivatorUtilities.CreateInstance(provider, Name); var consumer = ActivatorUtilities.CreateInstance>(provider, Name, - readerFactory(provider), executor, BuildHandlerFactory()); - var consumerSupervisor = ActivatorUtilities.CreateInstance>>(provider, Name, - consumer); + executor, readerFactory(provider), BuildHandlerFactory()); + var supervisor = ActivatorUtilities + .CreateInstance>>(provider, consumer); - return consumerSupervisor; + return supervisor; } } } diff --git a/src/LocalPost/BackgroundQueueConsumer.cs b/src/LocalPost/BackgroundQueueConsumer.cs index 8054328..66a8e7f 100644 --- a/src/LocalPost/BackgroundQueueConsumer.cs +++ b/src/LocalPost/BackgroundQueueConsumer.cs @@ -11,11 +11,13 @@ internal sealed class BackgroundQueueConsumer : IBackgroundService private readonly IAsyncEnumerable _reader; private readonly IExecutor _executor; - private readonly Func> _handlerFactory; + private readonly HandlerFactory _handlerFactory; - public BackgroundQueueConsumer(string name, - ILogger> logger, IServiceScopeFactory scopeFactory, - IExecutor executor, IAsyncEnumerable reader, Func> handlerFactory) + public BackgroundQueueConsumer(ILogger> logger, string name, + IServiceScopeFactory scopeFactory, + IExecutor 
executor, + IAsyncEnumerable reader, + HandlerFactory handlerFactory) { Name = name; _logger = logger; @@ -38,24 +40,24 @@ public async Task ExecuteAsync(CancellationToken ct) } catch (ChannelClosedException e) { - // TODO isRunning above... _logger.LogWarning(e, "{Name} queue has been closed, stop listening", Name); - // The rest of the queue will be processed in StopAsync() below + // All currently running tasks will be processed in StopAsync() below } } public async Task StopAsync(CancellationToken forceExitToken) { + // Good to have later: an option to NOT process the rest of the messages try { // TODO An option to NOT process the rest of the messages... await foreach (var message in _reader.WithCancellation(forceExitToken)) await _executor.StartAsync(() => Process(message, forceExitToken), forceExitToken); } - catch (ChannelClosedException e) + catch (ChannelClosedException) { - // TODO Do something? + // OK, just wait for the rest of the tasks to finish } // Wait until all currently running tasks are finished diff --git a/src/LocalPost/BackgroundQueueSupervisor.cs b/src/LocalPost/BackgroundQueueSupervisor.cs new file mode 100644 index 0000000..324b2e8 --- /dev/null +++ b/src/LocalPost/BackgroundQueueSupervisor.cs @@ -0,0 +1,30 @@ +using LocalPost.DependencyInjection; +using Microsoft.Extensions.Hosting; + +namespace LocalPost; + +public sealed partial class BackgroundQueue +{ + // TODO Use + internal sealed class Supervisor : IHostedService, INamedService + { + // TODO Health checks + + public Supervisor(IBackgroundQueueManager queue, string name) + { + Queue = queue; + Name = name; + } + + internal IBackgroundQueueManager Queue { get; } + + public string Name { get; } + + public Task StartAsync(CancellationToken ct) => Task.CompletedTask; + + public async Task StopAsync(CancellationToken forceExitToken) + { + await Queue.CompleteAsync(forceExitToken); + } + } +} diff --git a/src/LocalPost/BackgroundService.cs b/src/LocalPost/BackgroundService.cs new file 
mode 100644 index 0000000..8b7d9cb --- /dev/null +++ b/src/LocalPost/BackgroundService.cs @@ -0,0 +1,12 @@ +using LocalPost.DependencyInjection; + +namespace LocalPost; + +internal interface IBackgroundService : INamedService +{ + Task StartAsync(CancellationToken ct); + + Task ExecuteAsync(CancellationToken ct); + + Task StopAsync(CancellationToken ct); +} diff --git a/src/LocalPost/BackgroundServiceSupervisor.cs b/src/LocalPost/BackgroundServiceSupervisor.cs index 5a6d27a..3ab0887 100644 --- a/src/LocalPost/BackgroundServiceSupervisor.cs +++ b/src/LocalPost/BackgroundServiceSupervisor.cs @@ -7,23 +7,13 @@ namespace LocalPost; -internal interface IBackgroundService : INamedService -{ - Task StartAsync(CancellationToken ct); - - Task ExecuteAsync(CancellationToken ct); - - Task StopAsync(CancellationToken ct); -} - -internal sealed class BackgroundServiceSupervisor : IHostedService, INamedService, IDisposable - where T : class, IBackgroundService +internal abstract class BackgroundServiceSupervisor : IHostedService, INamedService, IDisposable { public sealed class LivenessCheck : IHealthCheck { - private readonly BackgroundServiceSupervisor _supervisor; + private readonly BackgroundServiceSupervisor _supervisor; - public LivenessCheck(BackgroundServiceSupervisor supervisor) + public LivenessCheck(BackgroundServiceSupervisor supervisor) { _supervisor = supervisor; } @@ -46,9 +36,9 @@ private HealthCheckResult CheckHealth(HealthCheckContext _) public sealed class ReadinessCheck : IHealthCheck { - private readonly BackgroundServiceSupervisor _supervisor; + private readonly BackgroundServiceSupervisor _supervisor; - public ReadinessCheck(BackgroundServiceSupervisor supervisor) + public ReadinessCheck(BackgroundServiceSupervisor supervisor) { _supervisor = supervisor; } @@ -68,18 +58,18 @@ private HealthCheckResult CheckHealth(HealthCheckContext context) } } - private readonly ILogger> _logger; + private readonly ILogger _logger; private CancellationTokenSource? 
_executionCts; private Task? _execution; - public BackgroundServiceSupervisor(ILogger> logger, T service) + public BackgroundServiceSupervisor(ILogger logger, IBackgroundService service) { _logger = logger; Service = service; } - public T Service { get; } + public IBackgroundService Service { get; } public string Name => Service.Name; @@ -101,7 +91,7 @@ public async Task StartAsync(CancellationToken ct) try { - await Service.StartAsync(ct).ConfigureAwait(false); + await Service.StartAsync(ct); // Store the task we're executing _execution = ExecuteAsync(_executionCts.Token); @@ -126,6 +116,7 @@ private async Task ExecuteAsync(CancellationToken stoppingToken) try { await Service.ExecuteAsync(stoppingToken); + _logger.LogInformation("{Name} background queue is completed", Name); } catch (OperationCanceledException e) when (e.CancellationToken == stoppingToken) { @@ -150,16 +141,28 @@ public async Task StopAsync(CancellationToken forceExitToken) { if (_execution is not null) // Wait until the execution completes or the app is forced to exit - await Task.WhenAny(_execution, Task.Delay(Timeout.Infinite, forceExitToken)).ConfigureAwait(false); + await Task.WhenAny(_execution, Task.Delay(Timeout.Infinite, forceExitToken)); } - await Service.StopAsync(forceExitToken).ConfigureAwait(false); + await Service.StopAsync(forceExitToken); } public void Dispose() { _executionCts?.Cancel(); + // ReSharper disable once SuspiciousTypeConversion.Global if (Service is IDisposable disposableService) disposableService.Dispose(); } } + +internal sealed class BackgroundServiceSupervisor : BackgroundServiceSupervisor + where T : class, IBackgroundService +{ + public BackgroundServiceSupervisor(ILogger> logger, T service) : base(logger, service) + { + Service = service; + } + + public new T Service { get; } +} diff --git a/src/LocalPost/DependencyInjection/HealthChecks.cs b/src/LocalPost/DependencyInjection/HealthChecks.cs index 580134c..6a7ba0e 100644 --- 
a/src/LocalPost/DependencyInjection/HealthChecks.cs +++ b/src/LocalPost/DependencyInjection/HealthChecks.cs @@ -1,4 +1,4 @@ -using LocalPost.DependencyInjection; +using System.Diagnostics.CodeAnalysis; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Diagnostics.HealthChecks; @@ -6,12 +6,29 @@ namespace LocalPost.DependencyInjection; public static class HealthChecks { - public static IHealthChecksBuilder AddBackgroundQueueConsumerReadinessCheck(this IHealthChecksBuilder builder, + [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] + public static IHealthChecksBuilder AddBackgroundQueueReadinessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, + TimeSpan? timeout = default) => builder + .AddBackgroundServiceReadinessCheck(name, failureStatus, tags, timeout) + .AddBackgroundQueueConsumerReadinessCheck(name, failureStatus, tags, timeout); + + [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] + public static IHealthChecksBuilder AddBackgroundQueueLivenessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, + TimeSpan? timeout = default) => builder + .AddBackgroundServiceLivenessCheck(name, failureStatus, tags, timeout) + .AddBackgroundQueueConsumerLivenessCheck(name, failureStatus, tags, timeout); +} + +public static class HealthCheckBuilderEx +{ + internal static IHealthChecksBuilder AddBackgroundQueueConsumerReadinessCheck(this IHealthChecksBuilder builder, string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, TimeSpan? 
timeout = default) => builder.AddBackgroundServiceReadinessCheck>(name, failureStatus, tags, timeout); - public static IHealthChecksBuilder AddBackgroundQueueConsumerLivenessCheck(this IHealthChecksBuilder builder, + internal static IHealthChecksBuilder AddBackgroundQueueConsumerLivenessCheck(this IHealthChecksBuilder builder, string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, TimeSpan? timeout = default) => builder.AddBackgroundServiceLivenessCheck>(name, failureStatus, tags, timeout); @@ -21,7 +38,7 @@ internal static IHealthChecksBuilder AddBackgroundServiceReadinessCheck(this TimeSpan? timeout = default) where T : class, IBackgroundService => builder.Add(new HealthCheckRegistration( name, - provider => ActivatorUtilities.CreateInstance.ReadinessCheck>(provider, + provider => ActivatorUtilities.CreateInstance(provider, provider.GetSupervisor(name)), failureStatus, tags, @@ -32,7 +49,7 @@ internal static IHealthChecksBuilder AddBackgroundServiceLivenessCheck(this I TimeSpan? 
timeout = default) where T : class, IBackgroundService => builder.Add(new HealthCheckRegistration( name, - provider => ActivatorUtilities.CreateInstance.LivenessCheck>(provider, + provider => ActivatorUtilities.CreateInstance(provider, provider.GetSupervisor(name)), failureStatus, tags, diff --git a/src/LocalPost/DependencyInjection/JobQueueRegistration.cs b/src/LocalPost/DependencyInjection/JobQueueRegistration.cs index 48f79fe..b963e29 100644 --- a/src/LocalPost/DependencyInjection/JobQueueRegistration.cs +++ b/src/LocalPost/DependencyInjection/JobQueueRegistration.cs @@ -6,12 +6,12 @@ namespace LocalPost.DependencyInjection; public static class JobQueueRegistration { - public static OptionsBuilder AddBackgroundJobQueue(this IServiceCollection services) + public static OptionsBuilder> AddBackgroundJobQueue(this IServiceCollection services) { services.TryAddSingleton(); services.TryAddSingleton(provider => provider.GetRequiredService()); - return services.AddBackgroundQueueConsumer(builder => + return services.AddBackgroundQueue(builder => builder.MiddlewareStackBuilder.SetHandler((job, ct) => job(ct))); } } diff --git a/src/LocalPost/DependencyInjection/QueueRegistration.cs b/src/LocalPost/DependencyInjection/QueueRegistration.cs index 758e75b..6ca2d0e 100644 --- a/src/LocalPost/DependencyInjection/QueueRegistration.cs +++ b/src/LocalPost/DependencyInjection/QueueRegistration.cs @@ -7,46 +7,56 @@ namespace LocalPost.DependencyInjection; public static class QueueRegistration { // THandler has to be registered by the user - public static OptionsBuilder AddBackgroundQueue(this IServiceCollection services, - Action.Builder>? 
configure = null) where THandler : IHandler => - services.AddBackgroundQueue(builder => builder.MiddlewareStackBuilder.SetHandler()); - - public static OptionsBuilder AddBackgroundQueue(this IServiceCollection services, - Action.Builder> configure) + public static OptionsBuilder> AddBackgroundQueue(this IServiceCollection services, + Action.ConsumerBuilder>? configure = null) where THandler : IHandler => + services.AddBackgroundQueue(builder => + { + builder.MiddlewareStackBuilder.SetHandler(); + configure?.Invoke(builder); + }); + + public static OptionsBuilder> AddBackgroundQueue(this IServiceCollection services, + Handler handler, Action.ConsumerBuilder>? configure = null) => + services.AddBackgroundQueue(builder => + { + builder.MiddlewareStackBuilder.SetHandler(handler); + configure?.Invoke(builder); + }); + + public static OptionsBuilder> AddBackgroundQueue(this IServiceCollection services, + Action.ConsumerBuilder> configure) { services.TryAddSingleton>(); services.TryAddSingleton>(provider => provider.GetRequiredService>()); - return services.AddBackgroundQueueConsumer>(configure); - } - + services + .AddBackgroundQueueConsumer>(configure) + .Configure>>( + (options, consumerOptions) => { options.MaxConcurrency = consumerOptions.Value.Consumer.MaxConcurrency; }); - -// // TReader & THandler have to be registered by the user -// public static OptionsBuilder AddBackgroundQueueConsumer( -// this IServiceCollection services) where TReader : IAsyncEnumerable where THandler : IHandler => -// services.AddCustomBackgroundQueue(provider => provider.GetRequiredService().InvokeAsync); + return services.AddOptions>(); + } // TReader has to be registered by the user - public static OptionsBuilder AddBackgroundQueueConsumer(this IServiceCollection services, - Action.Builder> configure) where TReader : IAsyncEnumerable => + public static OptionsBuilder AddBackgroundQueueConsumer(this IServiceCollection services, + Action.ConsumerBuilder> configure) where TReader : class, 
IAsyncEnumerable => services.AddBackgroundQueueConsumer(Reflection.FriendlyNameOf(), builder => configure( builder.SetReaderFactory(provider => provider.GetRequiredService()))); - public static OptionsBuilder AddBackgroundQueueConsumer(this IServiceCollection services, - Action.Builder> configure) => + public static OptionsBuilder AddBackgroundQueueConsumer(this IServiceCollection services, + Action.ConsumerBuilder> configure) => services.AddBackgroundQueueConsumer(Reflection.FriendlyNameOf(), configure); - public static OptionsBuilder AddBackgroundQueueConsumer(this IServiceCollection services, - string name, Action.Builder> configure) + public static OptionsBuilder AddBackgroundQueueConsumer(this IServiceCollection services, + string name, Action.ConsumerBuilder> configure) { - var builder = new BackgroundQueue.Builder(name); + var builder = new BackgroundQueue.ConsumerBuilder(name); configure(builder); // TODO Try...() version of this one, to be gentle with multiple registrations of the same queue // (extend ServiceDescriptor, add name to it and search using it) services.AddHostedService(builder.Build); - return services.AddOptions(name); + return services.AddOptions(name); } } diff --git a/src/LocalPost/Executor.cs b/src/LocalPost/Executor.cs index a8b3f2e..b5752fd 100644 --- a/src/LocalPost/Executor.cs +++ b/src/LocalPost/Executor.cs @@ -18,8 +18,7 @@ internal sealed class BoundedExecutor : IExecutor { private readonly ConcurrentTasksList _tasks; - [ExcludeFromCodeCoverage] - public BoundedExecutor(string name, IOptionsMonitor options) : this(options.Get(name).MaxConcurrency) + public BoundedExecutor(string name, IOptionsMonitor options) : this(options.Get(name).MaxConcurrency) { } diff --git a/src/LocalPost/OptionsEx.cs b/src/LocalPost/OptionsEx.cs new file mode 100644 index 0000000..fc0977f --- /dev/null +++ b/src/LocalPost/OptionsEx.cs @@ -0,0 +1,9 @@ +using Microsoft.Extensions.Options; + +namespace LocalPost; + +public static class OptionsEx +{ + public 
static TOptions Get(this IOptionsMonitor optionsMonitor) => + optionsMonitor.Get(Reflection.FriendlyNameOf()); +} diff --git a/src/LocalPost/QueueOptions.cs b/src/LocalPost/QueueOptions.cs index 23a2cb3..4098f2c 100644 --- a/src/LocalPost/QueueOptions.cs +++ b/src/LocalPost/QueueOptions.cs @@ -5,10 +5,31 @@ namespace LocalPost; /// /// Background queue configuration. /// -public sealed record QueueOptions +public sealed record BackgroundQueueOptions +{ + public QueueOptions Queue { get; set; } = new(); + + public ConsumerOptions Consumer { get; set; } = new(); +} + +/// +/// Consumer configuration. +/// +public sealed record ConsumerOptions { /// /// How many messages to process in parallel. /// [Required] public ushort MaxConcurrency { get; set; } = ushort.MaxValue; } + +/// +/// Queue configuration. +/// +public sealed record QueueOptions +{ + // TODO Drop strategy + public ushort? MaxSize { get; set; } = ushort.MaxValue; + + public ushort? CompletionTimeout { get; set; } = 1_000; // Milliseconds +} diff --git a/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj b/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj index adf1316..cf24d63 100644 --- a/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj +++ b/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj @@ -13,6 +13,8 @@ + + From d192f52b532f4c1195221159a4fc71a20a56a732 Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Wed, 24 May 2023 07:04:50 +0000 Subject: [PATCH 03/33] WORKING --- README.md | 22 ++- samples/AmazonSqsApp/AmazonSqsApp.csproj | 1 - .../Controllers/WeatherForecastController.cs | 11 +- samples/AmazonSqsApp/Program.cs | 31 ++-- .../KafkaConsumerApp/KafkaConsumerApp.csproj | 10 +- src/LocalPost.KafkaConsumer/Builder.cs | 69 --------- src/LocalPost.KafkaConsumer/ConsumeContext.cs | 4 +- .../DependencyInjection/HealthChecks.cs | 15 +- .../ServiceRegistration.cs | 32 +++-- .../IMessageHandler.cs | 6 +- 
.../KafkaConsumerService.cs | 101 +++++++++++++ src/LocalPost.KafkaConsumer/MessageSource.cs | 133 ++++++++++-------- src/LocalPost.KafkaConsumer/Options.cs | 5 +- src/LocalPost.SqsConsumer/Builder.cs | 40 ------ .../ConsumerMiddleware.cs | 33 ++--- .../DependencyInjection/HealthChecks.cs | 14 +- .../ServiceRegistration.cs | 43 +++--- src/LocalPost.SqsConsumer/MessageSource.cs | 71 ++++++---- src/LocalPost.SqsConsumer/Options.cs | 23 +-- src/LocalPost.SqsConsumer/QueueClient.cs | 5 - .../SqsConsumerService.cs | 92 ++++++++++++ .../AsyncEnumerableEx.cs | 2 +- .../AsyncEnumerableMerger.cs | 2 +- .../{ => AsyncEnumerable}/BatchBuilder.cs | 5 +- .../BatchingAsyncEnumerable.cs | 2 +- .../{ => AsyncEnumerable}/ConcurrentSet.cs | 2 +- src/LocalPost/BackgroundJobQueue.cs | 6 +- src/LocalPost/BackgroundQueue.cs | 32 ++--- src/LocalPost/BackgroundQueueBuilder.cs | 44 ------ src/LocalPost/BackgroundQueueConsumer.cs | 130 +++++++++-------- src/LocalPost/BackgroundQueueOptions.cs | 22 +++ src/LocalPost/BackgroundQueueService.cs | 112 +++++++++++++++ src/LocalPost/BackgroundQueueSupervisor.cs | 24 ++-- src/LocalPost/BackgroundService.cs | 4 +- src/LocalPost/BackgroundServiceSupervisor.cs | 133 +++++++++++++----- ...ReaderExtensions.cs => ChannelReaderEx.cs} | 2 +- src/LocalPost/ConcurrentTasksList.cs | 42 ------ .../DependencyInjection/Configuration.cs | 14 -- .../DependencyInjection/HealthChecks.cs | 50 +------ .../JobQueueRegistration.cs | 2 +- .../DependencyInjection/QueueRegistration.cs | 51 +++---- .../ServiceProviderLookups.cs | 7 +- src/LocalPost/Executor.cs | 56 -------- src/LocalPost/MiddlewareStackBuilder.cs | 2 +- src/LocalPost/QueueOptions.cs | 35 +---- src/LocalPost/ScopedHandler.cs | 48 +++++++ .../AsyncEnumTests.cs | 10 ++ .../AsyncEnumerableMergerTests.cs | 3 +- .../BatchingAsyncEnumerableTests.cs | 1 + 49 files changed, 873 insertions(+), 731 deletions(-) delete mode 100644 src/LocalPost.KafkaConsumer/Builder.cs create mode 100644 
src/LocalPost.KafkaConsumer/KafkaConsumerService.cs delete mode 100644 src/LocalPost.SqsConsumer/Builder.cs create mode 100644 src/LocalPost.SqsConsumer/SqsConsumerService.cs rename src/LocalPost/{ => AsyncEnumerable}/AsyncEnumerableEx.cs (91%) rename src/LocalPost/{ => AsyncEnumerable}/AsyncEnumerableMerger.cs (98%) rename src/LocalPost/{ => AsyncEnumerable}/BatchBuilder.cs (88%) rename src/LocalPost/{ => AsyncEnumerable}/BatchingAsyncEnumerable.cs (98%) rename src/LocalPost/{ => AsyncEnumerable}/ConcurrentSet.cs (98%) delete mode 100644 src/LocalPost/BackgroundQueueBuilder.cs create mode 100644 src/LocalPost/BackgroundQueueOptions.cs create mode 100644 src/LocalPost/BackgroundQueueService.cs rename src/LocalPost/{ChannelReaderExtensions.cs => ChannelReaderEx.cs} (92%) delete mode 100644 src/LocalPost/ConcurrentTasksList.cs delete mode 100644 src/LocalPost/DependencyInjection/Configuration.cs delete mode 100644 src/LocalPost/Executor.cs create mode 100644 src/LocalPost/ScopedHandler.cs create mode 100644 tests/LocalPost.SqsConsumer.Tests/AsyncEnumTests.cs rename tests/LocalPost.Tests/{ => AsyncEnumerable}/AsyncEnumerableMergerTests.cs (98%) rename tests/LocalPost.Tests/{ => AsyncEnumerable}/BatchingAsyncEnumerableTests.cs (97%) diff --git a/README.md b/README.md index b4997a3..7d1c192 100644 --- a/README.md +++ b/README.md @@ -2,10 +2,10 @@ Simple .NET in-memory background queue ([System.Threading.Channels](https://learn.microsoft.com/de-de/dotnet/api/system.threading.channels?view=net-6.0) based). -## Alternatives +## Background tasks + +There are multiple ways to run background tasks in .NET. 
The most common are: -- [Coravel queue](https://docs.coravel.net/Queuing/)/event broadcasting — only invocable queueing, event broadcasting is different from consuming a queue -- [Hangfire](https://www.hangfire.io/) — for persistent queues (means payload serialisation), LocalPost is completely about in-memory ones ## Amazon SQS Consumer @@ -16,3 +16,19 @@ To operate on a queue below [permissions](https://docs.aws.amazon.com/AWSSimpleQ - sqs:GetQueueAttributes - sqs:ReceiveMessage - sqs:ChangeMessageVisibility + +## Usage + +### Installation + +### .NET 8 asynchronous background services handling + +Before version 8 .NET runtime handled start/stop of the services only synchronously, but now it is possible to enable concurrent handling of the services. This is done by setting `HostOptions` property `ConcurrentServiceExecution` to `true`: + +See https://github.com/dotnet/runtime/blob/v8.0.0/src/libraries/Microsoft.Extensions.Hosting/src/Internal/Host.cs +See https://github.com/dotnet/runtime/blob/main/src/libraries/Microsoft.Extensions.Hosting/src/HostOptions.cs + +## Similar projects + +- [Coravel queue](https://docs.coravel.net/Queuing/)/event broadcasting — only invocable queueing, event broadcasting is different from consuming a queue +- [Hangfire](https://www.hangfire.io/) — for persistent queues (means payload serialisation), LocalPost is completely about in-memory ones diff --git a/samples/AmazonSqsApp/AmazonSqsApp.csproj b/samples/AmazonSqsApp/AmazonSqsApp.csproj index 06fa56d..d090cfa 100644 --- a/samples/AmazonSqsApp/AmazonSqsApp.csproj +++ b/samples/AmazonSqsApp/AmazonSqsApp.csproj @@ -13,7 +13,6 @@ - diff --git a/samples/AmazonSqsApp/Controllers/WeatherForecastController.cs b/samples/AmazonSqsApp/Controllers/WeatherForecastController.cs index 562c19f..928117d 100644 --- a/samples/AmazonSqsApp/Controllers/WeatherForecastController.cs +++ b/samples/AmazonSqsApp/Controllers/WeatherForecastController.cs @@ -1,6 +1,4 @@ -using 
Amazon.SimpleNotificationService.Model; using LocalPost; -using LocalPost.SnsPublisher; using Microsoft.AspNetCore.Mvc; namespace AmazonSqsApp.Controllers; @@ -15,12 +13,10 @@ public class WeatherForecastController : ControllerBase }; private readonly IBackgroundQueue _queue; - private readonly ISnsPublisher _sns; - public WeatherForecastController(IBackgroundQueue queue, ISnsPublisher sns) + public WeatherForecastController(IBackgroundQueue queue) { _queue = queue; - _sns = sns; } [HttpGet(Name = "GetWeatherForecast")] @@ -35,11 +31,6 @@ public async ValueTask> Get() await _queue.Enqueue(forecasts[0]); - await _sns.ForTopic("arn:aws:sns:eu-central-1:703886664977:test").Enqueue(new PublishBatchRequestEntry - { - Message = forecasts[0].Summary - }); - return forecasts; } } diff --git a/samples/AmazonSqsApp/Program.cs b/samples/AmazonSqsApp/Program.cs index 26c2c48..bfd7da2 100644 --- a/samples/AmazonSqsApp/Program.cs +++ b/samples/AmazonSqsApp/Program.cs @@ -1,13 +1,12 @@ -using Amazon.SimpleNotificationService; using Amazon.SQS; -using LocalPost.SnsPublisher.DependencyInjection; -using LocalPost.DependencyInjection; using AmazonSqsApp; +using LocalPost; +using LocalPost.DependencyInjection; +using LocalPost.SqsConsumer; using LocalPost.SqsConsumer.DependencyInjection; var builder = WebApplication.CreateBuilder(args); - // A background queue with an inline handler builder.Services.AddBackgroundQueue( // TODO Automatically add the health checks?.. 
@@ -18,19 +17,10 @@ }); -// An async Amazon SNS sender, buffers messages and sends them in batches in the background -builder.Services.AddAWSService(); -builder.Services.AddAmazonSnsBatchPublisher(); - - // An Amazon SQS consumer builder.Services.AddAWSService(); -builder.Services.AddAmazonSqsConsumer("test", - async (context, ct) => - { - await Task.Delay(1_000, ct); - Console.WriteLine(context.Message.Body); - }); +builder.Services.AddScoped(); +builder.Services.AddAmazonSqsConsumer("test"); builder.Services.AddControllers(); @@ -49,3 +39,14 @@ app.UseAuthorization(); app.MapControllers(); app.Run(); + + + +class SqsHandler : IHandler +{ + public async Task InvokeAsync(ConsumeContext payload, CancellationToken ct) + { + await Task.Delay(1_000, ct); + Console.WriteLine(payload.Message.Body); + } +} diff --git a/samples/KafkaConsumerApp/KafkaConsumerApp.csproj b/samples/KafkaConsumerApp/KafkaConsumerApp.csproj index 3b880d8..e2ff44c 100644 --- a/samples/KafkaConsumerApp/KafkaConsumerApp.csproj +++ b/samples/KafkaConsumerApp/KafkaConsumerApp.csproj @@ -1,13 +1,19 @@ - net6.0 - enable + net7 enable + + + + + + + diff --git a/src/LocalPost.KafkaConsumer/Builder.cs b/src/LocalPost.KafkaConsumer/Builder.cs deleted file mode 100644 index 4825c10..0000000 --- a/src/LocalPost.KafkaConsumer/Builder.cs +++ /dev/null @@ -1,69 +0,0 @@ -using System.Threading.Channels; -using Confluent.Kafka; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Options; - -namespace LocalPost.KafkaConsumer; - -public static partial class MessageSource -{ - public sealed class Builder - { - private Action> _configure = (cb) => { }; - - private readonly Channel> _queue = - // Kafka client (librdkafka) is optimised to prefetch messages, so there is no need to maintain our own - // buffer - Channel.CreateBounded>(new BoundedChannelOptions(1) - { - SingleWriter = true, - SingleReader = true - }); - - public Builder(string name) - { - Name = name; - } - - public string Name { 
get; } - - internal IAsyncEnumerable> Messages => _queue.Reader.ReadAllAsync(); - - public MiddlewareStackBuilder> MiddlewareStackBuilder { get; } = new(); - - // TODO Implement -// public Builder SetMessageHandler(Handler> handler) -// { -// _handler = (c, ct) => handler(c.Result.Message, ct); -// -// return this; -// } - - // Allows to configure message format (Avro/JSON/Protobuf/etc.) and other things - public Builder ConfigureKafkaClient(Action> configure) - { - _configure = configure; - - return this; - } - - internal HandlerFactory> BuildHandlerFactory() => - MiddlewareStackBuilder.Build().Resolve; - - internal BackgroundServiceSupervisor Build(IServiceProvider provider) - { - var clientConfig = provider.GetRequiredService>().Get(Name); - - var clientBuilder = new ConsumerBuilder(clientConfig.Kafka); - _configure(clientBuilder); - - var kafkaClient = clientBuilder.Build(); - var consumer = ActivatorUtilities.CreateInstance(provider, Name, clientConfig, kafkaClient); - - var supervisor = ActivatorUtilities - .CreateInstance>(provider, consumer); - - return supervisor; - } - } -} diff --git a/src/LocalPost.KafkaConsumer/ConsumeContext.cs b/src/LocalPost.KafkaConsumer/ConsumeContext.cs index 0b70d8e..10356f4 100644 --- a/src/LocalPost.KafkaConsumer/ConsumeContext.cs +++ b/src/LocalPost.KafkaConsumer/ConsumeContext.cs @@ -4,8 +4,8 @@ namespace LocalPost.KafkaConsumer; public readonly record struct ConsumeContext { - // To commit the offset manually, for example. Do we really need to support these complex use cases?.. - public required IConsumer Client { get; init; } + // To commit the offset manually, we need something. But it's a complex case... For the future. 
+// public required IConsumer Client { get; init; } public required ConsumeResult Result { get; init; } } diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecks.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecks.cs index e821eef..9c7e88e 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecks.cs +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecks.cs @@ -1,5 +1,4 @@ using System.Diagnostics.CodeAnalysis; -using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Diagnostics.HealthChecks; @@ -9,15 +8,13 @@ public static class HealthChecks { [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] public static IHealthChecksBuilder AddKafkaConsumerReadinessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, - TimeSpan? timeout = default) => builder - .AddBackgroundServiceReadinessCheck.Service>(name, failureStatus, tags, timeout) - .AddBackgroundQueueConsumerReadinessCheck>(name, failureStatus, tags, timeout); + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder + .Add(KafkaConsumerService.QueueReadinessCheck(name, failureStatus, tags)) + .Add(KafkaConsumerService.ConsumerGroupReadinessCheck(name, failureStatus, tags)); [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] public static IHealthChecksBuilder AddKafkaConsumerLivenessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, - TimeSpan? timeout = default) => builder - .AddBackgroundServiceLivenessCheck.Service>(name, failureStatus, tags, timeout) - .AddBackgroundQueueConsumerLivenessCheck>(name, failureStatus, tags, timeout); + string name, HealthStatus? failureStatus = default, IEnumerable? 
tags = default) => builder + .Add(KafkaConsumerService.QueueLivenessCheck(name, failureStatus, tags)) + .Add(KafkaConsumerService.ConsumerGroupLivenessCheck(name, failureStatus, tags)); } diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs index 4e5fa6c..11c3473 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs @@ -1,24 +1,34 @@ +using Confluent.Kafka; using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.DependencyInjection.Extensions; +using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Options; namespace LocalPost.KafkaConsumer.DependencyInjection; public static class ServiceRegistration { - public static OptionsBuilder AddKafkaConsumer(this IServiceCollection services, - string name, Action.Builder> configure) + public static OptionsBuilder AddKafkaConsumer(this IServiceCollection services, string name, + Action>> configure, + Action> configureClient) => + services.AddKafkaConsumer(name, configure, configureClient); + + public static OptionsBuilder AddKafkaConsumer(this IServiceCollection services, string name, + Action>> configure, + Action> configureClient) { - var builder = new MessageSource.Builder(name); - configure(builder); - services.AddHostedService(builder.Build); + var handleStackBuilder = new MiddlewareStackBuilder>(); + configure(handleStackBuilder); + var handlerStack = handleStackBuilder.Build(); + + services.TryAddSingleton(provider => KafkaConsumerService.Create(provider, + name, handlerStack, configureClient)); + + services.AddSingleton(provider => + provider.GetRequiredService>(name).Supervisor); - services - .AddBackgroundQueueConsumer>(name, b => b - .SetReaderFactory(_ => builder.Messages) - .MiddlewareStackBuilder.SetHandler(builder.BuildHandlerFactory())) - 
.Configure>( - (options, consumerOptions) => { options.MaxConcurrency = consumerOptions.Get(name).Consumer.MaxConcurrency; }); + // Extend ServiceDescriptor for better comparison and implement custom TryAddSingleton later... return services.AddOptions(name).Configure(options => options.TopicName = name); } diff --git a/src/LocalPost.KafkaConsumer/IMessageHandler.cs b/src/LocalPost.KafkaConsumer/IMessageHandler.cs index 6cadbf6..746a6d0 100644 --- a/src/LocalPost.KafkaConsumer/IMessageHandler.cs +++ b/src/LocalPost.KafkaConsumer/IMessageHandler.cs @@ -2,6 +2,10 @@ namespace LocalPost.KafkaConsumer; -public interface IMessageHandler : LocalPost.IHandler> +public interface IMessageHandler : IHandler> +{ +} + +public interface IMessageHandler : IMessageHandler { } diff --git a/src/LocalPost.KafkaConsumer/KafkaConsumerService.cs b/src/LocalPost.KafkaConsumer/KafkaConsumerService.cs new file mode 100644 index 0000000..75681e2 --- /dev/null +++ b/src/LocalPost.KafkaConsumer/KafkaConsumerService.cs @@ -0,0 +1,101 @@ +using System.Collections.Immutable; +using Confluent.Kafka; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; +using Microsoft.Extensions.Hosting; + +namespace LocalPost.KafkaConsumer; + +internal sealed class KafkaConsumerService : INamedService +{ + public static KafkaConsumerService Create(IServiceProvider provider, string name, + MiddlewareStack> handlerStack, + Action> configureClient) + { + var options = provider.GetOptions(name); + + var clientBuilder = new ConsumerBuilder(options.Kafka); + configureClient(clientBuilder); + + var kafkaClient = clientBuilder.Build(); + var messageSource = ActivatorUtilities.CreateInstance>(provider, + options.TopicName, kafkaClient); + var queueSupervisor = ActivatorUtilities.CreateInstance(provider, + name, messageSource); + + HandlerFactory> handlerFactory = handlerStack.Resolve; + Handler> handler = + 
ActivatorUtilities.CreateInstance>>(provider, + name, handlerFactory).InvokeAsync; + + var consumers = Enumerable.Range(1, options.MaxConcurrency) + .Select(_ => + { + var consumer = new BackgroundQueue>.Consumer(messageSource, handler); + var supervisor = ActivatorUtilities.CreateInstance(provider, + name, consumer); + + return supervisor; + }).ToImmutableList(); + + return new KafkaConsumerService(name, options, queueSupervisor, consumers); + } + + public KafkaConsumerService(string name, Options options, + IBackgroundServiceSupervisor reader, + IEnumerable consumers) + { + Name = name; + Options = options; + + _queueReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(reader); + _queueLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(reader); + + var consumerGroup= new IBackgroundServiceSupervisor.Combined(consumers); + _consumerGroupReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(consumerGroup); + _consumerGroupLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(consumerGroup); + + Supervisor = new CombinedHostedService(reader, consumerGroup); + } + + public string Name { get; } + + public Options Options { get; } + + // Expose only the root supervisor to the host, to avoid deadlocks (.NET runtime handles background services + // synchronously by default, so if consumers are stopped first, they will block the reader from completing the + // channel). + public IHostedService Supervisor { get; } + + private readonly IHealthCheck _queueReadinessCheck; + private readonly IHealthCheck _queueLivenessCheck; + + private readonly IHealthCheck _consumerGroupReadinessCheck; + private readonly IHealthCheck _consumerGroupLivenessCheck; + + public static HealthCheckRegistration QueueReadinessCheck(string name, HealthStatus? failureStatus = default, + IEnumerable? 
tags = default) => new(name, + provider => provider.GetRequiredService>(name)._queueReadinessCheck, + failureStatus, + tags); + + public static HealthCheckRegistration QueueLivenessCheck(string name, HealthStatus? failureStatus = default, + IEnumerable? tags = default) => new(name, + provider => provider.GetRequiredService>(name)._queueLivenessCheck, + failureStatus, + tags); + + public static HealthCheckRegistration ConsumerGroupReadinessCheck(string name, + HealthStatus? failureStatus = default, + IEnumerable? tags = default) => new(name, + provider => provider.GetRequiredService>(name)._consumerGroupReadinessCheck, + failureStatus, + tags); + + public static HealthCheckRegistration ConsumerGroupLivenessCheck(string name, HealthStatus? failureStatus = default, + IEnumerable? tags = default) => new(name, + provider => provider.GetRequiredService>(name)._consumerGroupLivenessCheck, + failureStatus, + tags); +} diff --git a/src/LocalPost.KafkaConsumer/MessageSource.cs b/src/LocalPost.KafkaConsumer/MessageSource.cs index 641c513..7fcda42 100644 --- a/src/LocalPost.KafkaConsumer/MessageSource.cs +++ b/src/LocalPost.KafkaConsumer/MessageSource.cs @@ -4,88 +4,101 @@ namespace LocalPost.KafkaConsumer; -public static partial class MessageSource +internal sealed class MessageSource : IBackgroundService, + IAsyncEnumerable>, IDisposable { - internal sealed class Service : IBackgroundService, IDisposable - { - private readonly ILogger _logger; - private readonly Options _options; + private readonly ILogger> _logger; + private readonly string _topicName; - private readonly IConsumer _kafka; - private readonly ChannelWriter> _queue; + private readonly IConsumer _kafka; + private readonly Channel> _queue; - public Service(ILogger logger, string name, Options options, IConsumer kafka, - ChannelWriter> queue) + public MessageSource(ILogger> logger, string topicName, IConsumer kafka) + { + _logger = logger; + _topicName = topicName; + _kafka = kafka; + _queue = 
Channel.CreateBounded>(new BoundedChannelOptions(1) { - _logger = logger; - _options = options; - _kafka = kafka; - _queue = queue; - Name = name; - } + SingleWriter = true, + SingleReader = false + }); + } - public string Name { get; } + public async IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct = default) + { + // Track full or not later + while (await _queue.Reader.WaitToReadAsync(ct)) + while (_queue.Reader.TryRead(out var item)) + yield return item; + } - public Task StartAsync(CancellationToken ct) => Task.Run(() => _kafka.Subscribe(_options.TopicName), ct); + public static implicit operator ChannelReader>(MessageSource that) => + that._queue.Reader; - public Task ExecuteAsync(CancellationToken ct) => Task.Run(() => Run(ct), ct); + public static implicit operator ChannelWriter>(MessageSource that) => + that._queue.Writer; - private async Task Run(CancellationToken stoppingToken = default) - { - while (!stoppingToken.IsCancellationRequested) - { - await Task.Yield(); - - try - { - await _queue.WaitToWriteAsync(stoppingToken); // Wait for the buffer capacity - - await Consume(stoppingToken); - } - catch (OperationCanceledException e) when (e.CancellationToken == stoppingToken) - { - // Just complete the method normally... - } - } - } + public Task StartAsync(CancellationToken ct) => Task.Run(() => _kafka.Subscribe(_topicName), ct); - private async Task Consume(CancellationToken stoppingToken) + public Task ExecuteAsync(CancellationToken ct) => Task.Run(() => Run(ct), ct); + + private async Task Run(CancellationToken stoppingToken = default) + { + while (!stoppingToken.IsCancellationRequested) { - // TODO Transaction activity... 
+ await Task.Yield(); + try { - var consumeResult = _kafka.Consume(stoppingToken); + await _queue.Writer.WaitToWriteAsync(stoppingToken); // Wait for the buffer capacity - if (consumeResult is null || consumeResult.IsPartitionEOF || consumeResult.Message is null) - return; // Continue the loop - - await _queue.WriteAsync(new ConsumeContext - { - Client = _kafka, - Result = consumeResult, - }, stoppingToken); + await Consume(stoppingToken); } - catch (ConsumeException e) + catch (OperationCanceledException e) when (e.CancellationToken == stoppingToken) { - _logger.LogError(e, "Kafka {Topic} consumer error, help link: {HelpLink}", - _options.TopicName, e.HelpLink); - - // Bubble up, so the supervisor can report the error and the whole app can be restarted (Kubernetes) - throw; + // Just complete the method normally... } } + } - public Task StopAsync(CancellationToken ct) => Task.Run(() => + private async Task Consume(CancellationToken stoppingToken) + { + // TODO Transaction activity... + try { - _logger.LogInformation("Stopping Kafka {Topic} consumer...", _options.TopicName); + var consumeResult = _kafka.Consume(stoppingToken); - _kafka.Close(); - _queue.Complete(); - }, ct); + if (consumeResult is null || consumeResult.IsPartitionEOF || consumeResult.Message is null) + return; // Continue the loop - public void Dispose() + await _queue.Writer.WriteAsync(new ConsumeContext + { +// Client = _kafka, + Result = consumeResult, + }, stoppingToken); + } + catch (ConsumeException e) { - _kafka.Dispose(); + _logger.LogError(e, "Kafka {Topic} consumer error, help link: {HelpLink}", + _topicName, e.HelpLink); + + // Bubble up, so the supervisor can report the error and the whole app can be restarted (by Kubernetes or + // another orchestrator) + throw; } } + + public Task StopAsync(CancellationToken ct) => Task.Run(() => + { + _logger.LogInformation("Stopping Kafka {Topic} consumer...", _topicName); + + _kafka.Close(); + _queue.Writer.Complete(); + }, ct); + + public void 
Dispose() + { + _kafka.Dispose(); + } } diff --git a/src/LocalPost.KafkaConsumer/Options.cs b/src/LocalPost.KafkaConsumer/Options.cs index 12a57f0..5ea44cd 100644 --- a/src/LocalPost.KafkaConsumer/Options.cs +++ b/src/LocalPost.KafkaConsumer/Options.cs @@ -9,5 +9,8 @@ public sealed class Options [Required] public string TopicName { get; set; } = null!; - public ConsumerOptions Consumer { get; set; } = new(); + /// + /// How many messages to process in parallel. + /// + [Required] public ushort MaxConcurrency { get; set; } = ushort.MaxValue; } diff --git a/src/LocalPost.SqsConsumer/Builder.cs b/src/LocalPost.SqsConsumer/Builder.cs deleted file mode 100644 index c8795b3..0000000 --- a/src/LocalPost.SqsConsumer/Builder.cs +++ /dev/null @@ -1,40 +0,0 @@ -using System.Threading.Channels; -using Microsoft.Extensions.DependencyInjection; - -namespace LocalPost.SqsConsumer; - -public static partial class MessageSource -{ - public sealed class Builder - { - private Channel _queue = Channel.CreateBounded(new BoundedChannelOptions(config.BufferSize) - { - SingleWriter = true, - SingleReader = true - }); - - public Builder(string name) - { - Name = name; - MiddlewareStackBuilder.Append(); - } - - public string Name { get; } - - public MiddlewareStackBuilder MiddlewareStackBuilder { get; } = new(); - - internal HandlerFactory BuildHandlerFactory() => - MiddlewareStackBuilder.Build().Resolve; - - internal BackgroundServiceSupervisor Build(IServiceProvider provider) - { - var client = ActivatorUtilities.CreateInstance(provider, Name); - var consumer = ActivatorUtilities.CreateInstance(provider, Name, client); - - var supervisor = ActivatorUtilities - .CreateInstance>(provider, consumer); - - return supervisor; - } - } -} diff --git a/src/LocalPost.SqsConsumer/ConsumerMiddleware.cs b/src/LocalPost.SqsConsumer/ConsumerMiddleware.cs index cb4824d..8b71982 100644 --- a/src/LocalPost.SqsConsumer/ConsumerMiddleware.cs +++ b/src/LocalPost.SqsConsumer/ConsumerMiddleware.cs @@ -2,27 +2,24 
@@ namespace LocalPost.SqsConsumer; -public static partial class MessageSource +internal sealed class ProcessedMessageHandler : IMiddleware { - internal sealed class Middleware : IMiddleware // TODO Rename - { - private readonly IAmazonSQS _sqs; + private readonly IAmazonSQS _sqs; - public Middleware(IAmazonSQS sqs) - { - _sqs = sqs; - } + public ProcessedMessageHandler(IAmazonSQS sqs) + { + _sqs = sqs; + } - public Handler Invoke(Handler next) => async (context, ct) => - { - if (context.IsStale) - return; + public Handler Invoke(Handler next) => async (context, ct) => + { + if (context.IsStale) + return; - // TODO Processing timeout from the visibility timeout - await next(context, ct); // Extend message's VisibilityTimeout in case of long processing?.. + // TODO Processing timeout from the visibility timeout + await next(context, ct); // Extend message's VisibilityTimeout in case of long processing?.. - // Won't be deleted in case of an exception in the handler - await _sqs.DeleteMessageAsync(context.QueueUrl, context.Message.ReceiptHandle, ct); - }; - } + // Won't be deleted in case of an exception in the handler + await _sqs.DeleteMessageAsync(context.QueueUrl, context.Message.ReceiptHandle, ct); + }; } diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecks.cs b/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecks.cs index ad21ed0..22b2fba 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecks.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecks.cs @@ -9,15 +9,13 @@ public static class HealthChecks { [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] public static IHealthChecksBuilder AddAmazonSqsConsumerReadinessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, - TimeSpan? 
timeout = default) => builder - .AddBackgroundServiceReadinessCheck(name, failureStatus, tags, timeout) - .AddBackgroundQueueConsumerReadinessCheck(name, failureStatus, tags, timeout); + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder + .Add(SqsConsumerService.QueueReadinessCheck(name, failureStatus, tags)) + .Add(SqsConsumerService.ConsumerGroupReadinessCheck(name, failureStatus, tags)); [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] public static IHealthChecksBuilder AddAmazonSqsConsumerLivenessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, - TimeSpan? timeout = default) => builder - .AddBackgroundServiceLivenessCheck(name, failureStatus, tags, timeout) - .AddBackgroundQueueConsumerLivenessCheck(name, failureStatus, tags, timeout); + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder + .Add(SqsConsumerService.QueueLivenessCheck(name, failureStatus, tags)) + .Add(SqsConsumerService.ConsumerGroupLivenessCheck(name, failureStatus, tags)); } diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs index f8022da..5e8f646 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs @@ -1,12 +1,14 @@ using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.DependencyInjection.Extensions; +using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Options; namespace LocalPost.SqsConsumer.DependencyInjection; public static class ServiceRegistration { + // TODO Implement // public static OptionsBuilder AddAmazonSqsJsonConsumer(this IServiceCollection services, // string name, Action? 
configure = null) where THandler : IHandler => // services.AddAmazonSqsConsumer(name, builder => @@ -16,20 +18,21 @@ public static class ServiceRegistration // }); public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, - string name, Action? configure = null) where THandler : IHandler => + string name, Action>? configure = null) + where THandler : IHandler => services.AddAmazonSqsConsumer(name, builder => { - builder.MiddlewareStackBuilder.SetHandler(); + builder.SetHandler(); configure?.Invoke(builder); }); - public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, - string name, Handler handler, Action? configure = null) => - services.AddAmazonSqsConsumer(name, builder => - { - builder.MiddlewareStackBuilder.SetHandler(handler); - configure?.Invoke(builder); - }); +// public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, +// string name, Handler handler, Action? configure = null) => +// services.AddAmazonSqsConsumer(name, builder => +// { +// builder.MiddlewareStackBuilder.SetHandler(handler); +// configure?.Invoke(builder); +// }); // public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, // string name, Func handler) where TDep1 : notnull => @@ -72,20 +75,20 @@ public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollecti // .AddAmazonSqsConsumer(name, provider => provider.GetRequiredService().Process); public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, - string name, Action configure) + string name, Action> configure) { - var builder = new MessageSource.Builder(name); - configure(builder); - services.AddHostedService(builder.Build); + var handleStackBuilder = new MiddlewareStackBuilder(); + services.TryAddSingleton(); + handleStackBuilder.Append(); + configure(handleStackBuilder); + var handlerStack = handleStackBuilder.Build(); + + services.TryAddSingleton(provider => 
SqsConsumerService.Create(provider, name, handlerStack)); - services.TryAddSingleton(); + services.AddSingleton(provider => + provider.GetRequiredService(name).Supervisor); - services - .AddBackgroundQueueConsumer(name, b => b - .SetReaderFactory(provider => provider.GetRequiredService(name).Messages) - .MiddlewareStackBuilder.SetHandler(builder.BuildHandlerFactory())) - .Configure>( - (options, consumerOptions) => { options.MaxConcurrency = consumerOptions.Get(name).MaxConcurrency; }); + // Extend ServiceDescriptor for better comparison and implement custom TryAddSingleton later... return services.AddOptions(name).Configure(options => options.QueueName = name); } diff --git a/src/LocalPost.SqsConsumer/MessageSource.cs b/src/LocalPost.SqsConsumer/MessageSource.cs index 72d927c..a4a0e65 100644 --- a/src/LocalPost.SqsConsumer/MessageSource.cs +++ b/src/LocalPost.SqsConsumer/MessageSource.cs @@ -1,34 +1,42 @@ using System.Threading.Channels; -using Microsoft.Extensions.Options; namespace LocalPost.SqsConsumer; -public static partial class MessageSource +internal sealed class MessageSource : IBackgroundService, IAsyncEnumerable { - internal sealed class Service : IBackgroundService - { - private readonly QueueClient _client; - private readonly Channel _queue; + private readonly QueueClient _client; + private readonly Channel _queue; - public Service(string name, QueueClient client, IOptionsMonitor options) + public MessageSource(QueueClient client) + { + _client = client; + _queue = Channel.CreateBounded(new BoundedChannelOptions(1) { - var config = options.Get(name); - - _client = client; + SingleWriter = true, // Spawn multiple readers later?.. 
+ SingleReader = false + }); + } - Name = name; - } + public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) + { + // Track full or not later + while (await _queue.Reader.WaitToReadAsync(ct)) + while (_queue.Reader.TryRead(out var item)) + yield return item; + } - public string Name { get; } + public static implicit operator ChannelReader(MessageSource that) => that._queue.Reader; - public IAsyncEnumerable Messages => _queue.Reader.ReadAllAsync(); + public static implicit operator ChannelWriter(MessageSource that) => that._queue.Writer; - public async Task StartAsync(CancellationToken ct) - { - await _client.ConnectAsync(ct); - } + public async Task StartAsync(CancellationToken ct) + { + await _client.ConnectAsync(ct); + } - public async Task ExecuteAsync(CancellationToken ct) + public async Task ExecuteAsync(CancellationToken ct) + { + try { while (!ct.IsCancellationRequested) { @@ -37,20 +45,23 @@ public async Task ExecuteAsync(CancellationToken ct) await Consume(ct); } } - - public Task StopAsync(CancellationToken ct) + finally { - _queue.Writer.Complete(); - - return Task.CompletedTask; } + } - private async Task Consume(CancellationToken stoppingToken) - { - var messages = await _client.PullMessagesAsync(stoppingToken); + public Task StopAsync(CancellationToken ct) + { + _queue.Writer.Complete(); - foreach (var message in messages) - await _queue.Writer.WriteAsync(message, stoppingToken); - } + return Task.CompletedTask; + } + + private async Task Consume(CancellationToken stoppingToken) + { + var messages = await _client.PullMessagesAsync(stoppingToken); + + foreach (var message in messages) + await _queue.Writer.WriteAsync(message, stoppingToken); } } diff --git a/src/LocalPost.SqsConsumer/Options.cs b/src/LocalPost.SqsConsumer/Options.cs index 43020ca..7108032 100644 --- a/src/LocalPost.SqsConsumer/Options.cs +++ b/src/LocalPost.SqsConsumer/Options.cs @@ -1,8 +1,4 @@ -using System.Collections.Immutable; using 
System.ComponentModel.DataAnnotations; -using Amazon.SQS; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Options; namespace LocalPost.SqsConsumer; @@ -17,7 +13,7 @@ public sealed record Options public const int DefaultTimeout = 30; /// - /// How many messages to process in parallel. + /// How many messages to process in parallel. Default is 10. /// [Required] public ushort MaxConcurrency { get; set; } = 10; @@ -42,6 +38,7 @@ [Url] public string? QueueUrl /// /// Time to wait for available messages in the queue. 0 is short pooling, where 1..20 activates long pooling. + /// Default is 20. /// /// /// Amazon SQS short and long polling /// /// /// [Range(0, 20)] public byte WaitTimeSeconds { get; set; } = 20; + /// + /// The maximum number of messages to return. Amazon SQS never returns more messages than this value (however, + /// fewer messages might be returned). Valid values: 1 to 10. Default is 10. + /// + /// + /// Amazon SQS short and long polling + /// + /// + /// Setting up long polling + /// [Range(1, 10)] public byte MaxNumberOfMessages { get; set; } = 10; - [Range(1, uint.MaxValue)] public byte BufferSize { get; set; } = 1; - /// - /// Message processing timeout, in seconds. If not set, IAmazonSQS.GetQueueAttributesAsync() will be used once, to get - /// VisibilityTimeout for the queue. + /// Message processing timeout, in seconds. If not set, IAmazonSQS.GetQueueAttributesAsync() will be used once, + /// to get VisibilityTimeout for the queue.
/// /// /// Amazon SQS visibility timeout diff --git a/src/LocalPost.SqsConsumer/QueueClient.cs b/src/LocalPost.SqsConsumer/QueueClient.cs index 48c8800..2781412 100644 --- a/src/LocalPost.SqsConsumer/QueueClient.cs +++ b/src/LocalPost.SqsConsumer/QueueClient.cs @@ -17,11 +17,6 @@ internal sealed class QueueClient private readonly IAmazonSQS _sqs; private readonly Options _options; - public QueueClient(ILogger logger, string name, IOptionsMonitor options, IAmazonSQS sqs) : - this(logger, options.Get(name), sqs) - { - } - public QueueClient(ILogger logger, Options options, IAmazonSQS sqs) { _logger = logger; diff --git a/src/LocalPost.SqsConsumer/SqsConsumerService.cs b/src/LocalPost.SqsConsumer/SqsConsumerService.cs new file mode 100644 index 0000000..523de6e --- /dev/null +++ b/src/LocalPost.SqsConsumer/SqsConsumerService.cs @@ -0,0 +1,92 @@ +using System.Collections.Immutable; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; +using Microsoft.Extensions.Hosting; + +namespace LocalPost.SqsConsumer; + +internal sealed class SqsConsumerService : INamedService +{ + public static SqsConsumerService Create(IServiceProvider provider, string name, + MiddlewareStack handlerStack) + { + var options = provider.GetOptions(name); + + var client = ActivatorUtilities.CreateInstance(provider, options); + var messageSource = new MessageSource(client); + var queueSupervisor = ActivatorUtilities.CreateInstance(provider, name, messageSource); + + HandlerFactory handlerFactory = handlerStack.Resolve; + Handler handler = ActivatorUtilities.CreateInstance>(provider, + name, handlerFactory).InvokeAsync; + + var consumers = Enumerable.Range(1, options.MaxConcurrency) + .Select(_ => + { + var consumer = new BackgroundQueue.Consumer(messageSource, handler); + var supervisor = ActivatorUtilities.CreateInstance(provider, + name, consumer); + + return supervisor; + }).ToImmutableList(); + + return new 
SqsConsumerService(name, options, queueSupervisor, consumers); + } + + public SqsConsumerService(string name, Options options, + IBackgroundServiceSupervisor reader, + IEnumerable consumers) + { + Name = name; + Options = options; + + _queueReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(reader); + _queueLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(reader); + + var consumerGroup = new IBackgroundServiceSupervisor.Combined(consumers); + _consumerGroupReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(consumerGroup); + _consumerGroupLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(consumerGroup); + + Supervisor = new CombinedHostedService(reader, consumerGroup); + } + + public string Name { get; } + + public Options Options { get; } + + // Expose only the root supervisor to the host, to avoid deadlocks (.NET runtime handles background services + // synchronously by default, so if consumers are stopped first, they will block the reader from completing the + // channel). + public IHostedService Supervisor { get; } + + private readonly IHealthCheck _queueReadinessCheck; + private readonly IHealthCheck _queueLivenessCheck; + + private readonly IHealthCheck _consumerGroupReadinessCheck; + private readonly IHealthCheck _consumerGroupLivenessCheck; + + public static HealthCheckRegistration QueueReadinessCheck(string name, HealthStatus? failureStatus = default, + IEnumerable? tags = default) => new(name, + provider => provider.GetRequiredService(name)._queueReadinessCheck, + failureStatus, + tags); + + public static HealthCheckRegistration QueueLivenessCheck(string name, HealthStatus? failureStatus = default, + IEnumerable? tags = default) => new(name, + provider => provider.GetRequiredService(name)._queueLivenessCheck, + failureStatus, + tags); + + public static HealthCheckRegistration ConsumerGroupReadinessCheck(string name, HealthStatus? failureStatus = default, + IEnumerable? 
tags = default) => new(name, + provider => provider.GetRequiredService(name)._consumerGroupReadinessCheck, + failureStatus, + tags); + + public static HealthCheckRegistration ConsumerGroupLivenessCheck(string name, HealthStatus? failureStatus = default, + IEnumerable? tags = default) => new(name, + provider => provider.GetRequiredService(name)._consumerGroupLivenessCheck, + failureStatus, + tags); +} diff --git a/src/LocalPost/AsyncEnumerableEx.cs b/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs similarity index 91% rename from src/LocalPost/AsyncEnumerableEx.cs rename to src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs index 2784b1b..1459ede 100644 --- a/src/LocalPost/AsyncEnumerableEx.cs +++ b/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs @@ -1,4 +1,4 @@ -namespace LocalPost; +namespace LocalPost.AsyncEnumerable; internal static class AsyncEnumerableEx { diff --git a/src/LocalPost/AsyncEnumerableMerger.cs b/src/LocalPost/AsyncEnumerable/AsyncEnumerableMerger.cs similarity index 98% rename from src/LocalPost/AsyncEnumerableMerger.cs rename to src/LocalPost/AsyncEnumerable/AsyncEnumerableMerger.cs index df92a82..4c7ef21 100644 --- a/src/LocalPost/AsyncEnumerableMerger.cs +++ b/src/LocalPost/AsyncEnumerable/AsyncEnumerableMerger.cs @@ -2,7 +2,7 @@ using System.Diagnostics.CodeAnalysis; using System.Threading.Channels; -namespace LocalPost; +namespace LocalPost.AsyncEnumerable; internal sealed class AsyncEnumerableMerger : IAsyncEnumerable, IDisposable { diff --git a/src/LocalPost/BatchBuilder.cs b/src/LocalPost/AsyncEnumerable/BatchBuilder.cs similarity index 88% rename from src/LocalPost/BatchBuilder.cs rename to src/LocalPost/AsyncEnumerable/BatchBuilder.cs index 2bc3e12..85880e1 100644 --- a/src/LocalPost/BatchBuilder.cs +++ b/src/LocalPost/AsyncEnumerable/BatchBuilder.cs @@ -1,6 +1,6 @@ using Nito.AsyncEx; -namespace LocalPost; +namespace LocalPost.AsyncEnumerable; internal delegate IBatchBuilder BatchBuilderFactory(); @@ -43,6 +43,9 @@ public void 
Dispose() internal sealed class BoundedBatchBuilder : BatchBuilder> { + public static BatchBuilderFactory> Factory(int maxSize, int timeWindow) => + () => new BoundedBatchBuilder(maxSize, TimeSpan.FromMilliseconds(timeWindow)); + private readonly int _max; private readonly List _batch = new(); diff --git a/src/LocalPost/BatchingAsyncEnumerable.cs b/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs similarity index 98% rename from src/LocalPost/BatchingAsyncEnumerable.cs rename to src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs index 071d0b0..da98eb7 100644 --- a/src/LocalPost/BatchingAsyncEnumerable.cs +++ b/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs @@ -1,4 +1,4 @@ -namespace LocalPost; +namespace LocalPost.AsyncEnumerable; internal sealed class BatchingAsyncEnumerable : IAsyncEnumerable { diff --git a/src/LocalPost/ConcurrentSet.cs b/src/LocalPost/AsyncEnumerable/ConcurrentSet.cs similarity index 98% rename from src/LocalPost/ConcurrentSet.cs rename to src/LocalPost/AsyncEnumerable/ConcurrentSet.cs index e52964d..59bf0c0 100644 --- a/src/LocalPost/ConcurrentSet.cs +++ b/src/LocalPost/AsyncEnumerable/ConcurrentSet.cs @@ -2,7 +2,7 @@ using System.Collections.Immutable; using Nito.AsyncEx; -namespace LocalPost; +namespace LocalPost.AsyncEnumerable; internal sealed class ConcurrentSet : IEnumerable, IDisposable { diff --git a/src/LocalPost/BackgroundJobQueue.cs b/src/LocalPost/BackgroundJobQueue.cs index 8df07cf..d59db53 100644 --- a/src/LocalPost/BackgroundJobQueue.cs +++ b/src/LocalPost/BackgroundJobQueue.cs @@ -9,7 +9,7 @@ public interface IBackgroundJobQueue : IBackgroundQueue { } -internal sealed class BackgroundJobQueue : IBackgroundJobQueue, IBackgroundQueueManager +internal sealed class BackgroundJobQueue : IBackgroundJobQueue { private readonly BackgroundQueue _queue; @@ -18,9 +18,5 @@ public BackgroundJobQueue(BackgroundQueue queue) _queue = queue; } - public bool IsClosed => _queue.IsClosed; - public ValueTask Enqueue(Job 
item, CancellationToken ct = default) => _queue.Enqueue(item, ct); - - public ValueTask CompleteAsync(CancellationToken ct = default) => _queue.CompleteAsync(ct); } diff --git a/src/LocalPost/BackgroundQueue.cs b/src/LocalPost/BackgroundQueue.cs index ac42e48..5d885bd 100644 --- a/src/LocalPost/BackgroundQueue.cs +++ b/src/LocalPost/BackgroundQueue.cs @@ -3,14 +3,14 @@ namespace LocalPost; -public interface IBackgroundQueue +public partial interface IBackgroundQueue { // TODO Custom exception when closed?.. Or just return true/false?.. ValueTask Enqueue(T item, CancellationToken ct = default); } // TODO Open to public later -internal interface IBackgroundQueueManager +internal interface IBackgroundQueueManager { // Implement later for a better health check // bool IsFull { get; } @@ -41,26 +41,21 @@ public interface IMiddleware -public sealed partial class BackgroundQueue : IBackgroundQueue, IBackgroundQueueManager, IAsyncEnumerable +internal sealed partial class BackgroundQueue : IBackgroundQueue, IBackgroundQueueManager, IAsyncEnumerable { private readonly TimeSpan _completionTimeout; - // For the DI container - public BackgroundQueue(IOptions> options) : this(options.Value.Queue) - { - } - - public BackgroundQueue(QueueOptions options) : this( + public BackgroundQueue(BackgroundQueueOptions options) : this( options.MaxSize switch { not null => Channel.CreateBounded(new BoundedChannelOptions(options.MaxSize.Value) { - SingleReader = true, + SingleReader = options.MaxConcurrency == 1, SingleWriter = false, }), _ => Channel.CreateUnbounded(new UnboundedChannelOptions { - SingleReader = true, + SingleReader = options.MaxConcurrency == 1, SingleWriter = false, }) }, @@ -74,19 +69,24 @@ public BackgroundQueue(Channel messages, TimeSpan completionTimeout) Messages = messages; } - protected Channel Messages { get; } - - public bool IsClosed { get; private set; } - - public ValueTask Enqueue(T item, CancellationToken ct = default) => Messages.Writer.WriteAsync(item, ct); 
+ internal Channel Messages { get; } public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) { + // Track full or not later while (await Messages.Reader.WaitToReadAsync(ct)) while (Messages.Reader.TryRead(out var item)) yield return item; } + public static implicit operator ChannelReader(BackgroundQueue that) => that.Messages.Reader; + + public static implicit operator ChannelWriter(BackgroundQueue that) => that.Messages.Writer; + + public ValueTask Enqueue(T item, CancellationToken ct = default) => Messages.Writer.WriteAsync(item, ct); + + public bool IsClosed { get; private set; } + public async ValueTask CompleteAsync(CancellationToken ct = default) { if (IsClosed) diff --git a/src/LocalPost/BackgroundQueueBuilder.cs b/src/LocalPost/BackgroundQueueBuilder.cs deleted file mode 100644 index 3ee1886..0000000 --- a/src/LocalPost/BackgroundQueueBuilder.cs +++ /dev/null @@ -1,44 +0,0 @@ -using Microsoft.Extensions.DependencyInjection; - -namespace LocalPost; - -public sealed partial class BackgroundQueue -{ - public sealed class ConsumerBuilder - { - private Func>? _readerFactory; - - public ConsumerBuilder(string name) - { - Name = name; - } - - public string Name { get; } - - public MiddlewareStackBuilder MiddlewareStackBuilder { get; } = new(); - - public ConsumerBuilder SetReaderFactory(Func> factory) - { - _readerFactory = factory; - - return this; - } - - private HandlerFactory BuildHandlerFactory() => - MiddlewareStackBuilder.Build().Resolve; - - internal BackgroundServiceSupervisor Build(IServiceProvider provider) - { - // TODO Custom exception - var readerFactory = _readerFactory ?? 
throw new Exception($"Reader factory is required"); - - var executor = ActivatorUtilities.CreateInstance(provider, Name); - var consumer = ActivatorUtilities.CreateInstance>(provider, Name, - executor, readerFactory(provider), BuildHandlerFactory()); - var supervisor = ActivatorUtilities - .CreateInstance>>(provider, consumer); - - return supervisor; - } - } -} diff --git a/src/LocalPost/BackgroundQueueConsumer.cs b/src/LocalPost/BackgroundQueueConsumer.cs index 66a8e7f..f06e066 100644 --- a/src/LocalPost/BackgroundQueueConsumer.cs +++ b/src/LocalPost/BackgroundQueueConsumer.cs @@ -1,90 +1,98 @@ using System.Threading.Channels; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Logging; +using LocalPost.AsyncEnumerable; namespace LocalPost; -internal sealed class BackgroundQueueConsumer : IBackgroundService +internal sealed partial class BackgroundQueue { - private readonly ILogger> _logger; - private readonly IServiceScopeFactory _scopeFactory; - - private readonly IAsyncEnumerable _reader; - private readonly IExecutor _executor; - private readonly HandlerFactory _handlerFactory; - - public BackgroundQueueConsumer(ILogger> logger, string name, - IServiceScopeFactory scopeFactory, - IExecutor executor, - IAsyncEnumerable reader, - HandlerFactory handlerFactory) + internal sealed class Consumer : IBackgroundService { - Name = name; - _logger = logger; - _scopeFactory = scopeFactory; - _reader = reader; - _executor = executor; - _handlerFactory = handlerFactory; - } + private readonly IAsyncEnumerable _reader; + private readonly Handler _handler; - public string Name { get; } + public Consumer(IAsyncEnumerable reader, Handler handler) + { + _reader = reader; + _handler = handler; + } - public Task StartAsync(CancellationToken ct) => Task.CompletedTask; + public Task StartAsync(CancellationToken ct) => Task.CompletedTask; - public async Task ExecuteAsync(CancellationToken ct) - { - try + public async Task ExecuteAsync(CancellationToken ct) { 
await foreach (var message in _reader.WithCancellation(ct)) - await _executor.StartAsync(() => Process(message, ct), ct); - } - catch (ChannelClosedException e) - { - _logger.LogWarning(e, "{Name} queue has been closed, stop listening", Name); - - // All currently running tasks will be processed in StopAsync() below + await _handler(message, ct); } - } - public async Task StopAsync(CancellationToken forceExitToken) - { - // Good to have later: an option to NOT process the rest of the messages - try + public async Task StopAsync(CancellationToken forceExitToken) { - // TODO An option to NOT process the rest of the messages... + // Good to have later: an option to NOT process the rest of the messages... await foreach (var message in _reader.WithCancellation(forceExitToken)) - await _executor.StartAsync(() => Process(message, forceExitToken), forceExitToken); + await _handler(message, forceExitToken); } - catch (ChannelClosedException) - { - // OK, just wait for the rest of the tasks to finish - } - - // Wait until all currently running tasks are finished - await _executor.WaitAsync(forceExitToken); } - private async Task Process(T message, CancellationToken ct) + internal sealed class BatchConsumer : IBackgroundService { - // TODO Tracing... + private readonly ChannelReader _source; + private readonly ChannelWriter _destination; + private readonly BatchBuilderFactory _factory; - using var scope = _scopeFactory.CreateScope(); + private IBatchBuilder _batch; - // Make it specific for this queue somehow?.. 
- var handler = _handlerFactory(scope.ServiceProvider); - - try + public BatchConsumer(ChannelReader source, ChannelWriter destination, BatchBuilderFactory factory) { - // Await the handler, to keep the container scope alive - await handler(message, ct); + _source = source; + _destination = destination; + _factory = factory; + + _batch = factory(); } - catch (OperationCanceledException e) when (e.CancellationToken == ct) + + private async Task ProcessBatch(CancellationToken ct) { - throw; + await _destination.WriteAsync(_batch.Build(), ct); + _batch.Dispose(); + _batch = _factory(); } - catch (Exception e) + + private async Task Process(CancellationToken ct) { - _logger.LogError(e, "{Queue}: unhandled exception while processing a message", Name); + while (true) + { + using var cts = CancellationTokenSource.CreateLinkedTokenSource(ct, _batch.TimeWindow); + try + { + if (!await _source.WaitToReadAsync(cts.Token)) + break; + + while (_source.TryRead(out var message)) + { + if (_batch.TryAdd(message)) continue; + if (_batch.IsEmpty) + throw new Exception("Cannot fit a message in a batch"); + + await ProcessBatch(ct); + + if (!_batch.TryAdd(message)) + throw new Exception("Cannot fit a message in a batch"); + } + } + catch (OperationCanceledException e) when (e.CancellationToken == _batch.TimeWindow) + { + // Just process the current batch + if (!_batch.IsEmpty) + await ProcessBatch(ct); + } + } + + _destination.Complete(); } + + public Task StartAsync(CancellationToken ct) => Task.CompletedTask; + + public Task ExecuteAsync(CancellationToken ct) => Process(ct); + + public Task StopAsync(CancellationToken forceExitToken) => Process(forceExitToken); } } diff --git a/src/LocalPost/BackgroundQueueOptions.cs b/src/LocalPost/BackgroundQueueOptions.cs new file mode 100644 index 0000000..13dcac9 --- /dev/null +++ b/src/LocalPost/BackgroundQueueOptions.cs @@ -0,0 +1,22 @@ +using System.ComponentModel.DataAnnotations; +using System.Threading.Channels; + +namespace LocalPost; + 
+/// +/// Background queue configuration. +/// +public record BackgroundQueueOptions +{ + // TODO Use + public BoundedChannelFullMode FullMode { get; set; } = BoundedChannelFullMode.DropOldest; + + public ushort? MaxSize { get; set; } = null; + + public ushort? CompletionTimeout { get; set; } = 1_000; // Milliseconds + + /// + /// How many messages to process in parallel. Default is 10. + /// + [Required] public ushort MaxConcurrency { get; set; } = 10; +} diff --git a/src/LocalPost/BackgroundQueueService.cs b/src/LocalPost/BackgroundQueueService.cs new file mode 100644 index 0000000..9bbb7f6 --- /dev/null +++ b/src/LocalPost/BackgroundQueueService.cs @@ -0,0 +1,112 @@ +using LocalPost.AsyncEnumerable; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; +using Microsoft.Extensions.Hosting; + +namespace LocalPost; + +internal sealed class BackgroundQueueService +{ + public static readonly string Name = Reflection.FriendlyNameOf(); + + public static BackgroundQueueService Create(IServiceProvider provider, MiddlewareStack handlerStack) + { + var options = provider.GetOptions>(); + + var queue = new BackgroundQueue(options); + + HandlerFactory handlerFactory = handlerStack.Resolve; + Handler handler = ActivatorUtilities.CreateInstance>(provider, + Name, handlerFactory).InvokeAsync; + + var consumers = Enumerable.Range(1, options.MaxConcurrency) + .Select(_ => + { + var consumer = new BackgroundQueue.Consumer(queue, handler); + var supervisor = ActivatorUtilities.CreateInstance(provider, + Name, consumer); + + return supervisor; + }); + + return new BackgroundQueueService(options, queue, consumers); + } + + public static BackgroundQueueService CreateBatched(IServiceProvider provider, + MiddlewareStack> handlerStack, + int maxBatchSize = 10, int batchCompletionTimeWindow = 1_000) => CreateBatched(provider, handlerStack, + BoundedBatchBuilder.Factory(maxBatchSize, 
batchCompletionTimeWindow)); + + public static BackgroundQueueService CreateBatched(IServiceProvider provider, + MiddlewareStack handlerStack, BatchBuilderFactory batchFactory) + { + var options = provider.GetOptions>(); + + var queue = new BackgroundQueue(options); + var batchQueue = new BackgroundQueue(options); + + HandlerFactory handlerFactory = handlerStack.Resolve; + Handler handler = ActivatorUtilities.CreateInstance>(provider, + Name, handlerFactory).InvokeAsync; + + // Just a single consumer, to do the batching properly + var batchSupervisor = ActivatorUtilities.CreateInstance(provider, + // A different name for the batching consumer?.. + Name, new BackgroundQueue.BatchConsumer(queue, batchQueue, batchFactory)); + + // And the actual consumers, to process the batches + var consumers = Enumerable.Range(1, options.MaxConcurrency) + .Select(_ => + { + var consumer = new BackgroundQueue.Consumer(batchQueue, handler); + var supervisor = ActivatorUtilities.CreateInstance(provider, + Name, consumer); + + return supervisor; + }).Prepend(batchSupervisor); + + return new BackgroundQueueService(options, queue, consumers); + } + + public BackgroundQueueService(BackgroundQueueOptions options, BackgroundQueue queue, + IEnumerable consumers) + { + Options = options; + + Queue = queue; + var queueSupervisor = new BackgroundQueue.Supervisor(queue); + + var consumerGroup = new IBackgroundServiceSupervisor.Combined(consumers); + + _consumerGroupReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(consumerGroup); + _consumerGroupLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(consumerGroup); + + Supervisor = new CombinedHostedService(queueSupervisor, consumerGroup); + } + + public BackgroundQueueOptions Options { get; } + + public IBackgroundQueue Queue { get; } + + // Expose only the root supervisor to the host, to avoid deadlocks (.NET runtime handles background services + // synchronously by default, so if consumers are stopped first, they will 
block the reader from completing the + // channel). + public IHostedService Supervisor { get; } + + private readonly IHealthCheck _consumerGroupReadinessCheck; + private readonly IHealthCheck _consumerGroupLivenessCheck; + + public static HealthCheckRegistration ConsumerGroupReadinessCheck(HealthStatus? failureStatus = default, + IEnumerable? tags = default) => new(Name, + provider => provider.GetRequiredService>()._consumerGroupReadinessCheck, + failureStatus, + tags); + + public static HealthCheckRegistration ConsumerGroupLivenessCheck(HealthStatus? failureStatus = default, + IEnumerable? tags = default) => new(Name, + provider => provider.GetRequiredService>()._consumerGroupLivenessCheck, + failureStatus, + tags); + +} diff --git a/src/LocalPost/BackgroundQueueSupervisor.cs b/src/LocalPost/BackgroundQueueSupervisor.cs index 324b2e8..ab738fc 100644 --- a/src/LocalPost/BackgroundQueueSupervisor.cs +++ b/src/LocalPost/BackgroundQueueSupervisor.cs @@ -1,30 +1,22 @@ -using LocalPost.DependencyInjection; using Microsoft.Extensions.Hosting; namespace LocalPost; -public sealed partial class BackgroundQueue +internal sealed partial class BackgroundQueue { - // TODO Use - internal sealed class Supervisor : IHostedService, INamedService + internal sealed class Supervisor : IHostedService { - // TODO Health checks + // Health checks later?.. Like full or not. 
- public Supervisor(IBackgroundQueueManager queue, string name) + private readonly IBackgroundQueueManager _queue; + + public Supervisor(IBackgroundQueueManager queue) { - Queue = queue; - Name = name; + _queue = queue; } - internal IBackgroundQueueManager Queue { get; } - - public string Name { get; } - public Task StartAsync(CancellationToken ct) => Task.CompletedTask; - public async Task StopAsync(CancellationToken forceExitToken) - { - await Queue.CompleteAsync(forceExitToken); - } + public async Task StopAsync(CancellationToken forceExitToken) => await _queue.CompleteAsync(forceExitToken); } } diff --git a/src/LocalPost/BackgroundService.cs b/src/LocalPost/BackgroundService.cs index 8b7d9cb..b11aaa0 100644 --- a/src/LocalPost/BackgroundService.cs +++ b/src/LocalPost/BackgroundService.cs @@ -1,8 +1,6 @@ -using LocalPost.DependencyInjection; - namespace LocalPost; -internal interface IBackgroundService : INamedService +internal interface IBackgroundService { Task StartAsync(CancellationToken ct); diff --git a/src/LocalPost/BackgroundServiceSupervisor.cs b/src/LocalPost/BackgroundServiceSupervisor.cs index 3ab0887..c14853a 100644 --- a/src/LocalPost/BackgroundServiceSupervisor.cs +++ b/src/LocalPost/BackgroundServiceSupervisor.cs @@ -1,19 +1,75 @@ +using System.Collections.Immutable; using System.Diagnostics.CodeAnalysis; -using LocalPost.DependencyInjection; using Microsoft.Extensions.Diagnostics.HealthChecks; using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Logging; +using Nito.AsyncEx; using static Microsoft.Extensions.Diagnostics.HealthChecks.HealthCheckResult; namespace LocalPost; -internal abstract class BackgroundServiceSupervisor : IHostedService, INamedService, IDisposable +internal sealed record CombinedHostedService(ImmutableArray Services) : IHostedService { + public CombinedHostedService(IHostedService s1, IHostedService s2) : this(new[] { s1, s2 }) + { + } + + public CombinedHostedService(IEnumerable services) : 
this(services.ToImmutableArray()) + { + } + + public Task StartAsync(CancellationToken cancellationToken) => + Task.WhenAll(Services.Select(c => c.StartAsync(cancellationToken))); + + public Task StopAsync(CancellationToken cancellationToken) => + Task.WhenAll(Services.Select(c => c.StopAsync(cancellationToken))); + +} + +internal interface IBackgroundServiceSupervisor : IHostedService +{ + // With predefined static size + // (IAsyncDisposable later?..) + internal sealed record Combined(ImmutableArray Supervisors) : + IBackgroundServiceSupervisor, IDisposable + { + public Combined(IEnumerable supervisors) : this(supervisors.ToImmutableArray()) + { + } + + public Task StartAsync(CancellationToken cancellationToken) => + Task.WhenAll(Supervisors.Select(c => c.StartAsync(cancellationToken))); + + public Task StopAsync(CancellationToken cancellationToken) => + Task.WhenAll(Supervisors.Select(c => c.StopAsync(cancellationToken))); + + public bool Started => Supervisors.All(c => c.Started); + public bool Running => Supervisors.All(c => c.Running); + public bool Crashed => Supervisors.Any(c => c.Crashed); + public Exception? Exception => null; // TODO Implement + + public void Dispose() + { + foreach (var consumer in Supervisors) + if (consumer is IDisposable disposable) + disposable.Dispose(); + } + } + + public bool Started { get; } + + public bool Running { get; } + + [MemberNotNullWhen(true, nameof(Exception))] + public bool Crashed { get; } + + public Exception? 
Exception { get; } + public sealed class LivenessCheck : IHealthCheck { - private readonly BackgroundServiceSupervisor _supervisor; + private readonly IBackgroundServiceSupervisor _supervisor; - public LivenessCheck(BackgroundServiceSupervisor supervisor) + public LivenessCheck(IBackgroundServiceSupervisor supervisor) { _supervisor = supervisor; } @@ -24,21 +80,21 @@ public Task CheckHealthAsync(HealthCheckContext context, private HealthCheckResult CheckHealth(HealthCheckContext _) { if (_supervisor.Crashed) - return Unhealthy($"{_supervisor.Name} has crashed", _supervisor.Exception); + return Unhealthy("Service has crashed", _supervisor.Exception); if (_supervisor is { Started: true, Running: false }) - return Unhealthy($"{_supervisor.Name} is not running"); + return Unhealthy("Service is not running"); // Starting or running - return Healthy($"{_supervisor.Name} is alive"); + return Healthy("Service is alive"); } } public sealed class ReadinessCheck : IHealthCheck { - private readonly BackgroundServiceSupervisor _supervisor; + private readonly IBackgroundServiceSupervisor _supervisor; - public ReadinessCheck(BackgroundServiceSupervisor supervisor) + public ReadinessCheck(IBackgroundServiceSupervisor supervisor) { _supervisor = supervisor; } @@ -49,33 +105,38 @@ public Task CheckHealthAsync(HealthCheckContext context, private HealthCheckResult CheckHealth(HealthCheckContext context) { if (!_supervisor.Started) - return Unhealthy($"{_supervisor.Name} has not been started yet", _supervisor.Exception); + return Unhealthy("Service has not been started yet", _supervisor.Exception); if (_supervisor.Crashed) - return Unhealthy($"{_supervisor.Name} has crashed", _supervisor.Exception); + return Unhealthy("Service has crashed", _supervisor.Exception); - return Healthy($"{_supervisor.Name} is running"); + return Healthy("Service is running"); } } +} +internal class BackgroundServiceSupervisor : IBackgroundServiceSupervisor, IDisposable +{ private readonly ILogger _logger; 
private CancellationTokenSource? _executionCts; private Task? _execution; - public BackgroundServiceSupervisor(ILogger logger, IBackgroundService service) + public BackgroundServiceSupervisor(ILogger logger, string name, + IBackgroundService service) { _logger = logger; + Name = name; Service = service; } - public IBackgroundService Service { get; } + public string Name { get; } - public string Name => Service.Name; + public IBackgroundService Service { get; } public bool Started => _executionCts is not null && _execution is not null; - public bool Running => _execution is not null && _execution.IsCompleted; + public bool Running => _execution is not null && !_execution.IsCompleted; [MemberNotNullWhen(true, nameof(Exception))] public bool Crashed => Exception is not null; @@ -96,14 +157,10 @@ public async Task StartAsync(CancellationToken ct) // Store the task we're executing _execution = ExecuteAsync(_executionCts.Token); } - catch (OperationCanceledException e) when (e.CancellationToken == ct) - { - _logger.LogInformation("{Name} start has been aborted", Name); - } catch (Exception e) { Exception = e; - _logger.LogCritical(e, "Unhandled exception while starting {Name} background queue", Name); + _logger.LogCritical(e, "Unhandled exception while starting {Name} service", Name); } } @@ -116,17 +173,17 @@ private async Task ExecuteAsync(CancellationToken stoppingToken) try { await Service.ExecuteAsync(stoppingToken); - _logger.LogInformation("{Name} background queue is completed", Name); + _logger.LogWarning("{Name} is done", Name); } catch (OperationCanceledException e) when (e.CancellationToken == stoppingToken) { // The rest of the queue will be processed in StopAsync() below - _logger.LogInformation("Application exit has been requested, stopping {Name} background queue...", Name); + _logger.LogInformation("Application exit has been requested, stopping {Name}...", Name); } catch (Exception e) { Exception = e; - _logger.LogCritical(e, "Unhandled exception in 
{Name} background queue", Name); + _logger.LogCritical(e, "{Name}: Unhandled exception", Name); } } @@ -139,30 +196,32 @@ public async Task StopAsync(CancellationToken forceExitToken) } finally { - if (_execution is not null) - // Wait until the execution completes or the app is forced to exit - await Task.WhenAny(_execution, Task.Delay(Timeout.Infinite, forceExitToken)); + // Wait until the execution completes or the app is forced to exit + _execution?.WaitAsync(forceExitToken); } await Service.StopAsync(forceExitToken); + _logger.LogInformation("{Name} has been stopped", Name); } public void Dispose() { _executionCts?.Cancel(); + _executionCts?.Dispose(); // ReSharper disable once SuspiciousTypeConversion.Global if (Service is IDisposable disposableService) disposableService.Dispose(); } } -internal sealed class BackgroundServiceSupervisor : BackgroundServiceSupervisor - where T : class, IBackgroundService -{ - public BackgroundServiceSupervisor(ILogger> logger, T service) : base(logger, service) - { - Service = service; - } - - public new T Service { get; } -} +//internal sealed class BackgroundServiceSupervisor : BackgroundServiceSupervisor +// where T : class, IBackgroundService, INamedService +//{ +// public BackgroundServiceSupervisor(ILogger> logger, T service) : +// base(logger, service.Name, service) +// { +// Service = service; +// } +// +// public new T Service { get; } +//} diff --git a/src/LocalPost/ChannelReaderExtensions.cs b/src/LocalPost/ChannelReaderEx.cs similarity index 92% rename from src/LocalPost/ChannelReaderExtensions.cs rename to src/LocalPost/ChannelReaderEx.cs index 2f364d1..10ead5a 100644 --- a/src/LocalPost/ChannelReaderExtensions.cs +++ b/src/LocalPost/ChannelReaderEx.cs @@ -3,7 +3,7 @@ namespace LocalPost; -internal static class ChannelReaderExtensions +internal static class ChannelReaderEx { // netstandard2.0 does not contain this overload, it's available only from netstandard2.1 (.NET Core 3.0) public static async IAsyncEnumerable 
ReadAllAsync(this ChannelReader reader, diff --git a/src/LocalPost/ConcurrentTasksList.cs b/src/LocalPost/ConcurrentTasksList.cs deleted file mode 100644 index 29d3ada..0000000 --- a/src/LocalPost/ConcurrentTasksList.cs +++ /dev/null @@ -1,42 +0,0 @@ -using Nito.AsyncEx; - -namespace LocalPost; - -internal sealed class ConcurrentTasksList -{ - private readonly object _tasksLock = new(); - private readonly List _tasks; - - public ConcurrentTasksList(int capacityHint) - { - _tasks = new List(capacityHint); - } - - public int Count => _tasks.Count; - - private Task WhenAny() - { - lock (_tasksLock) - return Task.WhenAny(_tasks); - } - - private void Remove(Task item) - { - lock (_tasksLock) - _tasks.Remove(item); - } - - public void CleanupCompleted() - { - lock (_tasksLock) - _tasks.RemoveAll(task => task.IsCompleted); - } - - public void Track(Task item) - { - lock (_tasksLock) - _tasks.Add(item); - } - - public async Task WaitForCompleted(CancellationToken ct) => Remove(await WhenAny().WaitAsync(ct)); -} diff --git a/src/LocalPost/DependencyInjection/Configuration.cs b/src/LocalPost/DependencyInjection/Configuration.cs deleted file mode 100644 index 0bc12a8..0000000 --- a/src/LocalPost/DependencyInjection/Configuration.cs +++ /dev/null @@ -1,14 +0,0 @@ -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Options; - -namespace LocalPost.DependencyInjection; - -// TODO Open to public later -internal static class Configuration -{ - public static OptionsBuilder AddBackgroundQueueOptions(this IServiceCollection services) => - services.AddOptions(Reflection.FriendlyNameOf>()); - - public static OptionsBuilder AddBackgroundQueueOptions(this IServiceCollection services, string name) => - services.AddOptions(name); -} diff --git a/src/LocalPost/DependencyInjection/HealthChecks.cs b/src/LocalPost/DependencyInjection/HealthChecks.cs index 6a7ba0e..3b7fcc2 100644 --- a/src/LocalPost/DependencyInjection/HealthChecks.cs +++ 
b/src/LocalPost/DependencyInjection/HealthChecks.cs @@ -1,4 +1,3 @@ -using System.Diagnostics.CodeAnalysis; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Diagnostics.HealthChecks; @@ -6,52 +5,11 @@ namespace LocalPost.DependencyInjection; public static class HealthChecks { - [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] public static IHealthChecksBuilder AddBackgroundQueueReadinessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, - TimeSpan? timeout = default) => builder - .AddBackgroundServiceReadinessCheck(name, failureStatus, tags, timeout) - .AddBackgroundQueueConsumerReadinessCheck(name, failureStatus, tags, timeout); + HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder + .Add(BackgroundQueueService.ConsumerGroupReadinessCheck(failureStatus, tags)); - [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] public static IHealthChecksBuilder AddBackgroundQueueLivenessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, - TimeSpan? timeout = default) => builder - .AddBackgroundServiceLivenessCheck(name, failureStatus, tags, timeout) - .AddBackgroundQueueConsumerLivenessCheck(name, failureStatus, tags, timeout); -} - -public static class HealthCheckBuilderEx -{ - internal static IHealthChecksBuilder AddBackgroundQueueConsumerReadinessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, - TimeSpan? timeout = default) => - builder.AddBackgroundServiceReadinessCheck>(name, failureStatus, tags, timeout); - - internal static IHealthChecksBuilder AddBackgroundQueueConsumerLivenessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, - TimeSpan? 
timeout = default) => - builder.AddBackgroundServiceLivenessCheck>(name, failureStatus, tags, timeout); - - internal static IHealthChecksBuilder AddBackgroundServiceReadinessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, - TimeSpan? timeout = default) where T : class, IBackgroundService => - builder.Add(new HealthCheckRegistration( - name, - provider => ActivatorUtilities.CreateInstance(provider, - provider.GetSupervisor(name)), - failureStatus, - tags, - timeout)); - - internal static IHealthChecksBuilder AddBackgroundServiceLivenessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, - TimeSpan? timeout = default) where T : class, IBackgroundService => - builder.Add(new HealthCheckRegistration( - name, - provider => ActivatorUtilities.CreateInstance(provider, - provider.GetSupervisor(name)), - failureStatus, - tags, - timeout)); + HealthStatus? failureStatus = default, IEnumerable? 
tags = default) => builder + .Add(BackgroundQueueService.ConsumerGroupLivenessCheck(failureStatus, tags)); } diff --git a/src/LocalPost/DependencyInjection/JobQueueRegistration.cs b/src/LocalPost/DependencyInjection/JobQueueRegistration.cs index b963e29..8b5a2ce 100644 --- a/src/LocalPost/DependencyInjection/JobQueueRegistration.cs +++ b/src/LocalPost/DependencyInjection/JobQueueRegistration.cs @@ -12,6 +12,6 @@ public static OptionsBuilder> AddBackgroundJobQueue( services.TryAddSingleton(provider => provider.GetRequiredService()); return services.AddBackgroundQueue(builder => - builder.MiddlewareStackBuilder.SetHandler((job, ct) => job(ct))); + builder.SetHandler((job, ct) => job(ct))); } } diff --git a/src/LocalPost/DependencyInjection/QueueRegistration.cs b/src/LocalPost/DependencyInjection/QueueRegistration.cs index 6ca2d0e..c08dd73 100644 --- a/src/LocalPost/DependencyInjection/QueueRegistration.cs +++ b/src/LocalPost/DependencyInjection/QueueRegistration.cs @@ -1,5 +1,6 @@ using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.DependencyInjection.Extensions; +using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Options; namespace LocalPost.DependencyInjection; @@ -8,55 +9,39 @@ public static class QueueRegistration { // THandler has to be registered by the user public static OptionsBuilder> AddBackgroundQueue(this IServiceCollection services, - Action.ConsumerBuilder>? configure = null) where THandler : IHandler => + Action>? configure = null) where THandler : IHandler => services.AddBackgroundQueue(builder => { - builder.MiddlewareStackBuilder.SetHandler(); + builder.SetHandler(); configure?.Invoke(builder); }); public static OptionsBuilder> AddBackgroundQueue(this IServiceCollection services, - Handler handler, Action.ConsumerBuilder>? configure = null) => + Handler handler, Action>? 
configure = null) => services.AddBackgroundQueue(builder => { - builder.MiddlewareStackBuilder.SetHandler(handler); + builder.SetHandler(handler); configure?.Invoke(builder); }); public static OptionsBuilder> AddBackgroundQueue(this IServiceCollection services, - Action.ConsumerBuilder> configure) + Action> configure) { - services.TryAddSingleton>(); - services.TryAddSingleton>(provider => provider.GetRequiredService>()); + var handleStackBuilder = new MiddlewareStackBuilder(); + configure(handleStackBuilder); + var handlerStack = handleStackBuilder.Build(); - services - .AddBackgroundQueueConsumer>(configure) - .Configure>>( - (options, consumerOptions) => { options.MaxConcurrency = consumerOptions.Value.Consumer.MaxConcurrency; }); + services.TryAddSingleton(provider => BackgroundQueueService.Create(provider, handlerStack)); - return services.AddOptions>(); - } - - // TReader has to be registered by the user - public static OptionsBuilder AddBackgroundQueueConsumer(this IServiceCollection services, - Action.ConsumerBuilder> configure) where TReader : class, IAsyncEnumerable => - services.AddBackgroundQueueConsumer(Reflection.FriendlyNameOf(), builder => configure( - builder.SetReaderFactory(provider => provider.GetRequiredService()))); - - public static OptionsBuilder AddBackgroundQueueConsumer(this IServiceCollection services, - Action.ConsumerBuilder> configure) => - services.AddBackgroundQueueConsumer(Reflection.FriendlyNameOf(), configure); + services.TryAddSingleton>(provider => + provider.GetRequiredService>().Queue); + services.AddSingleton(provider => + provider.GetRequiredService>().Supervisor); - public static OptionsBuilder AddBackgroundQueueConsumer(this IServiceCollection services, - string name, Action.ConsumerBuilder> configure) - { - var builder = new BackgroundQueue.ConsumerBuilder(name); - configure(builder); - - // TODO Try...() version of this one, to be gentle with multiple registrations of the same queue - // (extend ServiceDescriptor, add 
name to it and search using it) - services.AddHostedService(builder.Build); + // Extend ServiceDescriptor for better comparison and implement custom TryAddSingleton later... - return services.AddOptions(name); + return services.AddOptions>(); } + + // TODO Batched queue consumer } diff --git a/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs b/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs index 22ba9f1..869cc95 100644 --- a/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs +++ b/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs @@ -1,4 +1,5 @@ using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Options; namespace LocalPost.DependencyInjection; @@ -10,6 +11,8 @@ public static T GetRequiredService(this IServiceProvider provider, string nam return provider.GetRequiredService>().First(x => x.Name == name); } - public static BackgroundServiceSupervisor GetSupervisor(this IServiceProvider provider, string name) - where T : class, IBackgroundService => provider.GetRequiredService>(name); + public static T GetOptions(this IServiceProvider provider) => provider.GetOptions(Options.DefaultName); + + public static T GetOptions(this IServiceProvider provider, string name) => + provider.GetRequiredService>().Get(name); } diff --git a/src/LocalPost/Executor.cs b/src/LocalPost/Executor.cs deleted file mode 100644 index b5752fd..0000000 --- a/src/LocalPost/Executor.cs +++ /dev/null @@ -1,56 +0,0 @@ -using System.Diagnostics.CodeAnalysis; -using Microsoft.Extensions.Options; - -namespace LocalPost; - -internal interface IExecutor -{ - public bool IsEmpty { get; } - - // Start only when there is a capacity - ValueTask StartAsync(Func itemProcessor, CancellationToken ct); - - // Wait for all active tasks to finish... 
- ValueTask WaitAsync(CancellationToken ct); -} - -internal sealed class BoundedExecutor : IExecutor -{ - private readonly ConcurrentTasksList _tasks; - - public BoundedExecutor(string name, IOptionsMonitor options) : this(options.Get(name).MaxConcurrency) - { - } - - public BoundedExecutor(ushort maxConcurrency = ushort.MaxValue) - { - MaxConcurrency = maxConcurrency; - _tasks = new ConcurrentTasksList(MaxConcurrency); - } - - public ushort MaxConcurrency { get; } - - public bool IsEmpty => _tasks.Count == 0; - - private async ValueTask WaitForCapacityAsync(CancellationToken ct) - { - _tasks.CleanupCompleted(); - while (_tasks.Count >= MaxConcurrency) - await _tasks.WaitForCompleted(ct); - } - - public async ValueTask StartAsync(Func itemProcessor, CancellationToken ct) - { - if (_tasks.Count >= MaxConcurrency) - await WaitForCapacityAsync(ct); - - _tasks.Track(itemProcessor()); - } - - public async ValueTask WaitAsync(CancellationToken ct) - { - _tasks.CleanupCompleted(); - while (!IsEmpty) - await _tasks.WaitForCompleted(ct); - } -} diff --git a/src/LocalPost/MiddlewareStackBuilder.cs b/src/LocalPost/MiddlewareStackBuilder.cs index e54a5bc..3ee5f3e 100644 --- a/src/LocalPost/MiddlewareStackBuilder.cs +++ b/src/LocalPost/MiddlewareStackBuilder.cs @@ -34,7 +34,7 @@ public TBuilder SetHandler(HandlerFactory factory) public TBuilder Append(Middleware middleware) => Append(_ => middleware); - public TBuilder Append() where TMiddleware : IMiddleware + public TBuilder Append() where TMiddleware : class, IMiddleware { Middlewares.Add(provider => provider.GetRequiredService().Invoke); diff --git a/src/LocalPost/QueueOptions.cs b/src/LocalPost/QueueOptions.cs index 4098f2c..2864fbe 100644 --- a/src/LocalPost/QueueOptions.cs +++ b/src/LocalPost/QueueOptions.cs @@ -1,35 +1,4 @@ -using System.ComponentModel.DataAnnotations; - namespace LocalPost; -/// -/// Background queue configuration. 
-/// -public sealed record BackgroundQueueOptions -{ - public QueueOptions Queue { get; set; } = new(); - - public ConsumerOptions Consumer { get; set; } = new(); -} - -/// -/// Consumer configuration. -/// -public sealed record ConsumerOptions -{ - /// - /// How many messages to process in parallel. - /// - [Required] public ushort MaxConcurrency { get; set; } = ushort.MaxValue; -} - -/// -/// Queue configuration. -/// -public sealed record QueueOptions -{ - // TODO Drop strategy - public ushort? MaxSize { get; set; } = ushort.MaxValue; - - public ushort? CompletionTimeout { get; set; } = 1_000; // Milliseconds -} +// For the DI container and, to distinguish between different queues +public sealed record BackgroundQueueOptions : BackgroundQueueOptions; diff --git a/src/LocalPost/ScopedHandler.cs b/src/LocalPost/ScopedHandler.cs new file mode 100644 index 0000000..faf282a --- /dev/null +++ b/src/LocalPost/ScopedHandler.cs @@ -0,0 +1,48 @@ +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Logging; + +namespace LocalPost; + +internal sealed class ScopedHandler : IHandler +{ + private readonly ILogger> _logger; + private readonly IServiceScopeFactory _scopeFactory; + private readonly HandlerFactory _handlerFactory; + + private readonly string _name; + + public ScopedHandler(ILogger> logger, string name, IServiceScopeFactory scopeFactory, + HandlerFactory handlerFactory) + { + _logger = logger; + _scopeFactory = scopeFactory; + _handlerFactory = handlerFactory; + _name = name; + } + + public async Task InvokeAsync(T payload, CancellationToken ct) + { + // TODO Tracing... 
+ + // See https://andrewlock.net/exploring-dotnet-6-part-10-new-dependency-injection-features-in-dotnet-6/#handling-iasyncdisposable-services-with-iservicescope + // And also https://devblogs.microsoft.com/dotnet/announcing-net-6/#microsoft-extensions-dependencyinjection-createasyncscope-apis + await using var scope = _scopeFactory.CreateAsyncScope(); + + // Make it specific for this queue somehow?.. + var handler = _handlerFactory(scope.ServiceProvider); + + try + { + // Await the handler, to keep the container scope alive + await handler(payload, ct); + } + catch (OperationCanceledException e) when (e.CancellationToken == ct) + { + throw; + } + catch (Exception e) + { + _logger.LogError(e, "{Queue}: unhandled exception while processing a message", _name); + } + } +} diff --git a/tests/LocalPost.SqsConsumer.Tests/AsyncEnumTests.cs b/tests/LocalPost.SqsConsumer.Tests/AsyncEnumTests.cs new file mode 100644 index 0000000..c6a615f --- /dev/null +++ b/tests/LocalPost.SqsConsumer.Tests/AsyncEnumTests.cs @@ -0,0 +1,10 @@ +namespace LocalPost.SqsConsumer.Tests; + +public class AsyncEnumTests +{ + [Fact] + public async Task multi_enumerators() + { + + } +} diff --git a/tests/LocalPost.Tests/AsyncEnumerableMergerTests.cs b/tests/LocalPost.Tests/AsyncEnumerable/AsyncEnumerableMergerTests.cs similarity index 98% rename from tests/LocalPost.Tests/AsyncEnumerableMergerTests.cs rename to tests/LocalPost.Tests/AsyncEnumerable/AsyncEnumerableMergerTests.cs index 53a0ad1..ee46c60 100644 --- a/tests/LocalPost.Tests/AsyncEnumerableMergerTests.cs +++ b/tests/LocalPost.Tests/AsyncEnumerable/AsyncEnumerableMergerTests.cs @@ -1,7 +1,8 @@ using System.Threading.Channels; using FluentAssertions; +using LocalPost.AsyncEnumerable; -namespace LocalPost.Tests; +namespace LocalPost.Tests.AsyncEnumerable; public class AsyncEnumerableMergerTests { diff --git a/tests/LocalPost.Tests/BatchingAsyncEnumerableTests.cs b/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs similarity 
index 97% rename from tests/LocalPost.Tests/BatchingAsyncEnumerableTests.cs rename to tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs index e4f017c..61234f6 100644 --- a/tests/LocalPost.Tests/BatchingAsyncEnumerableTests.cs +++ b/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs @@ -1,5 +1,6 @@ using System.Threading.Channels; using FluentAssertions; +using LocalPost.AsyncEnumerable; namespace LocalPost.Tests; From 2491e6646b50c60722a1b08aa3e0c6c5eeb4eaaa Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Wed, 24 May 2023 15:14:52 +0000 Subject: [PATCH 04/33] WORKING again --- .../ServiceRegistration.cs | 9 +- .../KafkaConsumerService.cs | 45 +++--- .../ServiceRegistration.cs | 8 +- .../SqsConsumerService.cs | 44 +++--- src/LocalPost/BackgroundQueueConsumer.cs | 138 ++++++++++++++--- src/LocalPost/BackgroundQueueService.cs | 59 ++------ src/LocalPost/BackgroundQueueSupervisor.cs | 4 +- src/LocalPost/BackgroundServiceSupervisor.cs | 142 +++++++----------- .../DependencyInjection/QueueRegistration.cs | 10 +- .../DependencyInjection/Registration.cs | 9 ++ src/LocalPost/HostedServices.cs | 31 ++++ 11 files changed, 274 insertions(+), 225 deletions(-) create mode 100644 src/LocalPost/DependencyInjection/Registration.cs create mode 100644 src/LocalPost/HostedServices.cs diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs index 11c3473..6d1abab 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs @@ -2,7 +2,6 @@ using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.DependencyInjection.Extensions; -using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Options; namespace LocalPost.KafkaConsumer.DependencyInjection; @@ -18,6 +17,8 @@ public static 
OptionsBuilder AddKafkaConsumer(this IServi Action>> configure, Action> configureClient) { + services.TryAddConcurrentHostedServices(); + var handleStackBuilder = new MiddlewareStackBuilder>(); configure(handleStackBuilder); var handlerStack = handleStackBuilder.Build(); @@ -25,8 +26,10 @@ public static OptionsBuilder AddKafkaConsumer(this IServi services.TryAddSingleton(provider => KafkaConsumerService.Create(provider, name, handlerStack, configureClient)); - services.AddSingleton(provider => - provider.GetRequiredService>(name).Supervisor); + services.AddSingleton(provider => + provider.GetRequiredService>(name).Reader); + services.AddSingleton(provider => + provider.GetRequiredService>(name).ConsumerGroup); // Extend ServiceDescriptor for better comparison and implement custom TryAddSingleton later... diff --git a/src/LocalPost.KafkaConsumer/KafkaConsumerService.cs b/src/LocalPost.KafkaConsumer/KafkaConsumerService.cs index 75681e2..09b4154 100644 --- a/src/LocalPost.KafkaConsumer/KafkaConsumerService.cs +++ b/src/LocalPost.KafkaConsumer/KafkaConsumerService.cs @@ -21,68 +21,57 @@ public static KafkaConsumerService Create(IServiceProvider provide var kafkaClient = clientBuilder.Build(); var messageSource = ActivatorUtilities.CreateInstance>(provider, options.TopicName, kafkaClient); - var queueSupervisor = ActivatorUtilities.CreateInstance(provider, - name, messageSource); + var reader = new BackgroundServiceSupervisor(messageSource); HandlerFactory> handlerFactory = handlerStack.Resolve; Handler> handler = ActivatorUtilities.CreateInstance>>(provider, name, handlerFactory).InvokeAsync; - var consumers = Enumerable.Range(1, options.MaxConcurrency) - .Select(_ => - { - var consumer = new BackgroundQueue>.Consumer(messageSource, handler); - var supervisor = ActivatorUtilities.CreateInstance(provider, - name, consumer); + var consumer = new BackgroundQueue>.Consumer(messageSource, handler); + var consumerGroup = new ConsumerGroup(consumer.Run, 
options.MaxConcurrency); - return supervisor; - }).ToImmutableList(); - - return new KafkaConsumerService(name, options, queueSupervisor, consumers); + return new KafkaConsumerService(name, reader, consumerGroup); } - public KafkaConsumerService(string name, Options options, - IBackgroundServiceSupervisor reader, - IEnumerable consumers) + public KafkaConsumerService(string name, IBackgroundServiceSupervisor reader, + IBackgroundServiceSupervisor consumerGroup) { Name = name; - Options = options; - _queueReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(reader); - _queueLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(reader); + Reader = reader; + _readerReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(reader); + _readerLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(reader); - var consumerGroup= new IBackgroundServiceSupervisor.Combined(consumers); + ConsumerGroup = consumerGroup; _consumerGroupReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(consumerGroup); _consumerGroupLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(consumerGroup); - - Supervisor = new CombinedHostedService(reader, consumerGroup); } public string Name { get; } - public Options Options { get; } - // Expose only the root supervisor to the host, to avoid deadlocks (.NET runtime handles background services // synchronously by default, so if consumers are stopped first, they will block the reader from completing the // channel). 
- public IHostedService Supervisor { get; } +// public IHostedService Supervisor { get; } - private readonly IHealthCheck _queueReadinessCheck; - private readonly IHealthCheck _queueLivenessCheck; + public IConcurrentHostedService Reader { get; } + private readonly IHealthCheck _readerReadinessCheck; + private readonly IHealthCheck _readerLivenessCheck; + public IConcurrentHostedService ConsumerGroup { get; } private readonly IHealthCheck _consumerGroupReadinessCheck; private readonly IHealthCheck _consumerGroupLivenessCheck; public static HealthCheckRegistration QueueReadinessCheck(string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => new(name, - provider => provider.GetRequiredService>(name)._queueReadinessCheck, + provider => provider.GetRequiredService>(name)._readerReadinessCheck, failureStatus, tags); public static HealthCheckRegistration QueueLivenessCheck(string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => new(name, - provider => provider.GetRequiredService>(name)._queueLivenessCheck, + provider => provider.GetRequiredService>(name)._readerLivenessCheck, failureStatus, tags); diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs index 5e8f646..e48fc0a 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs @@ -77,6 +77,8 @@ public static OptionsBuilder AddAmazonSqsConsumer(this IServi public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, string name, Action> configure) { + services.TryAddConcurrentHostedServices(); + var handleStackBuilder = new MiddlewareStackBuilder(); services.TryAddSingleton(); handleStackBuilder.Append(); @@ -85,8 +87,10 @@ public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollecti services.TryAddSingleton(provider => 
SqsConsumerService.Create(provider, name, handlerStack)); - services.AddSingleton(provider => - provider.GetRequiredService(name).Supervisor); + services.AddSingleton(provider => + provider.GetRequiredService(name).Reader); + services.AddSingleton(provider => + provider.GetRequiredService(name).ConsumerGroup); // Extend ServiceDescriptor for better comparison and implement custom TryAddSingleton later... diff --git a/src/LocalPost.SqsConsumer/SqsConsumerService.cs b/src/LocalPost.SqsConsumer/SqsConsumerService.cs index 523de6e..7ddd4f0 100644 --- a/src/LocalPost.SqsConsumer/SqsConsumerService.cs +++ b/src/LocalPost.SqsConsumer/SqsConsumerService.cs @@ -15,66 +15,56 @@ public static SqsConsumerService Create(IServiceProvider provider, string name, var client = ActivatorUtilities.CreateInstance(provider, options); var messageSource = new MessageSource(client); - var queueSupervisor = ActivatorUtilities.CreateInstance(provider, name, messageSource); + var reader = new BackgroundServiceSupervisor(messageSource); HandlerFactory handlerFactory = handlerStack.Resolve; Handler handler = ActivatorUtilities.CreateInstance>(provider, name, handlerFactory).InvokeAsync; - var consumers = Enumerable.Range(1, options.MaxConcurrency) - .Select(_ => - { - var consumer = new BackgroundQueue.Consumer(messageSource, handler); - var supervisor = ActivatorUtilities.CreateInstance(provider, - name, consumer); + var consumer = new BackgroundQueue.Consumer(messageSource, handler); + var consumerGroup = new ConsumerGroup(consumer.Run, options.MaxConcurrency); - return supervisor; - }).ToImmutableList(); - - return new SqsConsumerService(name, options, queueSupervisor, consumers); + return new SqsConsumerService(name, reader, consumerGroup); } - public SqsConsumerService(string name, Options options, - IBackgroundServiceSupervisor reader, - IEnumerable consumers) + public SqsConsumerService(string name, IBackgroundServiceSupervisor reader, + IBackgroundServiceSupervisor consumerGroup) { Name 
= name; - Options = options; - _queueReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(reader); - _queueLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(reader); + Reader = reader; + _readerReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(reader); + _readerLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(reader); - var consumerGroup = new IBackgroundServiceSupervisor.Combined(consumers); + ConsumerGroup = consumerGroup; _consumerGroupReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(consumerGroup); _consumerGroupLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(consumerGroup); - - Supervisor = new CombinedHostedService(reader, consumerGroup); } public string Name { get; } - public Options Options { get; } - // Expose only the root supervisor to the host, to avoid deadlocks (.NET runtime handles background services // synchronously by default, so if consumers are stopped first, they will block the reader from completing the // channel). - public IHostedService Supervisor { get; } +// public IHostedService Supervisor { get; } - private readonly IHealthCheck _queueReadinessCheck; - private readonly IHealthCheck _queueLivenessCheck; + public IConcurrentHostedService Reader { get; } + private readonly IHealthCheck _readerReadinessCheck; + private readonly IHealthCheck _readerLivenessCheck; + public IConcurrentHostedService ConsumerGroup { get; } private readonly IHealthCheck _consumerGroupReadinessCheck; private readonly IHealthCheck _consumerGroupLivenessCheck; public static HealthCheckRegistration QueueReadinessCheck(string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => new(name, - provider => provider.GetRequiredService(name)._queueReadinessCheck, + provider => provider.GetRequiredService(name)._readerReadinessCheck, failureStatus, tags); public static HealthCheckRegistration QueueLivenessCheck(string name, HealthStatus? 
failureStatus = default, IEnumerable? tags = default) => new(name, - provider => provider.GetRequiredService(name)._queueLivenessCheck, + provider => provider.GetRequiredService(name)._readerLivenessCheck, failureStatus, tags); diff --git a/src/LocalPost/BackgroundQueueConsumer.cs b/src/LocalPost/BackgroundQueueConsumer.cs index f06e066..351e17e 100644 --- a/src/LocalPost/BackgroundQueueConsumer.cs +++ b/src/LocalPost/BackgroundQueueConsumer.cs @@ -1,11 +1,114 @@ +using System.Collections.Immutable; using System.Threading.Channels; using LocalPost.AsyncEnumerable; namespace LocalPost; +internal sealed class ConsumerSupervisor : IBackgroundServiceSupervisor +{ + private CancellationTokenSource? _executionCts; + private Task? _execution; + + private readonly Func _consumer; + + public ConsumerSupervisor(Func consumer) + { + _consumer = consumer; + } + + public bool Started => _executionCts is not null && _execution is not null; + + public bool Running => _execution is not null && !_execution.IsCompleted; + + public bool Crashed => Exception is not null; + + public Exception? Exception { get; private set; } + + public Task StartAsync(CancellationToken ct) + { + if (_executionCts is not null) + throw new InvalidOperationException("Execution has been already started"); + + _executionCts = new CancellationTokenSource(); + _execution = ExecuteAsync(_executionCts.Token); + + return Task.CompletedTask; + } + + private async Task ExecuteAsync(CancellationToken ct) + { + if (ct.IsCancellationRequested) + return; + + try + { + await _consumer(ct); + + // Completed + } + catch (OperationCanceledException e) when (e.CancellationToken == ct) + { + // Completed gracefully on request + } + catch (Exception e) + { + Exception = e; + } + } + + public async Task StopAsync(CancellationToken forceExitToken) + { + // Do not cancel the execution immediately, as it will finish gracefully itself (when the channel is closed) + + // TODO .NET 6 async... 
+ using var linked = forceExitToken.Register(() => _executionCts?.Cancel()); + + if (_execution is not null) + await _execution; + } + + public void Dispose() + { + _executionCts?.Dispose(); + _execution?.Dispose(); + } +} + +// With predefined static size +internal sealed class ConsumerGroup : IBackgroundServiceSupervisor +{ + private readonly ImmutableArray _services; + + public ConsumerGroup(Func consumer, int maxConcurrency) + { + _services = Enumerable.Range(1, maxConcurrency) + .Select(_ => new ConsumerSupervisor(consumer)) + .ToImmutableArray(); + } + + public Task StartAsync(CancellationToken ct) => + Task.WhenAll(_services.Select(service => service.StartAsync(ct))); + // TODO Log info + + public Task StopAsync(CancellationToken ct) => + Task.WhenAll(_services.Select(service => service.StopAsync(ct))); + // TODO Log info + + public bool Started => _services.All(c => c.Started); + public bool Running => _services.All(c => c.Running); + public bool Crashed => _services.Any(c => c.Crashed); + public Exception? Exception => null; // TODO Implement + + public void Dispose() + { + foreach (var disposable in _services) + disposable.Dispose(); + } +} + internal sealed partial class BackgroundQueue { - internal sealed class Consumer : IBackgroundService + internal sealed class Consumer { private readonly IAsyncEnumerable _reader; private readonly Handler _handler; @@ -16,23 +119,14 @@ public Consumer(IAsyncEnumerable reader, Handler handler) _handler = handler; } - public Task StartAsync(CancellationToken ct) => Task.CompletedTask; - - public async Task ExecuteAsync(CancellationToken ct) + public async Task Run(CancellationToken ct) { await foreach (var message in _reader.WithCancellation(ct)) await _handler(message, ct); } - - public async Task StopAsync(CancellationToken forceExitToken) - { - // Good to have later: an option to NOT process the rest of the messages... 
- await foreach (var message in _reader.WithCancellation(forceExitToken)) - await _handler(message, forceExitToken); - } } - internal sealed class BatchConsumer : IBackgroundService + internal sealed class BatchConsumer { private readonly ChannelReader _source; private readonly ChannelWriter _destination; @@ -49,14 +143,7 @@ public BatchConsumer(ChannelReader source, ChannelWriter destination, B _batch = factory(); } - private async Task ProcessBatch(CancellationToken ct) - { - await _destination.WriteAsync(_batch.Build(), ct); - _batch.Dispose(); - _batch = _factory(); - } - - private async Task Process(CancellationToken ct) + public async Task Run(CancellationToken ct) { while (true) { @@ -89,10 +176,11 @@ private async Task Process(CancellationToken ct) _destination.Complete(); } - public Task StartAsync(CancellationToken ct) => Task.CompletedTask; - - public Task ExecuteAsync(CancellationToken ct) => Process(ct); - - public Task StopAsync(CancellationToken forceExitToken) => Process(forceExitToken); + private async Task ProcessBatch(CancellationToken ct) + { + await _destination.WriteAsync(_batch.Build(), ct); + _batch.Dispose(); + _batch = _factory(); + } } } diff --git a/src/LocalPost/BackgroundQueueService.cs b/src/LocalPost/BackgroundQueueService.cs index 9bbb7f6..d944cf1 100644 --- a/src/LocalPost/BackgroundQueueService.cs +++ b/src/LocalPost/BackgroundQueueService.cs @@ -2,7 +2,6 @@ using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Diagnostics.HealthChecks; -using Microsoft.Extensions.Hosting; namespace LocalPost; @@ -20,17 +19,10 @@ public static BackgroundQueueService Create(IServiceProvider provider, Middle Handler handler = ActivatorUtilities.CreateInstance>(provider, Name, handlerFactory).InvokeAsync; - var consumers = Enumerable.Range(1, options.MaxConcurrency) - .Select(_ => - { - var consumer = new BackgroundQueue.Consumer(queue, handler); - var supervisor = 
ActivatorUtilities.CreateInstance(provider, - Name, consumer); + var consumer = new BackgroundQueue.Consumer(queue, handler); + var consumerGroup = new ConsumerGroup(consumer.Run, options.MaxConcurrency); - return supervisor; - }); - - return new BackgroundQueueService(options, queue, consumers); + return new BackgroundQueueService(queue, consumerGroup); } public static BackgroundQueueService CreateBatched(IServiceProvider provider, @@ -46,54 +38,35 @@ public static BackgroundQueueService CreateBatched(IServiceProvider pro var queue = new BackgroundQueue(options); var batchQueue = new BackgroundQueue(options); + // Just a single consumer, to do the batching properly + var consumer = new BackgroundQueue.BatchConsumer(queue, batchQueue, batchFactory); + var consumerSupervisor = new ConsumerSupervisor(consumer.Run); + HandlerFactory handlerFactory = handlerStack.Resolve; Handler handler = ActivatorUtilities.CreateInstance>(provider, Name, handlerFactory).InvokeAsync; + var batchConsumer = new BackgroundQueue.Consumer(batchQueue, handler); + var batchConsumerGroup = new ConsumerGroup(batchConsumer.Run, options.MaxConcurrency); - // Just a single consumer, to do the batching properly - var batchSupervisor = ActivatorUtilities.CreateInstance(provider, - // A different name for the batching consumer?.. 
- Name, new BackgroundQueue.BatchConsumer(queue, batchQueue, batchFactory)); - - // And the actual consumers, to process the batches - var consumers = Enumerable.Range(1, options.MaxConcurrency) - .Select(_ => - { - var consumer = new BackgroundQueue.Consumer(batchQueue, handler); - var supervisor = ActivatorUtilities.CreateInstance(provider, - Name, consumer); - - return supervisor; - }).Prepend(batchSupervisor); - - return new BackgroundQueueService(options, queue, consumers); + return new BackgroundQueueService(queue, + new IBackgroundServiceSupervisor.Combined(consumerSupervisor, batchConsumerGroup)); } - public BackgroundQueueService(BackgroundQueueOptions options, BackgroundQueue queue, - IEnumerable consumers) + public BackgroundQueueService(BackgroundQueue queue, IBackgroundServiceSupervisor consumerGroup) { - Options = options; - Queue = queue; - var queueSupervisor = new BackgroundQueue.Supervisor(queue); - - var consumerGroup = new IBackgroundServiceSupervisor.Combined(consumers); + QueueSupervisor = new BackgroundQueue.Supervisor(queue); + ConsumerGroup = consumerGroup; _consumerGroupReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(consumerGroup); _consumerGroupLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(consumerGroup); - - Supervisor = new CombinedHostedService(queueSupervisor, consumerGroup); } - public BackgroundQueueOptions Options { get; } - public IBackgroundQueue Queue { get; } - // Expose only the root supervisor to the host, to avoid deadlocks (.NET runtime handles background services - // synchronously by default, so if consumers are stopped first, they will block the reader from completing the - // channel). 
- public IHostedService Supervisor { get; } + public IConcurrentHostedService QueueSupervisor { get; } + public IConcurrentHostedService ConsumerGroup { get; } private readonly IHealthCheck _consumerGroupReadinessCheck; private readonly IHealthCheck _consumerGroupLivenessCheck; diff --git a/src/LocalPost/BackgroundQueueSupervisor.cs b/src/LocalPost/BackgroundQueueSupervisor.cs index ab738fc..c08dcfa 100644 --- a/src/LocalPost/BackgroundQueueSupervisor.cs +++ b/src/LocalPost/BackgroundQueueSupervisor.cs @@ -1,10 +1,8 @@ -using Microsoft.Extensions.Hosting; - namespace LocalPost; internal sealed partial class BackgroundQueue { - internal sealed class Supervisor : IHostedService + internal sealed class Supervisor : IConcurrentHostedService { // Health checks later?.. Like full or not. diff --git a/src/LocalPost/BackgroundServiceSupervisor.cs b/src/LocalPost/BackgroundServiceSupervisor.cs index c14853a..8b618b8 100644 --- a/src/LocalPost/BackgroundServiceSupervisor.cs +++ b/src/LocalPost/BackgroundServiceSupervisor.cs @@ -1,47 +1,31 @@ using System.Collections.Immutable; using System.Diagnostics.CodeAnalysis; using Microsoft.Extensions.Diagnostics.HealthChecks; -using Microsoft.Extensions.Hosting; -using Microsoft.Extensions.Logging; using Nito.AsyncEx; using static Microsoft.Extensions.Diagnostics.HealthChecks.HealthCheckResult; namespace LocalPost; -internal sealed record CombinedHostedService(ImmutableArray Services) : IHostedService -{ - public CombinedHostedService(IHostedService s1, IHostedService s2) : this(new[] { s1, s2 }) - { - } - - public CombinedHostedService(IEnumerable services) : this(services.ToImmutableArray()) - { - } - - public Task StartAsync(CancellationToken cancellationToken) => - Task.WhenAll(Services.Select(c => c.StartAsync(cancellationToken))); - - public Task StopAsync(CancellationToken cancellationToken) => - Task.WhenAll(Services.Select(c => c.StopAsync(cancellationToken))); - -} - -internal interface IBackgroundServiceSupervisor : 
IHostedService +internal interface IBackgroundServiceSupervisor : IConcurrentHostedService, IDisposable { // With predefined static size // (IAsyncDisposable later?..) internal sealed record Combined(ImmutableArray Supervisors) : - IBackgroundServiceSupervisor, IDisposable + IBackgroundServiceSupervisor { + public Combined(IBackgroundServiceSupervisor s1, IBackgroundServiceSupervisor s2) : this(new[] { s1, s2 }) + { + } + public Combined(IEnumerable supervisors) : this(supervisors.ToImmutableArray()) { } - public Task StartAsync(CancellationToken cancellationToken) => - Task.WhenAll(Supervisors.Select(c => c.StartAsync(cancellationToken))); + public Task StartAsync(CancellationToken ct) => + Task.WhenAll(Supervisors.Select(service => service.StartAsync(ct))); - public Task StopAsync(CancellationToken cancellationToken) => - Task.WhenAll(Supervisors.Select(c => c.StopAsync(cancellationToken))); + public Task StopAsync(CancellationToken ct) => + Task.WhenAll(Supervisors.Select(service => service.StopAsync(ct))); public bool Started => Supervisors.All(c => c.Started); public bool Running => Supervisors.All(c => c.Running); @@ -50,21 +34,11 @@ public Task StopAsync(CancellationToken cancellationToken) => public void Dispose() { - foreach (var consumer in Supervisors) - if (consumer is IDisposable disposable) - disposable.Dispose(); + foreach (var disposable in Supervisors) + disposable.Dispose(); } } - public bool Started { get; } - - public bool Running { get; } - - [MemberNotNullWhen(true, nameof(Exception))] - public bool Crashed { get; } - - public Exception? 
Exception { get; } - public sealed class LivenessCheck : IHealthCheck { private readonly IBackgroundServiceSupervisor _supervisor; @@ -80,13 +54,13 @@ public Task CheckHealthAsync(HealthCheckContext context, private HealthCheckResult CheckHealth(HealthCheckContext _) { if (_supervisor.Crashed) - return Unhealthy("Service has crashed", _supervisor.Exception); + return Unhealthy("Crashed", _supervisor.Exception); if (_supervisor is { Started: true, Running: false }) - return Unhealthy("Service is not running"); + return Unhealthy("Not running"); // Starting or running - return Healthy("Service is alive"); + return Healthy("Alive"); } } @@ -105,40 +79,42 @@ public Task CheckHealthAsync(HealthCheckContext context, private HealthCheckResult CheckHealth(HealthCheckContext context) { if (!_supervisor.Started) - return Unhealthy("Service has not been started yet", _supervisor.Exception); + return Unhealthy("Has not been started yet", _supervisor.Exception); if (_supervisor.Crashed) - return Unhealthy("Service has crashed", _supervisor.Exception); + return Unhealthy("Crashed", _supervisor.Exception); - return Healthy("Service is running"); + return Healthy("Running or completed"); } } + + public bool Started { get; } + + public bool Running { get; } + + [MemberNotNullWhen(true, nameof(Exception))] + public bool Crashed { get; } + + public Exception? Exception { get; } } -internal class BackgroundServiceSupervisor : IBackgroundServiceSupervisor, IDisposable +internal sealed class BackgroundServiceSupervisor : IBackgroundServiceSupervisor { - private readonly ILogger _logger; - private CancellationTokenSource? _executionCts; private Task? 
_execution; - public BackgroundServiceSupervisor(ILogger logger, string name, - IBackgroundService service) + public BackgroundServiceSupervisor(IBackgroundService service) { - _logger = logger; - Name = name; Service = service; } - public string Name { get; } - public IBackgroundService Service { get; } + // TODO StartedSuccessfully public bool Started => _executionCts is not null && _execution is not null; public bool Running => _execution is not null && !_execution.IsCompleted; - [MemberNotNullWhen(true, nameof(Exception))] public bool Crashed => Exception is not null; public Exception? Exception { get; private set; } @@ -146,7 +122,7 @@ public BackgroundServiceSupervisor(ILogger logger, public async Task StartAsync(CancellationToken ct) { if (_executionCts is not null) - throw new InvalidOperationException("Service has been already started"); + throw new InvalidOperationException("Execution has been already started"); _executionCts = new CancellationTokenSource(); @@ -160,68 +136,52 @@ public async Task StartAsync(CancellationToken ct) catch (Exception e) { Exception = e; - _logger.LogCritical(e, "Unhandled exception while starting {Name} service", Name); } } - private async Task ExecuteAsync(CancellationToken stoppingToken) + private async Task ExecuteAsync(CancellationToken ct) { - // In case stop has been already requested - if (stoppingToken.IsCancellationRequested) + if (ct.IsCancellationRequested) return; try { - await Service.ExecuteAsync(stoppingToken); - _logger.LogWarning("{Name} is done", Name); + await Service.ExecuteAsync(ct); + + // Completed } - catch (OperationCanceledException e) when (e.CancellationToken == stoppingToken) + catch (OperationCanceledException e) when (e.CancellationToken == ct) { - // The rest of the queue will be processed in StopAsync() below - _logger.LogInformation("Application exit has been requested, stopping {Name}...", Name); + // Completed gracefully on request } catch (Exception e) { Exception = e; - 
_logger.LogCritical(e, "{Name}: Unhandled exception", Name); } } public async Task StopAsync(CancellationToken forceExitToken) { - try - { - // Signal cancellation to the executing method - _executionCts?.Cancel(); - } - finally - { - // Wait until the execution completes or the app is forced to exit - _execution?.WaitAsync(forceExitToken); - } + if (_executionCts is null || _executionCts.IsCancellationRequested) + return; + + // Signal cancellation to the executing method + _executionCts.Cancel(); + + if (_execution is null) + return; + + // Wait until the execution completes or the app is forced to exit + await _execution.WaitAsync(forceExitToken); await Service.StopAsync(forceExitToken); - _logger.LogInformation("{Name} has been stopped", Name); } public void Dispose() { - _executionCts?.Cancel(); _executionCts?.Dispose(); // ReSharper disable once SuspiciousTypeConversion.Global - if (Service is IDisposable disposableService) - disposableService.Dispose(); + if (Service is IDisposable disposable) + disposable.Dispose(); } } - -//internal sealed class BackgroundServiceSupervisor : BackgroundServiceSupervisor -// where T : class, IBackgroundService, INamedService -//{ -// public BackgroundServiceSupervisor(ILogger> logger, T service) : -// base(logger, service.Name, service) -// { -// Service = service; -// } -// -// public new T Service { get; } -//} diff --git a/src/LocalPost/DependencyInjection/QueueRegistration.cs b/src/LocalPost/DependencyInjection/QueueRegistration.cs index c08dd73..8122195 100644 --- a/src/LocalPost/DependencyInjection/QueueRegistration.cs +++ b/src/LocalPost/DependencyInjection/QueueRegistration.cs @@ -1,6 +1,5 @@ using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.DependencyInjection.Extensions; -using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Options; namespace LocalPost.DependencyInjection; @@ -27,6 +26,8 @@ public static OptionsBuilder> AddBackgroundQueue(th public static OptionsBuilder> 
AddBackgroundQueue(this IServiceCollection services, Action> configure) { + services.TryAddConcurrentHostedServices(); + var handleStackBuilder = new MiddlewareStackBuilder(); configure(handleStackBuilder); var handlerStack = handleStackBuilder.Build(); @@ -35,8 +36,11 @@ public static OptionsBuilder> AddBackgroundQueue(th services.TryAddSingleton>(provider => provider.GetRequiredService>().Queue); - services.AddSingleton(provider => - provider.GetRequiredService>().Supervisor); + + services.AddSingleton(provider => + provider.GetRequiredService>().QueueSupervisor); + services.AddSingleton(provider => + provider.GetRequiredService>().ConsumerGroup); // Extend ServiceDescriptor for better comparison and implement custom TryAddSingleton later... diff --git a/src/LocalPost/DependencyInjection/Registration.cs b/src/LocalPost/DependencyInjection/Registration.cs new file mode 100644 index 0000000..e650636 --- /dev/null +++ b/src/LocalPost/DependencyInjection/Registration.cs @@ -0,0 +1,9 @@ +using Microsoft.Extensions.DependencyInjection; + +namespace LocalPost.DependencyInjection; + +public static class Registration +{ + public static void TryAddConcurrentHostedServices(this IServiceCollection services) => + services.AddHostedService(); +} diff --git a/src/LocalPost/HostedServices.cs b/src/LocalPost/HostedServices.cs new file mode 100644 index 0000000..851ed03 --- /dev/null +++ b/src/LocalPost/HostedServices.cs @@ -0,0 +1,31 @@ +using System.Collections.Immutable; +using Microsoft.Extensions.Hosting; + +namespace LocalPost; + +internal interface IConcurrentHostedService : IHostedService +{ +} + +internal sealed class HostedServices : IHostedService, IDisposable +{ + private readonly ImmutableArray _services; + + public HostedServices(IEnumerable services) + { + _services = services.ToImmutableArray(); + } + + public Task StartAsync(CancellationToken cancellationToken) => + Task.WhenAll(_services.Select(c => c.StartAsync(cancellationToken))); + + public Task 
StopAsync(CancellationToken cancellationToken) => + Task.WhenAll(_services.Select(c => c.StopAsync(cancellationToken))); + + public void Dispose() + { + foreach (var service in _services) + if (service is IDisposable disposable) + disposable.Dispose(); + } +} From a91a0e496c74160566a06f3b110800acb6523b5e Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Mon, 13 May 2024 18:59:24 +0000 Subject: [PATCH 05/33] WIP --- .github/dependabot.yml | 16 +- ARTICLES.md | 3 + LocalPost.sln | 48 +++- README.md | 3 +- docker-compose.yml | 127 +++++++++ samples/AmazonSqsApp/AmazonSqsApp.csproj | 19 -- samples/AmazonSqsApp/Program.cs | 52 ---- samples/AmazonSqsApp/WeatherForecast.cs | 12 - samples/AzureQueueApp/AzureQueueApp.csproj | 19 -- .../Controllers/WeatherForecastController.cs | 32 --- samples/AzureQueueApp/Program.cs | 36 --- .../Properties/launchSettings.json | 31 --- .../BackgroundQueueApp.csproj | 23 ++ .../Controllers/WeatherForecastController.cs | 2 +- samples/BackgroundQueueApp/Program.cs | 47 ++++ .../Properties/launchSettings.json | 0 .../WeatherForecast.cs | 2 +- .../appsettings.Development.json | 0 .../appsettings.json | 0 .../Controllers/WeatherForecastController.cs | 32 --- .../KafkaConsumerApp/KafkaConsumerApp.csproj | 24 +- samples/KafkaConsumerApp/Program.cs | 74 ++++-- .../Properties/launchSettings.json | 22 +- samples/KafkaConsumerApp/README.md | 3 + samples/KafkaConsumerApp/WeatherForecast.cs | 12 - .../appsettings.Development.json | 2 +- samples/KafkaConsumerApp/appsettings.json | 11 +- samples/SqsConsumerApp/Program.cs | 26 ++ .../Properties/launchSettings.json | 11 + samples/SqsConsumerApp/README.md | 1 + samples/SqsConsumerApp/SqsConsumerApp.csproj | 25 ++ .../appsettings.Development.json | 2 +- .../appsettings.json | 5 +- sonar-scan.sh | 4 +- .../AzureQueues.cs | 20 -- .../ConsumerOptions.cs | 18 -- .../DependencyInjection/HealthChecks.cs | 23 -- .../ServiceRegistration.cs | 37 --- .../IMessageHandler.cs | 7 - .../LocalPost.Azure.QueueConsumer.csproj 
| 55 ---- .../MessagePuller.cs | 65 ----- src/LocalPost.KafkaConsumer/ConsumeContext.cs | 77 +++++- .../DependencyInjection/HealthChecks.cs | 20 -- .../DependencyInjection/KafkaBuilder.cs | 99 +++++++ .../ServiceHealthCheckRegistration.cs | 21 ++ .../ServiceRegistration.cs | 51 ++-- src/LocalPost.KafkaConsumer/Exceptions.cs | 10 + src/LocalPost.KafkaConsumer/HandlerStack.cs | 76 ++++++ .../IMessageHandler.cs | 11 - .../KafkaActivitySource.cs | 122 +++++++++ .../KafkaConsumerService.cs | 90 ------- .../KafkaTopicClient.cs | 74 ++++++ .../LocalPost.KafkaConsumer.csproj | 9 +- src/LocalPost.KafkaConsumer/MessageSource.cs | 136 +++++----- src/LocalPost.KafkaConsumer/Middlewares.cs | 61 +++++ src/LocalPost.KafkaConsumer/OffsetManager.cs | 58 +++++ src/LocalPost.KafkaConsumer/Options.cs | 37 ++- src/LocalPost.KafkaConsumer/README.md | 23 ++ src/LocalPost.Polly/HandlerStack.cs | 24 ++ .../LocalPost.Polly.csproj} | 15 +- src/LocalPost.Polly/README.md | 1 + src/LocalPost.SqsConsumer/ConsumeContext.cs | 98 ++++++- .../ConsumerMiddleware.cs | 25 -- .../DependencyInjection/HealthChecks.cs | 21 -- .../ServiceHealthCheckRegistration.cs | 19 ++ .../ServiceRegistration.cs | 174 +++++++------ .../DependencyInjection/SqsBuilder.cs | 73 ++++++ src/LocalPost.SqsConsumer/HandlerStack.cs | 83 ++++++ src/LocalPost.SqsConsumer/IMessageHandler.cs | 8 - .../LocalPost.SqsConsumer.csproj | 9 +- src/LocalPost.SqsConsumer/MessageSource.cs | 90 ++++--- src/LocalPost.SqsConsumer/MetricsReporter.cs | 9 + src/LocalPost.SqsConsumer/Options.cs | 97 ++++--- src/LocalPost.SqsConsumer/QueueClient.cs | 109 ++++---- src/LocalPost.SqsConsumer/README.md | 1 + .../SqsActivitySource.cs | 148 +++++++++++ .../SqsConsumerService.cs | 153 +++++------ src/LocalPost/ActivityEx.cs | 27 ++ .../AsyncEnumerable/AsyncEnumerableEx.cs | 4 + .../AsyncEnumerable/AsyncEnumeratorEx.cs | 30 +++ src/LocalPost/AsyncEnumerable/BatchBuilder.cs | 121 +++++++-- .../BatchingAsyncEnumerable.cs | 162 +++++++----- 
.../ConcurrentAsyncEnumerable.cs | 43 +++ src/LocalPost/BackgroundJobQueue.cs | 7 +- src/LocalPost/BackgroundQueue.cs | 144 ++++++----- src/LocalPost/BackgroundQueueConsumer.cs | 224 ++++++---------- src/LocalPost/BackgroundQueueOptions.cs | 28 +- src/LocalPost/BackgroundQueueService.cs | 151 ++++++----- src/LocalPost/BackgroundQueueSupervisor.cs | 37 +-- src/LocalPost/BackgroundService.cs | 10 - src/LocalPost/BackgroundServiceSupervisor.cs | 187 -------------- src/LocalPost/ConcurrentHostedServices.cs | 194 ++++++++++++++ .../DependencyInjection/HealthChecks.cs | 32 ++- .../DependencyInjection/INamedService.cs | 26 ++ .../JobQueueRegistration.cs | 17 -- .../DependencyInjection/QueueRegistration.cs | 51 ---- .../DependencyInjection/Registration.cs | 145 ++++++++++- .../ServiceHealthCheckRegistration.cs | 16 ++ .../ServiceProviderLookups.cs | 10 +- .../ServiceRegistration.cs | 39 +++ src/LocalPost/HandlerStack.cs | 244 ++++++++++++++++++ src/LocalPost/HostedServices.cs | 31 --- src/LocalPost/LocalPost.csproj | 16 +- src/LocalPost/MiddlewareStack.cs | 26 -- src/LocalPost/MiddlewareStackBuilder.cs | 52 ---- src/LocalPost/Middlewares.cs | 50 ++++ .../{ChannelReaderEx.cs => Polyfills.cs} | 17 +- src/LocalPost/QueueOptions.cs | 4 - src/LocalPost/ScopedHandler.cs | 38 +-- .../LocalPost.KafkaConsumer.Tests.csproj | 29 +++ tests/LocalPost.KafkaConsumer.Tests/Usings.cs | 1 + .../LocalPost.SnsPublisher.Tests.csproj | 2 +- .../LocalPost.SqsConsumer.Tests.csproj | 4 +- .../BatchingAsyncEnumerableTests.cs | 5 +- tests/LocalPost.Tests/LocalPost.Tests.csproj | 7 +- 115 files changed, 3363 insertions(+), 1951 deletions(-) create mode 100644 ARTICLES.md create mode 100644 docker-compose.yml delete mode 100644 samples/AmazonSqsApp/AmazonSqsApp.csproj delete mode 100644 samples/AmazonSqsApp/Program.cs delete mode 100644 samples/AmazonSqsApp/WeatherForecast.cs delete mode 100644 samples/AzureQueueApp/AzureQueueApp.csproj delete mode 100644 
samples/AzureQueueApp/Controllers/WeatherForecastController.cs delete mode 100644 samples/AzureQueueApp/Program.cs delete mode 100644 samples/AzureQueueApp/Properties/launchSettings.json create mode 100644 samples/BackgroundQueueApp/BackgroundQueueApp.csproj rename samples/{AmazonSqsApp => BackgroundQueueApp}/Controllers/WeatherForecastController.cs (95%) create mode 100644 samples/BackgroundQueueApp/Program.cs rename samples/{AmazonSqsApp => BackgroundQueueApp}/Properties/launchSettings.json (100%) rename samples/{AzureQueueApp => BackgroundQueueApp}/WeatherForecast.cs (88%) rename samples/{AmazonSqsApp => BackgroundQueueApp}/appsettings.Development.json (100%) rename samples/{AmazonSqsApp => BackgroundQueueApp}/appsettings.json (100%) delete mode 100644 samples/KafkaConsumerApp/Controllers/WeatherForecastController.cs create mode 100644 samples/KafkaConsumerApp/README.md delete mode 100644 samples/KafkaConsumerApp/WeatherForecast.cs create mode 100644 samples/SqsConsumerApp/Program.cs create mode 100644 samples/SqsConsumerApp/Properties/launchSettings.json create mode 100644 samples/SqsConsumerApp/README.md create mode 100644 samples/SqsConsumerApp/SqsConsumerApp.csproj rename samples/{AzureQueueApp => SqsConsumerApp}/appsettings.Development.json (61%) rename samples/{AzureQueueApp => SqsConsumerApp}/appsettings.json (52%) delete mode 100644 src/LocalPost.Azure.QueueConsumer/AzureQueues.cs delete mode 100644 src/LocalPost.Azure.QueueConsumer/ConsumerOptions.cs delete mode 100644 src/LocalPost.Azure.QueueConsumer/DependencyInjection/HealthChecks.cs delete mode 100644 src/LocalPost.Azure.QueueConsumer/DependencyInjection/ServiceRegistration.cs delete mode 100644 src/LocalPost.Azure.QueueConsumer/IMessageHandler.cs delete mode 100644 src/LocalPost.Azure.QueueConsumer/LocalPost.Azure.QueueConsumer.csproj delete mode 100644 src/LocalPost.Azure.QueueConsumer/MessagePuller.cs delete mode 100644 src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecks.cs create mode 
100644 src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs create mode 100644 src/LocalPost.KafkaConsumer/DependencyInjection/ServiceHealthCheckRegistration.cs create mode 100644 src/LocalPost.KafkaConsumer/Exceptions.cs create mode 100644 src/LocalPost.KafkaConsumer/HandlerStack.cs delete mode 100644 src/LocalPost.KafkaConsumer/IMessageHandler.cs create mode 100644 src/LocalPost.KafkaConsumer/KafkaActivitySource.cs delete mode 100644 src/LocalPost.KafkaConsumer/KafkaConsumerService.cs create mode 100644 src/LocalPost.KafkaConsumer/KafkaTopicClient.cs create mode 100644 src/LocalPost.KafkaConsumer/Middlewares.cs create mode 100644 src/LocalPost.KafkaConsumer/OffsetManager.cs create mode 100644 src/LocalPost.KafkaConsumer/README.md create mode 100644 src/LocalPost.Polly/HandlerStack.cs rename src/{LocalPost.RabbitMqConsumer/LocalPost.RabbitMqConsumer.csproj => LocalPost.Polly/LocalPost.Polly.csproj} (77%) create mode 100644 src/LocalPost.Polly/README.md delete mode 100644 src/LocalPost.SqsConsumer/ConsumerMiddleware.cs delete mode 100644 src/LocalPost.SqsConsumer/DependencyInjection/HealthChecks.cs create mode 100644 src/LocalPost.SqsConsumer/DependencyInjection/ServiceHealthCheckRegistration.cs create mode 100644 src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs create mode 100644 src/LocalPost.SqsConsumer/HandlerStack.cs delete mode 100644 src/LocalPost.SqsConsumer/IMessageHandler.cs create mode 100644 src/LocalPost.SqsConsumer/MetricsReporter.cs create mode 100644 src/LocalPost.SqsConsumer/README.md create mode 100644 src/LocalPost.SqsConsumer/SqsActivitySource.cs create mode 100644 src/LocalPost/ActivityEx.cs create mode 100644 src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs create mode 100644 src/LocalPost/AsyncEnumerable/ConcurrentAsyncEnumerable.cs delete mode 100644 src/LocalPost/BackgroundService.cs delete mode 100644 src/LocalPost/BackgroundServiceSupervisor.cs create mode 100644 src/LocalPost/ConcurrentHostedServices.cs delete 
mode 100644 src/LocalPost/DependencyInjection/JobQueueRegistration.cs delete mode 100644 src/LocalPost/DependencyInjection/QueueRegistration.cs create mode 100644 src/LocalPost/DependencyInjection/ServiceHealthCheckRegistration.cs create mode 100644 src/LocalPost/DependencyInjection/ServiceRegistration.cs create mode 100644 src/LocalPost/HandlerStack.cs delete mode 100644 src/LocalPost/HostedServices.cs delete mode 100644 src/LocalPost/MiddlewareStack.cs delete mode 100644 src/LocalPost/MiddlewareStackBuilder.cs create mode 100644 src/LocalPost/Middlewares.cs rename src/LocalPost/{ChannelReaderEx.cs => Polyfills.cs} (51%) delete mode 100644 src/LocalPost/QueueOptions.cs create mode 100644 tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj create mode 100644 tests/LocalPost.KafkaConsumer.Tests/Usings.cs diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 4c5c935..91610e9 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,11 +1,23 @@ ---- version: 2 updates: - - package-ecosystem: github-actions + - package-ecosystem: "github-actions" directory: "/" schedule: interval: weekly - package-ecosystem: nuget directory: "/" + groups: + xunit: + patterns: + - xunit* schedule: interval: weekly + ignore: + # Ignore the libraries which are pinned + - dependency-name: "Microsoft.Bcl.AsyncInterfaces" + - dependency-name: "Microsoft.Extensions.Logging" + - dependency-name: "Microsoft.Extensions.Logging.Abstractions" + - dependency-name: "Microsoft.Extensions.Hosting.Abstractions" + - dependency-name: "Microsoft.Extensions.Options" + - dependency-name: "System.Diagnostics.DiagnosticSource" + - dependency-name: "System.Threading.Channels" diff --git a/ARTICLES.md b/ARTICLES.md new file mode 100644 index 0000000..d11b015 --- /dev/null +++ b/ARTICLES.md @@ -0,0 +1,3 @@ +# Existing tutorials + +https://blog.elmah.io/async-processing-of-long-running-tasks-in-asp-net-core/ diff --git a/LocalPost.sln b/LocalPost.sln index 
304488f..84a536d 100644 --- a/LocalPost.sln +++ b/LocalPost.sln @@ -2,7 +2,7 @@ Microsoft Visual Studio Solution File, Format Version 12.00 Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost", "src\LocalPost\LocalPost.csproj", "{474D2C1A-5557-4ED9-AF20-FE195D4C1AF7}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AmazonSqsApp", "samples\AmazonSqsApp\AmazonSqsApp.csproj", "{46FC61E6-D0FB-4D7D-A81B-20EF8D8D1F4E}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "BackgroundQueueApp", "samples\BackgroundQueueApp\BackgroundQueueApp.csproj", "{46FC61E6-D0FB-4D7D-A81B-20EF8D8D1F4E}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.SnsPublisher", "src\LocalPost.SnsPublisher\LocalPost.SnsPublisher.csproj", "{D256C568-2B42-4DCC-AB54-15B512A99C44}" EndProject @@ -22,10 +22,20 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.KafkaConsumer", " EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Examples", "Examples", "{405721DC-F290-4191-B638-9907D5EB042B}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "KafkaConsumerApp", "samples\KafkaConsumerApp\KafkaConsumerApp.csproj", "{CB4358CA-D6AD-4106-A7B5-FA4AC8C7E55B}" -EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AzureQueueApp", "samples\AzureQueueApp\AzureQueueApp.csproj", "{7C21BB9A-9C68-4750-84AA-272F201878A1}" EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "KafkaConsumerApp", "samples\KafkaConsumerApp\KafkaConsumerApp.csproj", "{C310487A-B976-4D3E-80AF-4ADBE1C63139}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SqsConsumerApp", "samples\SqsConsumerApp\SqsConsumerApp.csproj", "{2778AEBD-0345-4F79-9E93-73AFAB6C7BCD}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.Polly", "src\LocalPost.Polly\LocalPost.Polly.csproj", "{EA69FF51-BEF7-415C-836A-BB5432206F7E}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "RabbitMqConsumerApp", 
"samples\RabbitMqConsumerApp\RabbitMqConsumerApp.csproj", "{F9BD092A-DC16-43A8-B605-46FDFAEFBEDA}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.KafkaConsumer.Tests", "tests\LocalPost.KafkaConsumer.Tests\LocalPost.KafkaConsumer.Tests.csproj", "{734C9C76-B3D8-4AD7-8E76-B14539C3CB4D}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.RabbitMqConsumer.Tests", "tests\LocalPost.RabbitMqConsumer.Tests\LocalPost.RabbitMqConsumer.Tests.csproj", "{92461B19-6794-4EB2-BFFE-DCD9CDFFD6D8}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -72,18 +82,40 @@ Global {D9139C53-5B9F-49E7-80DF-41C995C37E2F}.Debug|Any CPU.Build.0 = Debug|Any CPU {D9139C53-5B9F-49E7-80DF-41C995C37E2F}.Release|Any CPU.ActiveCfg = Release|Any CPU {D9139C53-5B9F-49E7-80DF-41C995C37E2F}.Release|Any CPU.Build.0 = Release|Any CPU - {CB4358CA-D6AD-4106-A7B5-FA4AC8C7E55B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {CB4358CA-D6AD-4106-A7B5-FA4AC8C7E55B}.Debug|Any CPU.Build.0 = Debug|Any CPU - {CB4358CA-D6AD-4106-A7B5-FA4AC8C7E55B}.Release|Any CPU.ActiveCfg = Release|Any CPU - {CB4358CA-D6AD-4106-A7B5-FA4AC8C7E55B}.Release|Any CPU.Build.0 = Release|Any CPU {7C21BB9A-9C68-4750-84AA-272F201878A1}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {7C21BB9A-9C68-4750-84AA-272F201878A1}.Debug|Any CPU.Build.0 = Debug|Any CPU {7C21BB9A-9C68-4750-84AA-272F201878A1}.Release|Any CPU.ActiveCfg = Release|Any CPU {7C21BB9A-9C68-4750-84AA-272F201878A1}.Release|Any CPU.Build.0 = Release|Any CPU + {C310487A-B976-4D3E-80AF-4ADBE1C63139}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {C310487A-B976-4D3E-80AF-4ADBE1C63139}.Debug|Any CPU.Build.0 = Debug|Any CPU + {C310487A-B976-4D3E-80AF-4ADBE1C63139}.Release|Any CPU.ActiveCfg = Release|Any CPU + {C310487A-B976-4D3E-80AF-4ADBE1C63139}.Release|Any CPU.Build.0 = Release|Any CPU + {2778AEBD-0345-4F79-9E93-73AFAB6C7BCD}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + 
{2778AEBD-0345-4F79-9E93-73AFAB6C7BCD}.Debug|Any CPU.Build.0 = Debug|Any CPU + {2778AEBD-0345-4F79-9E93-73AFAB6C7BCD}.Release|Any CPU.ActiveCfg = Release|Any CPU + {2778AEBD-0345-4F79-9E93-73AFAB6C7BCD}.Release|Any CPU.Build.0 = Release|Any CPU + {EA69FF51-BEF7-415C-836A-BB5432206F7E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {EA69FF51-BEF7-415C-836A-BB5432206F7E}.Debug|Any CPU.Build.0 = Debug|Any CPU + {EA69FF51-BEF7-415C-836A-BB5432206F7E}.Release|Any CPU.ActiveCfg = Release|Any CPU + {EA69FF51-BEF7-415C-836A-BB5432206F7E}.Release|Any CPU.Build.0 = Release|Any CPU + {F9BD092A-DC16-43A8-B605-46FDFAEFBEDA}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {F9BD092A-DC16-43A8-B605-46FDFAEFBEDA}.Debug|Any CPU.Build.0 = Debug|Any CPU + {F9BD092A-DC16-43A8-B605-46FDFAEFBEDA}.Release|Any CPU.ActiveCfg = Release|Any CPU + {F9BD092A-DC16-43A8-B605-46FDFAEFBEDA}.Release|Any CPU.Build.0 = Release|Any CPU + {734C9C76-B3D8-4AD7-8E76-B14539C3CB4D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {734C9C76-B3D8-4AD7-8E76-B14539C3CB4D}.Debug|Any CPU.Build.0 = Debug|Any CPU + {734C9C76-B3D8-4AD7-8E76-B14539C3CB4D}.Release|Any CPU.ActiveCfg = Release|Any CPU + {734C9C76-B3D8-4AD7-8E76-B14539C3CB4D}.Release|Any CPU.Build.0 = Release|Any CPU + {92461B19-6794-4EB2-BFFE-DCD9CDFFD6D8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {92461B19-6794-4EB2-BFFE-DCD9CDFFD6D8}.Debug|Any CPU.Build.0 = Debug|Any CPU + {92461B19-6794-4EB2-BFFE-DCD9CDFFD6D8}.Release|Any CPU.ActiveCfg = Release|Any CPU + {92461B19-6794-4EB2-BFFE-DCD9CDFFD6D8}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(NestedProjects) = preSolution {46FC61E6-D0FB-4D7D-A81B-20EF8D8D1F4E} = {405721DC-F290-4191-B638-9907D5EB042B} - {CB4358CA-D6AD-4106-A7B5-FA4AC8C7E55B} = {405721DC-F290-4191-B638-9907D5EB042B} {7C21BB9A-9C68-4750-84AA-272F201878A1} = {405721DC-F290-4191-B638-9907D5EB042B} + {C310487A-B976-4D3E-80AF-4ADBE1C63139} = {405721DC-F290-4191-B638-9907D5EB042B} + {2778AEBD-0345-4F79-9E93-73AFAB6C7BCD} = 
{405721DC-F290-4191-B638-9907D5EB042B} + {F9BD092A-DC16-43A8-B605-46FDFAEFBEDA} = {405721DC-F290-4191-B638-9907D5EB042B} EndGlobalSection EndGlobal diff --git a/README.md b/README.md index 7d1c192..37dc6a8 100644 --- a/README.md +++ b/README.md @@ -28,7 +28,8 @@ Before version 8 .NET runtime handled start/stop of the services only synchronou See https://github.com/dotnet/runtime/blob/v8.0.0/src/libraries/Microsoft.Extensions.Hosting/src/Internal/Host.cs See https://github.com/dotnet/runtime/blob/main/src/libraries/Microsoft.Extensions.Hosting/src/HostOptions.cs -## Similar projects +## Similar projects / Inspiration +- [FastStream](https://github.com/airtai/faststream) — Python framework with almost the same concept - [Coravel queue](https://docs.coravel.net/Queuing/)/event broadcasting — only invocable queueing, event broadcasting is different from consuming a queue - [Hangfire](https://www.hangfire.io/) — for persistent queues (means payload serialisation), LocalPost is completely about in-memory ones diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..0e95e9a --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,127 @@ +# See https://github.com/conduktor/kafka-stack-docker-compose/blob/master/full-stack.yml + +services: + zoo1: + image: confluentinc/cp-zookeeper:7.3.2 + hostname: zoo1 + container_name: zoo1 + ports: + - "2181:2181" + environment: + ZOOKEEPER_CLIENT_PORT: 2181 + ZOOKEEPER_SERVER_ID: 1 + ZOOKEEPER_SERVERS: zoo1:2888:3888 + + kafka1: + image: confluentinc/cp-kafka:7.3.2 + hostname: kafka1 + container_name: kafka1 + ports: + - "9092:9092" + - "29092:29092" + - "9999:9999" + environment: + KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka1:19092,EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9092,DOCKER://host.docker.internal:29092 + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,DOCKER:PLAINTEXT + KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL + KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181" + KAFKA_BROKER_ID: 1 + 
KAFKA_LOG4J_LOGGERS: "kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO" + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 + KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 + KAFKA_JMX_PORT: 9001 + KAFKA_JMX_HOSTNAME: ${DOCKER_HOST_IP:-127.0.0.1} + KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer + KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true" + depends_on: + - zoo1 + + kafka-schema-registry: + image: confluentinc/cp-schema-registry:7.3.2 + hostname: kafka-schema-registry + container_name: kafka-schema-registry + ports: + - "8081:8081" + environment: + SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:19092 + SCHEMA_REGISTRY_HOST_NAME: kafka-schema-registry + SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 + depends_on: + - zoo1 + - kafka1 + + + kafka-rest-proxy: + image: confluentinc/cp-kafka-rest:7.3.2 + hostname: kafka-rest-proxy + container_name: kafka-rest-proxy + ports: + - "8082:8082" + environment: + # KAFKA_REST_ZOOKEEPER_CONNECT: zoo1:2181 + KAFKA_REST_LISTENERS: http://0.0.0.0:8082/ + KAFKA_REST_SCHEMA_REGISTRY_URL: http://kafka-schema-registry:8081/ + KAFKA_REST_HOST_NAME: kafka-rest-proxy + KAFKA_REST_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:19092 + depends_on: + - zoo1 + - kafka1 + - kafka-schema-registry + + + kafka-connect: + image: confluentinc/cp-kafka-connect:7.3.2 + hostname: kafka-connect + container_name: kafka-connect + ports: + - "8083:8083" + environment: + CONNECT_BOOTSTRAP_SERVERS: "kafka1:19092" + CONNECT_REST_PORT: 8083 + CONNECT_GROUP_ID: compose-connect-group + CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs + CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets + CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status + CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter + CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: 'http://kafka-schema-registry:8081' + CONNECT_VALUE_CONVERTER: 
io.confluent.connect.avro.AvroConverter + CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: 'http://kafka-schema-registry:8081' + CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter" + CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter" + CONNECT_REST_ADVERTISED_HOST_NAME: "kafka-connect" + CONNECT_LOG4J_ROOT_LOGLEVEL: "INFO" + CONNECT_LOG4J_LOGGERS: "org.apache.kafka.connect.runtime.rest=WARN,org.reflections=ERROR" + CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: "1" + CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: "1" + CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: "1" + CONNECT_PLUGIN_PATH: '/usr/share/java,/etc/kafka-connect/jars,/usr/share/confluent-hub-components' + volumes: + - ./connectors:/etc/kafka-connect/jars/ + depends_on: + - zoo1 + - kafka1 + - kafka-schema-registry + - kafka-rest-proxy + command: + - bash + - -c + - | + confluent-hub install --no-prompt debezium/debezium-connector-mysql:latest + confluent-hub install --no-prompt confluentinc/kafka-connect-datagen:0.4.0 + /etc/confluent/docker/run + + + ksqldb-server: + image: confluentinc/cp-ksqldb-server:7.3.2 + hostname: ksqldb-server + container_name: ksqldb-server + ports: + - "8088:8088" + environment: + KSQL_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:19092 + KSQL_LISTENERS: http://0.0.0.0:8088/ + KSQL_KSQL_SERVICE_ID: ksqldb-server_ + depends_on: + - zoo1 + - kafka1 diff --git a/samples/AmazonSqsApp/AmazonSqsApp.csproj b/samples/AmazonSqsApp/AmazonSqsApp.csproj deleted file mode 100644 index d090cfa..0000000 --- a/samples/AmazonSqsApp/AmazonSqsApp.csproj +++ /dev/null @@ -1,19 +0,0 @@ - - - - net7 - enable - - - - - - - - - - - - - - diff --git a/samples/AmazonSqsApp/Program.cs b/samples/AmazonSqsApp/Program.cs deleted file mode 100644 index bfd7da2..0000000 --- a/samples/AmazonSqsApp/Program.cs +++ /dev/null @@ -1,52 +0,0 @@ -using Amazon.SQS; -using AmazonSqsApp; -using LocalPost; -using LocalPost.DependencyInjection; -using LocalPost.SqsConsumer; -using 
LocalPost.SqsConsumer.DependencyInjection; - -var builder = WebApplication.CreateBuilder(args); - -// A background queue with an inline handler -builder.Services.AddBackgroundQueue( - // TODO Automatically add the health checks?.. - async (weather, ct) => - { - await Task.Delay(TimeSpan.FromSeconds(2), ct); - Console.WriteLine(weather.Summary); - }); - - -// An Amazon SQS consumer -builder.Services.AddAWSService(); -builder.Services.AddScoped(); -builder.Services.AddAmazonSqsConsumer("test"); - - -builder.Services.AddControllers(); -// Learn more about configuring Swagger/OpenAPI at https://aka.ms/aspnetcore/swashbuckle -builder.Services.AddEndpointsApiExplorer(); -builder.Services.AddSwaggerGen(); - -var app = builder.Build(); -if (app.Environment.IsDevelopment()) -{ - app.UseSwagger(); - app.UseSwaggerUI(); -} - -app.UseHttpsRedirection(); -app.UseAuthorization(); -app.MapControllers(); -app.Run(); - - - -class SqsHandler : IHandler -{ - public async Task InvokeAsync(ConsumeContext payload, CancellationToken ct) - { - await Task.Delay(1_000, ct); - Console.WriteLine(payload.Message.Body); - } -} diff --git a/samples/AmazonSqsApp/WeatherForecast.cs b/samples/AmazonSqsApp/WeatherForecast.cs deleted file mode 100644 index 6c1a09b..0000000 --- a/samples/AmazonSqsApp/WeatherForecast.cs +++ /dev/null @@ -1,12 +0,0 @@ -namespace AmazonSqsApp; - -public class WeatherForecast -{ - public DateTime Date { get; set; } - - public int TemperatureC { get; set; } - - public int TemperatureF => 32 + (int) (TemperatureC / 0.5556); - - public string? 
Summary { get; set; } -} diff --git a/samples/AzureQueueApp/AzureQueueApp.csproj b/samples/AzureQueueApp/AzureQueueApp.csproj deleted file mode 100644 index 2be9b2b..0000000 --- a/samples/AzureQueueApp/AzureQueueApp.csproj +++ /dev/null @@ -1,19 +0,0 @@ - - - - net7 - enable - - - - - - - - - - - - - - diff --git a/samples/AzureQueueApp/Controllers/WeatherForecastController.cs b/samples/AzureQueueApp/Controllers/WeatherForecastController.cs deleted file mode 100644 index c76fc47..0000000 --- a/samples/AzureQueueApp/Controllers/WeatherForecastController.cs +++ /dev/null @@ -1,32 +0,0 @@ -using Microsoft.AspNetCore.Mvc; - -namespace AzureQueueApp.Controllers; - -[ApiController] -[Route("[controller]")] -public class WeatherForecastController : ControllerBase -{ - private static readonly string[] Summaries = new[] - { - "Freezing", "Bracing", "Chilly", "Cool", "Mild", "Warm", "Balmy", "Hot", "Sweltering", "Scorching" - }; - - private readonly ILogger _logger; - - public WeatherForecastController(ILogger logger) - { - _logger = logger; - } - - [HttpGet(Name = "GetWeatherForecast")] - public IEnumerable Get() - { - return Enumerable.Range(1, 5).Select(index => new WeatherForecast - { - Date = DateTime.Now.AddDays(index), - TemperatureC = Random.Shared.Next(-20, 55), - Summary = Summaries[Random.Shared.Next(Summaries.Length)] - }) - .ToArray(); - } -} diff --git a/samples/AzureQueueApp/Program.cs b/samples/AzureQueueApp/Program.cs deleted file mode 100644 index 1f194e4..0000000 --- a/samples/AzureQueueApp/Program.cs +++ /dev/null @@ -1,36 +0,0 @@ -using Azure.Identity; -using Microsoft.Extensions.Azure; - -var builder = WebApplication.CreateBuilder(args); - -// Add services to the container. 
- -builder.Services.AddControllers(); -// Learn more about configuring Swagger/OpenAPI at https://aka.ms/aspnetcore/swashbuckle -builder.Services.AddEndpointsApiExplorer(); -builder.Services.AddSwaggerGen(); - -builder.Services.AddAzureClients(clientBuilder => -{ - clientBuilder.AddQueueServiceClient(); - // Use DefaultAzureCredential by default - clientBuilder.UseCredential(new DefaultAzureCredential()); -}); - - -var app = builder.Build(); - -// Configure the HTTP request pipeline. -if (app.Environment.IsDevelopment()) -{ - app.UseSwagger(); - app.UseSwaggerUI(); -} - -app.UseHttpsRedirection(); - -app.UseAuthorization(); - -app.MapControllers(); - -app.Run(); diff --git a/samples/AzureQueueApp/Properties/launchSettings.json b/samples/AzureQueueApp/Properties/launchSettings.json deleted file mode 100644 index 6964d33..0000000 --- a/samples/AzureQueueApp/Properties/launchSettings.json +++ /dev/null @@ -1,31 +0,0 @@ -{ - "$schema": "https://json.schemastore.org/launchsettings.json", - "iisSettings": { - "windowsAuthentication": false, - "anonymousAuthentication": true, - "iisExpress": { - "applicationUrl": "http://localhost:41855", - "sslPort": 44346 - } - }, - "profiles": { - "AzureQueueApp": { - "commandName": "Project", - "dotnetRunMessages": true, - "launchBrowser": true, - "launchUrl": "swagger", - "applicationUrl": "https://localhost:7088;http://localhost:5207", - "environmentVariables": { - "ASPNETCORE_ENVIRONMENT": "Development" - } - }, - "IIS Express": { - "commandName": "IISExpress", - "launchBrowser": true, - "launchUrl": "swagger", - "environmentVariables": { - "ASPNETCORE_ENVIRONMENT": "Development" - } - } - } -} diff --git a/samples/BackgroundQueueApp/BackgroundQueueApp.csproj b/samples/BackgroundQueueApp/BackgroundQueueApp.csproj new file mode 100644 index 0000000..4495560 --- /dev/null +++ b/samples/BackgroundQueueApp/BackgroundQueueApp.csproj @@ -0,0 +1,23 @@ + + + + net8 + enable + enable + + + + + + + + + + + + + appsettings.json + + + + diff 
--git a/samples/AmazonSqsApp/Controllers/WeatherForecastController.cs b/samples/BackgroundQueueApp/Controllers/WeatherForecastController.cs similarity index 95% rename from samples/AmazonSqsApp/Controllers/WeatherForecastController.cs rename to samples/BackgroundQueueApp/Controllers/WeatherForecastController.cs index 928117d..1e84d13 100644 --- a/samples/AmazonSqsApp/Controllers/WeatherForecastController.cs +++ b/samples/BackgroundQueueApp/Controllers/WeatherForecastController.cs @@ -1,7 +1,7 @@ using LocalPost; using Microsoft.AspNetCore.Mvc; -namespace AmazonSqsApp.Controllers; +namespace BackgroundQueueApp.Controllers; [ApiController] [Route("[controller]")] diff --git a/samples/BackgroundQueueApp/Program.cs b/samples/BackgroundQueueApp/Program.cs new file mode 100644 index 0000000..1036890 --- /dev/null +++ b/samples/BackgroundQueueApp/Program.cs @@ -0,0 +1,47 @@ +using BackgroundQueueApp; +using LocalPost; +using LocalPost.Polly; +using LocalPost.DependencyInjection; +using Polly; +using Polly.Retry; + +var builder = WebApplication.CreateBuilder(args); + +// See https://github.com/App-vNext/Polly/blob/main/docs/migration-v8.md +var resiliencePipeline = new ResiliencePipelineBuilder() + .AddRetry(new RetryStrategyOptions + { + MaxRetryAttempts = 3, + Delay = TimeSpan.FromSeconds(1), + BackoffType = DelayBackoffType.Constant, + ShouldHandle = new PredicateBuilder().Handle() + }) + .AddTimeout(TimeSpan.FromSeconds(3)) + .Build(); + +// A background queue with an inline handler +builder.Services.AddBackgroundQueue( + HandlerStack.For(async (weather, ct) => + { + await Task.Delay(TimeSpan.FromSeconds(2), ct); + Console.WriteLine(weather.Summary); + }) + .UsePollyPipeline(resiliencePipeline) + .LogErrors() +); +builder.Services.AddControllers(); +// Learn more about configuring Swagger/OpenAPI at https://aka.ms/aspnetcore/swashbuckle +builder.Services.AddEndpointsApiExplorer(); +builder.Services.AddSwaggerGen(); + +var app = builder.Build(); +if 
(app.Environment.IsDevelopment()) +{ + app.UseSwagger(); + app.UseSwaggerUI(); +} + +app.UseHttpsRedirection(); +app.UseAuthorization(); +app.MapControllers(); +app.Run(); diff --git a/samples/AmazonSqsApp/Properties/launchSettings.json b/samples/BackgroundQueueApp/Properties/launchSettings.json similarity index 100% rename from samples/AmazonSqsApp/Properties/launchSettings.json rename to samples/BackgroundQueueApp/Properties/launchSettings.json diff --git a/samples/AzureQueueApp/WeatherForecast.cs b/samples/BackgroundQueueApp/WeatherForecast.cs similarity index 88% rename from samples/AzureQueueApp/WeatherForecast.cs rename to samples/BackgroundQueueApp/WeatherForecast.cs index 507bcdc..a7947f4 100644 --- a/samples/AzureQueueApp/WeatherForecast.cs +++ b/samples/BackgroundQueueApp/WeatherForecast.cs @@ -1,4 +1,4 @@ -namespace AzureQueueApp; +namespace BackgroundQueueApp; public class WeatherForecast { diff --git a/samples/AmazonSqsApp/appsettings.Development.json b/samples/BackgroundQueueApp/appsettings.Development.json similarity index 100% rename from samples/AmazonSqsApp/appsettings.Development.json rename to samples/BackgroundQueueApp/appsettings.Development.json diff --git a/samples/AmazonSqsApp/appsettings.json b/samples/BackgroundQueueApp/appsettings.json similarity index 100% rename from samples/AmazonSqsApp/appsettings.json rename to samples/BackgroundQueueApp/appsettings.json diff --git a/samples/KafkaConsumerApp/Controllers/WeatherForecastController.cs b/samples/KafkaConsumerApp/Controllers/WeatherForecastController.cs deleted file mode 100644 index 26a207e..0000000 --- a/samples/KafkaConsumerApp/Controllers/WeatherForecastController.cs +++ /dev/null @@ -1,32 +0,0 @@ -using Microsoft.AspNetCore.Mvc; - -namespace KafkaConsumerApp.Controllers; - -[ApiController] -[Route("[controller]")] -public class WeatherForecastController : ControllerBase -{ - private static readonly string[] Summaries = new[] - { - "Freezing", "Bracing", "Chilly", "Cool", "Mild", 
"Warm", "Balmy", "Hot", "Sweltering", "Scorching" - }; - - private readonly ILogger _logger; - - public WeatherForecastController(ILogger logger) - { - _logger = logger; - } - - [HttpGet(Name = "GetWeatherForecast")] - public IEnumerable Get() - { - return Enumerable.Range(1, 5).Select(index => new WeatherForecast - { - Date = DateTime.Now.AddDays(index), - TemperatureC = Random.Shared.Next(-20, 55), - Summary = Summaries[Random.Shared.Next(Summaries.Length)] - }) - .ToArray(); - } -} diff --git a/samples/KafkaConsumerApp/KafkaConsumerApp.csproj b/samples/KafkaConsumerApp/KafkaConsumerApp.csproj index e2ff44c..623a387 100644 --- a/samples/KafkaConsumerApp/KafkaConsumerApp.csproj +++ b/samples/KafkaConsumerApp/KafkaConsumerApp.csproj @@ -1,19 +1,29 @@ - + - net7 + net8 + enable enable - - - - + - + + + + + + + + + + + + appsettings.json + diff --git a/samples/KafkaConsumerApp/Program.cs b/samples/KafkaConsumerApp/Program.cs index 48863a6..a00ba14 100644 --- a/samples/KafkaConsumerApp/Program.cs +++ b/samples/KafkaConsumerApp/Program.cs @@ -1,25 +1,65 @@ -var builder = WebApplication.CreateBuilder(args); +using System.ComponentModel.DataAnnotations; +using Confluent.Kafka; +using LocalPost; +using LocalPost.KafkaConsumer; +using LocalPost.KafkaConsumer.DependencyInjection; -// Add services to the container. 
+var host = Host.CreateDefaultBuilder(args) + .ConfigureServices((context, services) => + { + services.AddOptions() + .Bind(context.Configuration.GetSection(KafkaOptions.ConfigSection)) + .ValidateDataAnnotations(); -builder.Services.AddControllers(); -// Learn more about configuring Swagger/OpenAPI at https://aka.ms/aspnetcore/swashbuckle -builder.Services.AddEndpointsApiExplorer(); -builder.Services.AddSwaggerGen(); + services.AddScoped(); + services.AddKafkaConsumer("orders", + builder => + { + builder.SetHandler(); + }, + builder => + { + builder.SetValueDeserializer(new StringDeserializer()); + }).Configure((options, kafkaOptions) => + { + options.Kafka.GroupId = ""; + options.Kafka.AutoOffsetReset = AutoOffsetReset.Earliest; + options.Kafka.EnableAutoCommit = false; // TODO DryRun -var app = builder.Build(); + options.Kafka.BootstrapServers = "localhost:9092"; + options.Kafka.SecurityProtocol = SecurityProtocol.SaslSsl; + options.Kafka.SaslMechanism = SaslMechanism.Plain; + options.Kafka.SaslUsername = "admin"; + options.Kafka.SaslPassword = ""; + }); + // Only one consumer per name (topic) is allowed + services.AddBatchKafkaConsumer("orders", + builder => + { + }, + builder => + { + }); + }) + .Build(); -// Configure the HTTP request pipeline. 
-if (app.Environment.IsDevelopment()) -{ - app.UseSwagger(); - app.UseSwaggerUI(); -} +host.Run(); -app.UseHttpsRedirection(); +public sealed record KafkaOptions +{ + public const string ConfigSection = "Kafka"; -app.UseAuthorization(); + [Required] + public string BootstrapServers { get; init; } = null!; -app.MapControllers(); + public Dictionary Consumers { get; init; } = new(); +} -app.Run(); +internal class KafkaTopicHandler : IHandler> +{ + public async Task InvokeAsync(ConsumeContext payload, CancellationToken ct) + { + await Task.Delay(1_000, ct); + Console.WriteLine(payload.Payload); + } +} diff --git a/samples/KafkaConsumerApp/Properties/launchSettings.json b/samples/KafkaConsumerApp/Properties/launchSettings.json index e82fbd4..0f2fe16 100644 --- a/samples/KafkaConsumerApp/Properties/launchSettings.json +++ b/samples/KafkaConsumerApp/Properties/launchSettings.json @@ -1,30 +1,10 @@ { - "$schema": "https://json.schemastore.org/launchsettings.json", - "iisSettings": { - "windowsAuthentication": false, - "anonymousAuthentication": true, - "iisExpress": { - "applicationUrl": "http://localhost:13538", - "sslPort": 44379 - } - }, "profiles": { "KafkaConsumerApp": { "commandName": "Project", "dotnetRunMessages": true, - "launchBrowser": true, - "launchUrl": "swagger", - "applicationUrl": "https://localhost:7104;http://localhost:5164", - "environmentVariables": { - "ASPNETCORE_ENVIRONMENT": "Development" - } - }, - "IIS Express": { - "commandName": "IISExpress", - "launchBrowser": true, - "launchUrl": "swagger", "environmentVariables": { - "ASPNETCORE_ENVIRONMENT": "Development" + "DOTNET_ENVIRONMENT": "Development" } } } diff --git a/samples/KafkaConsumerApp/README.md b/samples/KafkaConsumerApp/README.md new file mode 100644 index 0000000..3a9614d --- /dev/null +++ b/samples/KafkaConsumerApp/README.md @@ -0,0 +1,3 @@ +# Kafka Consumer + +See https://github.com/confluentinc/confluent-kafka-dotnet/tree/master/examples/Protobuf diff --git 
a/samples/KafkaConsumerApp/WeatherForecast.cs b/samples/KafkaConsumerApp/WeatherForecast.cs deleted file mode 100644 index af7b542..0000000 --- a/samples/KafkaConsumerApp/WeatherForecast.cs +++ /dev/null @@ -1,12 +0,0 @@ -namespace KafkaConsumerApp; - -public class WeatherForecast -{ - public DateTime Date { get; set; } - - public int TemperatureC { get; set; } - - public int TemperatureF => 32 + (int) (TemperatureC / 0.5556); - - public string? Summary { get; set; } -} diff --git a/samples/KafkaConsumerApp/appsettings.Development.json b/samples/KafkaConsumerApp/appsettings.Development.json index 0c208ae..b2dcdb6 100644 --- a/samples/KafkaConsumerApp/appsettings.Development.json +++ b/samples/KafkaConsumerApp/appsettings.Development.json @@ -2,7 +2,7 @@ "Logging": { "LogLevel": { "Default": "Information", - "Microsoft.AspNetCore": "Warning" + "Microsoft.Hosting.Lifetime": "Information" } } } diff --git a/samples/KafkaConsumerApp/appsettings.json b/samples/KafkaConsumerApp/appsettings.json index 10f68b8..e4eb4e5 100644 --- a/samples/KafkaConsumerApp/appsettings.json +++ b/samples/KafkaConsumerApp/appsettings.json @@ -2,8 +2,15 @@ "Logging": { "LogLevel": { "Default": "Information", - "Microsoft.AspNetCore": "Warning" + "Microsoft.Hosting.Lifetime": "Information" } }, - "AllowedHosts": "*" + "Kafka": { + "DryRun": true, + "BootstrapServers": "localhost:9092", + "orders": { + "Topic": "orders", + "GroupId": "orders-group" + } + } } diff --git a/samples/SqsConsumerApp/Program.cs b/samples/SqsConsumerApp/Program.cs new file mode 100644 index 0000000..9b8bf4f --- /dev/null +++ b/samples/SqsConsumerApp/Program.cs @@ -0,0 +1,26 @@ +using Amazon.SQS; +using LocalPost; +using LocalPost.SqsConsumer; +using LocalPost.SqsConsumer.DependencyInjection; + +var host = Host.CreateDefaultBuilder(args) + .ConfigureServices(services => + { + services.AddAWSService(); + services.AddScoped(); + services.AddAmazonSqsConsumer("test"); + }) + .Build(); + +host.Run(); + + +// FIXME 
System.Text.Json deserializer... +internal class SqsHandler : IHandler +{ + public async Task InvokeAsync(ConsumeContext payload, CancellationToken ct) + { + await Task.Delay(1_000, ct); + Console.WriteLine(payload.Message.Body); + } +} diff --git a/samples/SqsConsumerApp/Properties/launchSettings.json b/samples/SqsConsumerApp/Properties/launchSettings.json new file mode 100644 index 0000000..91ef085 --- /dev/null +++ b/samples/SqsConsumerApp/Properties/launchSettings.json @@ -0,0 +1,11 @@ +{ + "profiles": { + "SqsConsumerApp": { + "commandName": "Project", + "dotnetRunMessages": true, + "environmentVariables": { + "DOTNET_ENVIRONMENT": "Development" + } + } + } +} diff --git a/samples/SqsConsumerApp/README.md b/samples/SqsConsumerApp/README.md new file mode 100644 index 0000000..cddfea0 --- /dev/null +++ b/samples/SqsConsumerApp/README.md @@ -0,0 +1 @@ +# SQS Consumer diff --git a/samples/SqsConsumerApp/SqsConsumerApp.csproj b/samples/SqsConsumerApp/SqsConsumerApp.csproj new file mode 100644 index 0000000..d91229f --- /dev/null +++ b/samples/SqsConsumerApp/SqsConsumerApp.csproj @@ -0,0 +1,25 @@ + + + + net8 + enable + enable + + + + + + + + + + + + + + + appsettings.json + + + + diff --git a/samples/AzureQueueApp/appsettings.Development.json b/samples/SqsConsumerApp/appsettings.Development.json similarity index 61% rename from samples/AzureQueueApp/appsettings.Development.json rename to samples/SqsConsumerApp/appsettings.Development.json index 0c208ae..b2dcdb6 100644 --- a/samples/AzureQueueApp/appsettings.Development.json +++ b/samples/SqsConsumerApp/appsettings.Development.json @@ -2,7 +2,7 @@ "Logging": { "LogLevel": { "Default": "Information", - "Microsoft.AspNetCore": "Warning" + "Microsoft.Hosting.Lifetime": "Information" } } } diff --git a/samples/AzureQueueApp/appsettings.json b/samples/SqsConsumerApp/appsettings.json similarity index 52% rename from samples/AzureQueueApp/appsettings.json rename to samples/SqsConsumerApp/appsettings.json index 
10f68b8..b2dcdb6 100644 --- a/samples/AzureQueueApp/appsettings.json +++ b/samples/SqsConsumerApp/appsettings.json @@ -2,8 +2,7 @@ "Logging": { "LogLevel": { "Default": "Information", - "Microsoft.AspNetCore": "Warning" + "Microsoft.Hosting.Lifetime": "Information" } - }, - "AllowedHosts": "*" + } } diff --git a/sonar-scan.sh b/sonar-scan.sh index 3252ee4..33a9c09 100755 --- a/sonar-scan.sh +++ b/sonar-scan.sh @@ -8,7 +8,7 @@ set -e # $SONAR_TOKEN must be defined # $GitVersion_FullSemVer can be used to specify the current version (see GitVersion) -VERSION="" +VERSION="dev" if [ -n "$GitVersion_FullSemVer" ]; then VERSION="/v:"$GitVersion_FullSemVer fi @@ -16,7 +16,7 @@ fi dotnet build-server shutdown dotnet sonarscanner begin \ /d:sonar.host.url="https://sonarcloud.io" /d:sonar.login="$SONAR_TOKEN" \ - /o:"alexeyshockov" /k:"alexeyshockov_LocalPost" $VERSION \ + /o:"alexeyshockov" /k:"alexeyshockov_LocalPost" "$VERSION" \ /d:sonar.dotnet.excludeTestProjects=true \ /d:sonar.cs.opencover.reportsPaths="tests/*/TestResults/*/coverage.opencover.xml" \ /d:sonar.cs.vstest.reportsPaths="tests/*/TestResults/*.trx" diff --git a/src/LocalPost.Azure.QueueConsumer/AzureQueues.cs b/src/LocalPost.Azure.QueueConsumer/AzureQueues.cs deleted file mode 100644 index 9dc1b60..0000000 --- a/src/LocalPost.Azure.QueueConsumer/AzureQueues.cs +++ /dev/null @@ -1,20 +0,0 @@ -using Azure.Storage.Queues; - -namespace LocalPost.Azure.QueueConsumer; - -internal interface IAzureQueues -{ - QueueClient Get(string name); -} - -internal sealed class AzureQueues : IAzureQueues -{ - private readonly QueueServiceClient _client; - - public AzureQueues(QueueServiceClient client) - { - _client = client; - } - - public QueueClient Get(string name) => _client.GetQueueClient(name); -} diff --git a/src/LocalPost.Azure.QueueConsumer/ConsumerOptions.cs b/src/LocalPost.Azure.QueueConsumer/ConsumerOptions.cs deleted file mode 100644 index 7765394..0000000 --- a/src/LocalPost.Azure.QueueConsumer/ConsumerOptions.cs 
+++ /dev/null @@ -1,18 +0,0 @@ -using System.ComponentModel.DataAnnotations; - -namespace LocalPost.Azure.QueueConsumer; - -/// -/// General Azure Storage Queue consumer settings -/// -public sealed record ConsumerOptions -{ - /// - /// How many messages to process in parallel. - /// - [Required] public ushort MaxConcurrency { get; set; } = 10; - - [Required] public string QueueName { get; set; } = null!; - - [Range(1, 32)] public byte BufferSize { get; set; } = 10; -} diff --git a/src/LocalPost.Azure.QueueConsumer/DependencyInjection/HealthChecks.cs b/src/LocalPost.Azure.QueueConsumer/DependencyInjection/HealthChecks.cs deleted file mode 100644 index 5cb5e4f..0000000 --- a/src/LocalPost.Azure.QueueConsumer/DependencyInjection/HealthChecks.cs +++ /dev/null @@ -1,23 +0,0 @@ -using System.Diagnostics.CodeAnalysis; -using LocalPost.DependencyInjection; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Diagnostics.HealthChecks; - -namespace LocalPost.Azure.QueueConsumer.DependencyInjection; - -public static class HealthChecks -{ - [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] - public static IHealthChecksBuilder AddAmazonSqsConsumerReadinessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, - TimeSpan? timeout = default) => builder - .AddBackgroundServiceReadinessCheck(name, failureStatus, tags, timeout) - .AddBackgroundQueueConsumerReadinessCheck(name, failureStatus, tags, timeout); - - [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] - public static IHealthChecksBuilder AddAmazonSqsConsumerLivenessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, - TimeSpan? 
timeout = default) => builder - .AddBackgroundServiceReadinessCheck(name, failureStatus, tags, timeout) - .AddBackgroundQueueConsumerReadinessCheck(name, failureStatus, tags, timeout); -} diff --git a/src/LocalPost.Azure.QueueConsumer/DependencyInjection/ServiceRegistration.cs b/src/LocalPost.Azure.QueueConsumer/DependencyInjection/ServiceRegistration.cs deleted file mode 100644 index 65f8e5a..0000000 --- a/src/LocalPost.Azure.QueueConsumer/DependencyInjection/ServiceRegistration.cs +++ /dev/null @@ -1,37 +0,0 @@ -using Azure.Storage.Queues.Models; -using LocalPost.DependencyInjection; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.DependencyInjection.Extensions; -using Microsoft.Extensions.Options; - -namespace LocalPost.Azure.QueueConsumer.DependencyInjection; - -public static class ServiceRegistration -{ - public static OptionsBuilder AddAzureQueueConsumer(this IServiceCollection services, - string name) where THandler : IHandler => - services - .AddAzureQueueConsumer(name, provider => provider.GetRequiredService().InvokeAsync); - - public static OptionsBuilder AddAzureQueueConsumer(this IServiceCollection services, - string name, Func> handlerFactory) - { - // Expect Azure QueueServiceClient to be registered in the DI container using the usual way, - // see https://learn.microsoft.com/en-us/dotnet/azure/sdk/dependency-injection#register-client - services.TryAddSingleton(); - - services.TryAddSingleton(); - services.AddSingleton(provider => ActivatorUtilities.CreateInstance(provider, name)); - - services - .AddCustomBackgroundQueue($"AzureQueue/{name}", - provider => provider.GetQueue(name), - provider => provider.GetQueue(name).Wrap(handlerFactory(provider))) - .Configure>( - (options, consumerOptions) => { options.MaxConcurrency = consumerOptions.Get(name).MaxConcurrency; }); - - // TODO Health check, metrics - - return services.AddOptions(name).Configure(options => options.QueueName = name); - } -} diff --git 
a/src/LocalPost.Azure.QueueConsumer/IMessageHandler.cs b/src/LocalPost.Azure.QueueConsumer/IMessageHandler.cs deleted file mode 100644 index 3aceb1b..0000000 --- a/src/LocalPost.Azure.QueueConsumer/IMessageHandler.cs +++ /dev/null @@ -1,7 +0,0 @@ -using Azure.Storage.Queues.Models; - -namespace LocalPost.Azure.QueueConsumer; - -public interface IMessageHandler : IHandler -{ -} diff --git a/src/LocalPost.Azure.QueueConsumer/LocalPost.Azure.QueueConsumer.csproj b/src/LocalPost.Azure.QueueConsumer/LocalPost.Azure.QueueConsumer.csproj deleted file mode 100644 index 0062328..0000000 --- a/src/LocalPost.Azure.QueueConsumer/LocalPost.Azure.QueueConsumer.csproj +++ /dev/null @@ -1,55 +0,0 @@ - - - - netstandard2.0 - true - - false - - LocalPost.Azure.QueueConsumer - background;task;queue;azure;sqs - Local (in-process) background queue for sending to Amazon SNS. - Alexey Shokov - - README.md - MIT - https://github.com/alexeyshockov/LocalPost - git - true - - - - - - - - - - true - - - - true - true - true - true - snupkg - - - true - - - - - - - - - - - - - - - - diff --git a/src/LocalPost.Azure.QueueConsumer/MessagePuller.cs b/src/LocalPost.Azure.QueueConsumer/MessagePuller.cs deleted file mode 100644 index 5d338d8..0000000 --- a/src/LocalPost.Azure.QueueConsumer/MessagePuller.cs +++ /dev/null @@ -1,65 +0,0 @@ -using System.Diagnostics; -using Azure; -using Azure.Storage.Queues; -using Azure.Storage.Queues.Models; -using Microsoft.Extensions.Options; - -namespace LocalPost.Azure.QueueConsumer; - -internal sealed class MessagePuller : IAsyncEnumerable -{ - private static readonly ActivitySource Tracer = new(typeof(MessagePuller).Namespace); - - private readonly QueueClient _queue; - private readonly ConsumerOptions _options; - - public MessagePuller(IAzureQueues queues, string name, IOptionsMonitor options) - { - _options = options.Get(name); - var queueName = _options.QueueName ?? 
throw new ArgumentNullException(nameof(options), "Queue name is required"); - - Name = name; - - _queue = queues.Get(queueName); - } - - public string Name { get; } - - public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) - { - while (!ct.IsCancellationRequested) - { - var messages = await PullMessagesAsync(ct); - - foreach (var message in messages) - yield return message; - } - } - - private async Task> PullMessagesAsync(CancellationToken ct) - { - using var span = Tracer.StartActivity(); - - // Azure SDK handles network failures - var response = await _queue.ReceiveMessagesAsync(_options.BufferSize, null, ct); - - return response.Value; - } - - public Handler Wrap(Handler handler) => async (message, ct) => - { - await handler(message, ct); - - // Won't be deleted in case of an exception in the handler - await DeleteMessageAsync(message, ct); - }; - - private async Task DeleteMessageAsync(QueueMessage message, CancellationToken ct) - { - using var span = Tracer.StartActivity(); - - await _queue.DeleteMessageAsync(message.MessageId, message.PopReceipt, ct); - - // TODO Log failures?.. - } -} diff --git a/src/LocalPost.KafkaConsumer/ConsumeContext.cs b/src/LocalPost.KafkaConsumer/ConsumeContext.cs index 10356f4..badafa8 100644 --- a/src/LocalPost.KafkaConsumer/ConsumeContext.cs +++ b/src/LocalPost.KafkaConsumer/ConsumeContext.cs @@ -1,11 +1,80 @@ +using System.Diagnostics.CodeAnalysis; using Confluent.Kafka; +using JetBrains.Annotations; +using LocalPost.AsyncEnumerable; namespace LocalPost.KafkaConsumer; -public readonly record struct ConsumeContext +internal static class ConsumeContext { - // To commit the offset manually, we need something. But it's a complex case... For the future. 
-// public required IConsumer Client { get; init; } + public static BatchBuilderFactory, BatchConsumeContext> BatchBuilder( + BatchSize batchMaxSizeSize, TimeSpan timeWindow) => ct => + new BatchConsumeContext.Builder(batchMaxSizeSize, timeWindow, ct); +} + +[PublicAPI] +public readonly record struct ConsumeContext +{ + internal readonly KafkaTopicClient Client; + internal readonly TopicPartitionOffset Offset; + internal readonly Message Message; + public readonly T Payload; + + internal ConsumeContext(KafkaTopicClient client, TopicPartitionOffset offset, Message message, + T payload) + { + Client = client; + Offset = offset; + Message = message; + Payload = payload; + } + + public string Topic => Client.Topic; + + public IReadOnlyList Headers => Message.Headers.BackingList; + + public ConsumeContext Transform(TOut payload) => new(Client, Offset, Message, payload); + + public static implicit operator T(ConsumeContext context) => context.Payload; + + public void Deconstruct(out T payload, out IReadOnlyList headers) + { + payload = Payload; + headers = Headers; + } +} + +[PublicAPI] +public readonly record struct BatchConsumeContext +{ + internal sealed class Builder : + BoundedBatchBuilderBase, BatchConsumeContext> + { + public Builder(BatchSize batchMaxSizeSize, TimeSpan timeWindow, CancellationToken ct = default) : + base(batchMaxSizeSize, timeWindow, ct) + { + } + + public override BatchConsumeContext Build() => new(Batch); + } + + public readonly IReadOnlyList> Messages; + + internal BatchConsumeContext(IReadOnlyList> messages) + { + if (messages.Count == 0) + throw new ArgumentException("Batch must contain at least one message", nameof(messages)); + + Messages = messages; + } + + public BatchConsumeContext Transform(ConsumeContext[] payload) => new(payload); + + public BatchConsumeContext Transform(IEnumerable> payload) => + Transform(payload.ToArray()); + + internal KafkaTopicClient Client => Messages[^1].Client; - public required ConsumeResult Result { get; 
init; } + // Use .MaxBy() to not rely on the order?.. + internal TopicPartitionOffset LatestOffset => Messages[^1].Offset; } diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecks.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecks.cs deleted file mode 100644 index 9c7e88e..0000000 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecks.cs +++ /dev/null @@ -1,20 +0,0 @@ -using System.Diagnostics.CodeAnalysis; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Diagnostics.HealthChecks; - -namespace LocalPost.KafkaConsumer.DependencyInjection; - -public static class HealthChecks -{ - [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] - public static IHealthChecksBuilder AddKafkaConsumerReadinessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder - .Add(KafkaConsumerService.QueueReadinessCheck(name, failureStatus, tags)) - .Add(KafkaConsumerService.ConsumerGroupReadinessCheck(name, failureStatus, tags)); - - [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] - public static IHealthChecksBuilder AddKafkaConsumerLivenessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? 
tags = default) => builder - .Add(KafkaConsumerService.QueueLivenessCheck(name, failureStatus, tags)) - .Add(KafkaConsumerService.ConsumerGroupLivenessCheck(name, failureStatus, tags)); -} diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs new file mode 100644 index 0000000..82c118b --- /dev/null +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs @@ -0,0 +1,99 @@ +using Confluent.Kafka; +using JetBrains.Annotations; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Options; + +namespace LocalPost.KafkaConsumer.DependencyInjection; + +[PublicAPI] +public sealed class KafkaBuilder : OptionsBuilder +{ + public KafkaBuilder(IServiceCollection services, string? name) : base(services, name) + { + } + +// public static OptionsBuilder AddKafkaConsumer(this IServiceCollection Services, string name, +// Action>> configure, +// Action> configureClient) => +// Services.AddKafkaConsumer(name, configure, configureClient); + +// // JSON serializer is the default... But for Kafka it can be different?.. +// public static OptionsBuilder AddKafkaConsumer(this IServiceCollection Services, +// string name) where THandler : IHandler> => Services.AddKafkaConsumer(name, provider => +// { +// // Keep .Scoped() as far as possible, as from that point all the middlewares will be resolved per request, not +// // just once +// var handlerFactory = HandlerStack2.From>().Scoped() +// .Deserialize(null) +// // TODO Error handler, just log all the errors and proceed (to not break the loop) +// .Acknowledge(); +// +// return handlerFactory(provider); +// }); + + public OptionsBuilder AddConsumer(string name, HandlerFactory> configure) + { + if (string.IsNullOrEmpty(name)) // TODO Just default (empty?) name... 
+ throw new ArgumentException("A proper (non empty) name is required", nameof(name)); + + // Services.AddSingleton(); + + if (!Services.TryAddKafkaClient(name)) + throw new ArgumentException("Kafka consumer is already registered", nameof(name)); + + Services.TryAddNamedSingleton(name, provider => + new MessageSource(provider.GetRequiredService(name))); + Services.AddBackgroundServiceForNamed(name); + + Services.TryAddConsumerGroup, MessageSource>(name, configure); + + return Services.AddOptions(name).Configure>((options, commonConfigs) => + { + var commonConfig = commonConfigs.Get(Name); + + options.Topic = name; + options.Kafka = new ConsumerConfig(commonConfig) + { + EnableAutoOffsetStore = false, // We will store offsets manually, see ConsumeContext class + }; + }); + } + + public OptionsBuilder AddBatchConsumer(string name, + HandlerFactory> configure) + { + if (string.IsNullOrEmpty(name)) // TODO Just default (empty?) name... + throw new ArgumentException("A proper (non empty) name is required", nameof(name)); + + // Services.AddSingleton(); + + if (!Services.TryAddKafkaClient(name)) + throw new ArgumentException("Kafka consumer is already registered", nameof(name)); + + Services.TryAddNamedSingleton(name, provider => + { + var options = provider.GetOptions(name); + + return new BatchMessageSource(provider.GetRequiredService(name), + ConsumeContext.BatchBuilder( + options.BatchMaxSize, TimeSpan.FromMilliseconds(options.BatchTimeWindowMilliseconds))); + }); + Services.AddBackgroundServiceForNamed(name); + + Services.TryAddConsumerGroup, BatchMessageSource>(name, configure); +// Services.TryAddConsumerGroup(name, provider => BackgroundQueue.ConsumerGroupFor( +// provider.GetRequiredService(name), configure(provider), 1)); + + return Services.AddOptions(name).Configure>((options, commonConfigs) => + { + var commonConfig = commonConfigs.Get(Name); + + options.Topic = name; + options.Kafka = new ConsumerConfig(commonConfig) + { + EnableAutoOffsetStore = false, // 
We will store offsets manually, see ConsumeContext class + }; + }); + } +} diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceHealthCheckRegistration.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceHealthCheckRegistration.cs new file mode 100644 index 0000000..07af069 --- /dev/null +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceHealthCheckRegistration.cs @@ -0,0 +1,21 @@ +using JetBrains.Annotations; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; + +namespace LocalPost.KafkaConsumer.DependencyInjection; + +[PublicAPI] +public static class ServiceHealthCheckRegistration +{ + // Check if the same check is added twice?.. + public static IHealthChecksBuilder AddKafkaConsumerLivenessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder + .Add(HealthChecks.LivenessCheckForNamed(name, failureStatus, tags)) + .AddConsumerGroupLivenessCheck>(); + + public static IHealthChecksBuilder AddKafkaBatchConsumerLivenessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? 
tags = default) => builder + .Add(HealthChecks.LivenessCheckForNamed(name, failureStatus, tags)) + .AddConsumerGroupLivenessCheck>(); +} diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs index 6d1abab..78db23a 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs @@ -1,38 +1,29 @@ -using Confluent.Kafka; +using JetBrains.Annotations; using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.DependencyInjection.Extensions; -using Microsoft.Extensions.Options; +using Microsoft.Extensions.Logging; namespace LocalPost.KafkaConsumer.DependencyInjection; +[PublicAPI] public static class ServiceRegistration { - public static OptionsBuilder AddKafkaConsumer(this IServiceCollection services, string name, - Action>> configure, - Action> configureClient) => - services.AddKafkaConsumer(name, configure, configureClient); - - public static OptionsBuilder AddKafkaConsumer(this IServiceCollection services, string name, - Action>> configure, - Action> configureClient) - { - services.TryAddConcurrentHostedServices(); - - var handleStackBuilder = new MiddlewareStackBuilder>(); - configure(handleStackBuilder); - var handlerStack = handleStackBuilder.Build(); - - services.TryAddSingleton(provider => KafkaConsumerService.Create(provider, - name, handlerStack, configureClient)); - - services.AddSingleton(provider => - provider.GetRequiredService>(name).Reader); - services.AddSingleton(provider => - provider.GetRequiredService>(name).ConsumerGroup); - - // Extend ServiceDescriptor for better comparison and implement custom TryAddSingleton later... - - return services.AddOptions(name).Configure(options => options.TopicName = name); - } + public static KafkaBuilder AddKafka(this IServiceCollection services, string? 
name = null) => + new(services, name); + + internal static bool TryAddKafkaClient(this IServiceCollection services, string name) + where TOptions : Options => + services.TryAddNamedSingleton(name, provider => + { + var options = provider.GetOptions(name); + + return new KafkaTopicClient(provider.GetRequiredService>(), + options.Kafka, options.Topic, name); + }); + + // Wrap it into an internal class, to avoid collision with other libraries?.. +// public static OptionsBuilder ConfigureKafkaConsumerDefaults(this IServiceCollection Services, +// Action configure) => +// // FIXME EnableAutoOffsetStore +// Services.AddOptions().Configure(configure); } diff --git a/src/LocalPost.KafkaConsumer/Exceptions.cs b/src/LocalPost.KafkaConsumer/Exceptions.cs new file mode 100644 index 0000000..782d421 --- /dev/null +++ b/src/LocalPost.KafkaConsumer/Exceptions.cs @@ -0,0 +1,10 @@ +using Confluent.Kafka; + +namespace LocalPost.KafkaConsumer; + +internal static class Exceptions +{ + public static bool IsTransient(this ConsumeException exception) => + // See https://github.com/confluentinc/confluent-kafka-dotnet/issues/1424#issuecomment-705749252 + exception.Error.Code is ErrorCode.Local_KeyDeserialization or ErrorCode.Local_ValueDeserialization; +} diff --git a/src/LocalPost.KafkaConsumer/HandlerStack.cs b/src/LocalPost.KafkaConsumer/HandlerStack.cs new file mode 100644 index 0000000..ae66e0b --- /dev/null +++ b/src/LocalPost.KafkaConsumer/HandlerStack.cs @@ -0,0 +1,76 @@ +using Confluent.Kafka; +using JetBrains.Annotations; + +namespace LocalPost.KafkaConsumer; + +[PublicAPI] +public static class KafkaHandlerStack +{ + public static HandlerFactory> Trace( + this HandlerFactory> handlerStack) => + handlerStack.Map, ConsumeContext>(next => + async (context, ct) => + { + using var activity = KafkaActivitySource.StartProcessing(context); + try + { + await next(context, ct); + activity?.Success(); + } + catch (Exception ex) + { + activity?.Error(ex); + } + }); + + public static 
HandlerFactory> Trace( + this HandlerFactory> handlerStack) => + handlerStack.Map, BatchConsumeContext>(next => + async (context, ct) => + { + using var activity = KafkaActivitySource.StartProcessing(context); + try + { + await next(context, ct); + activity?.Success(); + } + catch (Exception ex) + { + activity?.Error(ex); + } + }); + + public static HandlerFactory> Acknowledge( + this HandlerFactory> handlerStack) => + handlerStack.Map, ConsumeContext>(next => + async (context, ct) => + { + await next(context, ct); + context.Client.StoreOffset(context.Offset); + }); + + public static HandlerFactory> Acknowledge( + this HandlerFactory> handlerStack) => + handlerStack.Map, BatchConsumeContext>(next => + async (context, ct) => + { + await next(context, ct); + context.Client.StoreOffset(context.LatestOffset); + }); + + public static HandlerFactory> Deserialize( + this HandlerFactory> handlerStack, IAsyncDeserializer deserializer) + { + var middleware = new DeserializationMiddleware { Deserializer = deserializer }; + + return handlerStack.Map(middleware.Invoke); + } + + public static HandlerFactory> Deserialize( + this HandlerFactory> handlerStack, IAsyncDeserializer deserializer) + { + var middleware = new DeserializationMiddleware { Deserializer = deserializer }; + + return handlerStack.Map(middleware.Invoke); + } +} diff --git a/src/LocalPost.KafkaConsumer/IMessageHandler.cs b/src/LocalPost.KafkaConsumer/IMessageHandler.cs deleted file mode 100644 index 746a6d0..0000000 --- a/src/LocalPost.KafkaConsumer/IMessageHandler.cs +++ /dev/null @@ -1,11 +0,0 @@ -using Confluent.Kafka; - -namespace LocalPost.KafkaConsumer; - -public interface IMessageHandler : IHandler> -{ -} - -public interface IMessageHandler : IMessageHandler -{ -} diff --git a/src/LocalPost.KafkaConsumer/KafkaActivitySource.cs b/src/LocalPost.KafkaConsumer/KafkaActivitySource.cs new file mode 100644 index 0000000..ff10ffa --- /dev/null +++ b/src/LocalPost.KafkaConsumer/KafkaActivitySource.cs @@ -0,0 +1,122 
@@ +using System.Diagnostics; +using System.Reflection; +using System.Text; +using Confluent.Kafka; + +namespace LocalPost.KafkaConsumer; + +internal static class MessageUtils +{ + public static void ExtractTraceFieldFromHeaders(object? carrier, string fieldName, + out string? fieldValue, out IEnumerable? fieldValues) + { + fieldValues = default; + fieldValue = null; + if (carrier is not IEnumerable message) + return; + + var headerValue = message.FirstOrDefault(header => header.Key == fieldName)?.GetValueBytes(); + if (headerValue is not null) + fieldValue = Encoding.UTF8.GetString(headerValue); + } +} + +internal static class KafkaActivityExtensions +{ + public static void AcceptDistributedTracingFrom(this Activity activity, Message message) + { + var propagator = DistributedContextPropagator.Current; + propagator.ExtractTraceIdAndState(message.Headers, MessageUtils.ExtractTraceFieldFromHeaders, + out var traceParent, out var traceState); + + if (string.IsNullOrEmpty(traceParent)) + return; + activity.SetParentId(traceParent!); + if (!string.IsNullOrEmpty(traceState)) + activity.TraceStateString = traceState; + + var baggage = propagator.ExtractBaggage(message.Headers, MessageUtils.ExtractTraceFieldFromHeaders); + if (baggage is null) + return; + foreach (var baggageItem in baggage) + activity.AddBaggage(baggageItem.Key, baggageItem.Value); + } + + public static Activity? SetDefaultTags(this Activity? activity, KafkaTopicClient client) + { + // See https://opentelemetry.io/docs/specs/semconv/messaging/messaging-spans/#messaging-attributes + activity?.SetTag("messaging.system", "kafka"); + + activity?.SetTag("messaging.destination.name", client.Topic); + activity?.SetTag("messaging.kafka.consumer.group", client.GroupId); + + // activity?.SetTag("messaging.client_id", "service_name"); + // activity?.SetTag("server.address", client.QueueUrl.Host); + // activity?.SetTag("server.port", client.QueueUrl.Port); + + return activity; + } + + public static Activity? 
SetTagsFor(this Activity? activity, ConsumeContext context) + { + // activity?.SetTag("messaging.message.id", context.MessageId); + activity?.SetTag("messaging.kafka.message.offset", context.Offset.Offset.Value); + + // Skip, as we always ignore the key on consumption + // activity.SetTag("messaging.kafka.message.key", context.Message.Key); + + return activity; + } + + public static Activity? SetTagsFor(this Activity? activity, BatchConsumeContext context) => + activity?.SetTag("messaging.batch.message_count", context.Messages.Count); +} + +// Npgsql as an inspiration: +// - https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlActivitySource.cs#LL61C31-L61C49 +// - https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlCommand.cs#L1639-L1644 +// Also OTEL semantic convention: https://opentelemetry.io/docs/specs/semconv/messaging/messaging-spans/ +internal static class KafkaActivitySource +{ + private static readonly System.Diagnostics.ActivitySource Source; + + public static bool IsEnabled => Source.HasListeners(); + + static KafkaActivitySource() + { + // See https://stackoverflow.com/a/909583/322079 + var assembly = Assembly.GetExecutingAssembly(); + var version = AssemblyName.GetAssemblyName(assembly.Location).Version; + Source = new System.Diagnostics.ActivitySource(assembly.FullName, version.ToString()); + } + + public static Activity? StartProcessing(ConsumeContext context) + { + var activity = Source.CreateActivity($"{context.Client.Topic} process", ActivityKind.Consumer); + if (activity is { IsAllDataRequested: true }) + { + activity.SetDefaultTags(context.Client); + activity.SetTagsFor(context); + activity.AcceptDistributedTracingFrom(context.Message); + } + + activity?.Start(); + + return activity; + } + + + public static Activity? 
StartProcessing(BatchConsumeContext context) + { + var activity = Source.StartActivity($"{context.Client.Topic} process", ActivityKind.Consumer); + if (activity is not { IsAllDataRequested: true }) + return activity; + + activity.SetDefaultTags(context.Client); + activity.SetTagsFor(context); + + // TODO Accept distributed tracing headers, per each message... + + return activity; + } +} diff --git a/src/LocalPost.KafkaConsumer/KafkaConsumerService.cs b/src/LocalPost.KafkaConsumer/KafkaConsumerService.cs deleted file mode 100644 index 09b4154..0000000 --- a/src/LocalPost.KafkaConsumer/KafkaConsumerService.cs +++ /dev/null @@ -1,90 +0,0 @@ -using System.Collections.Immutable; -using Confluent.Kafka; -using LocalPost.DependencyInjection; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Diagnostics.HealthChecks; -using Microsoft.Extensions.Hosting; - -namespace LocalPost.KafkaConsumer; - -internal sealed class KafkaConsumerService : INamedService -{ - public static KafkaConsumerService Create(IServiceProvider provider, string name, - MiddlewareStack> handlerStack, - Action> configureClient) - { - var options = provider.GetOptions(name); - - var clientBuilder = new ConsumerBuilder(options.Kafka); - configureClient(clientBuilder); - - var kafkaClient = clientBuilder.Build(); - var messageSource = ActivatorUtilities.CreateInstance>(provider, - options.TopicName, kafkaClient); - var reader = new BackgroundServiceSupervisor(messageSource); - - HandlerFactory> handlerFactory = handlerStack.Resolve; - Handler> handler = - ActivatorUtilities.CreateInstance>>(provider, - name, handlerFactory).InvokeAsync; - - var consumer = new BackgroundQueue>.Consumer(messageSource, handler); - var consumerGroup = new ConsumerGroup(consumer.Run, options.MaxConcurrency); - - return new KafkaConsumerService(name, reader, consumerGroup); - } - - public KafkaConsumerService(string name, IBackgroundServiceSupervisor reader, - IBackgroundServiceSupervisor consumerGroup) 
- { - Name = name; - - Reader = reader; - _readerReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(reader); - _readerLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(reader); - - ConsumerGroup = consumerGroup; - _consumerGroupReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(consumerGroup); - _consumerGroupLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(consumerGroup); - } - - public string Name { get; } - - // Expose only the root supervisor to the host, to avoid deadlocks (.NET runtime handles background services - // synchronously by default, so if consumers are stopped first, they will block the reader from completing the - // channel). -// public IHostedService Supervisor { get; } - - public IConcurrentHostedService Reader { get; } - private readonly IHealthCheck _readerReadinessCheck; - private readonly IHealthCheck _readerLivenessCheck; - - public IConcurrentHostedService ConsumerGroup { get; } - private readonly IHealthCheck _consumerGroupReadinessCheck; - private readonly IHealthCheck _consumerGroupLivenessCheck; - - public static HealthCheckRegistration QueueReadinessCheck(string name, HealthStatus? failureStatus = default, - IEnumerable? tags = default) => new(name, - provider => provider.GetRequiredService>(name)._readerReadinessCheck, - failureStatus, - tags); - - public static HealthCheckRegistration QueueLivenessCheck(string name, HealthStatus? failureStatus = default, - IEnumerable? tags = default) => new(name, - provider => provider.GetRequiredService>(name)._readerLivenessCheck, - failureStatus, - tags); - - public static HealthCheckRegistration ConsumerGroupReadinessCheck(string name, - HealthStatus? failureStatus = default, - IEnumerable? tags = default) => new(name, - provider => provider.GetRequiredService>(name)._consumerGroupReadinessCheck, - failureStatus, - tags); - - public static HealthCheckRegistration ConsumerGroupLivenessCheck(string name, HealthStatus? 
failureStatus = default, - IEnumerable? tags = default) => new(name, - provider => provider.GetRequiredService>(name)._consumerGroupLivenessCheck, - failureStatus, - tags); -} diff --git a/src/LocalPost.KafkaConsumer/KafkaTopicClient.cs b/src/LocalPost.KafkaConsumer/KafkaTopicClient.cs new file mode 100644 index 0000000..1f0c4e5 --- /dev/null +++ b/src/LocalPost.KafkaConsumer/KafkaTopicClient.cs @@ -0,0 +1,74 @@ +using Confluent.Kafka; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.Logging; + +namespace LocalPost.KafkaConsumer; + +internal sealed class KafkaTopicClient : INamedService, IDisposable +{ + private readonly ILogger _logger; + private readonly IConsumer _client; + + public KafkaTopicClient(ILogger logger, ConsumerConfig config, string topic, string name) + { + _logger = logger; + + var clientBuilder = new ConsumerBuilder(config); + // TODO Error handler, logger + _client = clientBuilder.Build(); + + Topic = topic; + GroupId = config.GroupId; + Name = name; + } + + public string Topic { get; } + + public string GroupId { get; } + + public string Name { get; } + + public void Subscribe() => _client.Subscribe(Topic); + + public void Close() + { + _logger.LogInformation("Stopping Kafka {Topic} consumer...", Topic); + + _client.Close(); // No need for additional .Dispose() call + } + + public void StoreOffset(TopicPartitionOffset topicPartitionOffset) => + _client.StoreOffset(topicPartitionOffset); + + public ConsumeContext Read(CancellationToken ct = default) + { + while (true) + { + try + { + var result = _client.Consume(ct); + + // Log an empty receive?.. 
+ if (result is null || result.IsPartitionEOF || result.Message is null) + continue; // Continue waiting for a message + + return new ConsumeContext(this, result.TopicPartitionOffset, result.Message, + result.Message.Value); + } + catch (ConsumeException e) when (!e.Error.IsFatal) + { + _logger.LogError(e, "Kafka {Topic} consumer error, more details: {HelpLink}", + Topic, e.HelpLink); + + // "generally, the producer should recover from all errors, except where marked fatal" as per + // https://github.com/confluentinc/confluent-kafka-dotnet/issues/1213#issuecomment-599772818, so + // just continue polling + } + } + } + + public void Dispose() + { + _client.Dispose(); + } +} diff --git a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj index 5863ea8..83ff635 100644 --- a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj +++ b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj @@ -7,9 +7,10 @@ false LocalPost.KafkaConsumer - background;task;queue;kafka - Local (in-process) background queue for sending to Amazon SNS. Alexey Shokov + Opinionated Kafka Consumer library, built to be simple, yet flexible.
+ https://github.com/alexeyshockov/LocalPost/v$(Version) + background;task;queue;kafka README.md MIT @@ -19,7 +20,7 @@ - + @@ -44,6 +45,8 @@ + + diff --git a/src/LocalPost.KafkaConsumer/MessageSource.cs b/src/LocalPost.KafkaConsumer/MessageSource.cs index 7fcda42..a309fc3 100644 --- a/src/LocalPost.KafkaConsumer/MessageSource.cs +++ b/src/LocalPost.KafkaConsumer/MessageSource.cs @@ -1,104 +1,86 @@ -using System.Threading.Channels; -using Confluent.Kafka; -using Microsoft.Extensions.Logging; +using System.Runtime.CompilerServices; +using LocalPost.AsyncEnumerable; +using LocalPost.DependencyInjection; namespace LocalPost.KafkaConsumer; -internal sealed class MessageSource : IBackgroundService, - IAsyncEnumerable>, IDisposable +internal sealed class MessageSource : MessageSourceBase, IAsyncEnumerable> { - private readonly ILogger> _logger; - private readonly string _topicName; + private readonly ConcurrentAsyncEnumerable> _source; - private readonly IConsumer _kafka; - private readonly Channel> _queue; - - public MessageSource(ILogger> logger, string topicName, IConsumer kafka) + public MessageSource(KafkaTopicClient client) : base(client) { - _logger = logger; - _topicName = topicName; - _kafka = kafka; - _queue = Channel.CreateBounded>(new BoundedChannelOptions(1) - { - SingleWriter = true, - SingleReader = false - }); + _source = ConsumeAsync().ToConcurrent(); } - public async IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct = default) + public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); + + public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => + _source.GetAsyncEnumerator(ct); +} + +internal sealed class BatchMessageSource : MessageSourceBase, IAsyncEnumerable> +{ + private readonly ConcurrentAsyncEnumerable> _source; + + public BatchMessageSource(KafkaTopicClient client, + BatchBuilderFactory, BatchConsumeContext> factory) : base(client) { - // Track full or not later - while (await 
_queue.Reader.WaitToReadAsync(ct)) - while (_queue.Reader.TryRead(out var item)) - yield return item; + _source = ConsumeAsync().Batch(factory).ToConcurrent(); } - public static implicit operator ChannelReader>(MessageSource that) => - that._queue.Reader; + public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); - public static implicit operator ChannelWriter>(MessageSource that) => - that._queue.Writer; + public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => + _source.GetAsyncEnumerator(ct); +} + +internal abstract class MessageSourceBase : IBackgroundService, INamedService +{ + private readonly KafkaTopicClient _client; - public Task StartAsync(CancellationToken ct) => Task.Run(() => _kafka.Subscribe(_topicName), ct); + private bool _stopped; - public Task ExecuteAsync(CancellationToken ct) => Task.Run(() => Run(ct), ct); + // Some additional reading: https://devblogs.microsoft.com/premier-developer/the-danger-of-taskcompletionsourcet-class/ +// private readonly TaskCompletionSource _executionTcs = new(TaskCreationOptions.RunContinuationsAsynchronously); - private async Task Run(CancellationToken stoppingToken = default) + protected MessageSourceBase(KafkaTopicClient client) { - while (!stoppingToken.IsCancellationRequested) - { - await Task.Yield(); - - try - { - await _queue.Writer.WaitToWriteAsync(stoppingToken); // Wait for the buffer capacity - - await Consume(stoppingToken); - } - catch (OperationCanceledException e) when (e.CancellationToken == stoppingToken) - { - // Just complete the method normally... 
- } - } + _client = client; } - private async Task Consume(CancellationToken stoppingToken) + public string Name => _client.Name; + + // Run on a separate thread, as Confluent Kafka API is blocking + public Task StartAsync(CancellationToken ct) => Task.Run(() => _client.Subscribe(), ct); + + public abstract Task ExecuteAsync(CancellationToken ct); + + protected async IAsyncEnumerable> ConsumeAsync( + [EnumeratorCancellation] CancellationToken ct = default) { // TODO Transaction activity... - try - { - var consumeResult = _kafka.Consume(stoppingToken); - - if (consumeResult is null || consumeResult.IsPartitionEOF || consumeResult.Message is null) - return; // Continue the loop - - await _queue.Writer.WriteAsync(new ConsumeContext - { -// Client = _kafka, - Result = consumeResult, - }, stoppingToken); - } - catch (ConsumeException e) - { - _logger.LogError(e, "Kafka {Topic} consumer error, help link: {HelpLink}", - _topicName, e.HelpLink); - - // Bubble up, so the supervisor can report the error and the whole app can be restarted (by Kubernetes or - // another orchestrator) - throw; - } + + // Give the control back in the beginning, just before blocking in the Kafka's consumer call + await Task.Yield(); + foreach (var result in Consume(ct)) + yield return result; } - public Task StopAsync(CancellationToken ct) => Task.Run(() => + private IEnumerable> Consume(CancellationToken ct) { - _logger.LogInformation("Stopping Kafka {Topic} consumer...", _topicName); + // TODO Transaction activity... 
- _kafka.Close(); - _queue.Writer.Complete(); - }, ct); + while (!ct.IsCancellationRequested && !_stopped) + yield return _client.Read(ct); - public void Dispose() - { - _kafka.Dispose(); + ct.ThrowIfCancellationRequested(); } + + // Run on a separate thread, as Confluent Kafka API is blocking + public Task StopAsync(CancellationToken ct) => Task.Run(() => + { + _stopped = true; + _client.Close(); + }, ct); } diff --git a/src/LocalPost.KafkaConsumer/Middlewares.cs b/src/LocalPost.KafkaConsumer/Middlewares.cs new file mode 100644 index 0000000..a973b5b --- /dev/null +++ b/src/LocalPost.KafkaConsumer/Middlewares.cs @@ -0,0 +1,61 @@ +using System.Collections.Immutable; +using Confluent.Kafka; +using Microsoft.Extensions.DependencyInjection; + +namespace LocalPost.KafkaConsumer; + +// internal static class Middlewares +// { +// public static HandlerMiddleware, ConsumeContext> Acknowledge(IServiceProvider provider) => +// provider.GetRequiredService().Invoke; +// +// public static HandlerMiddleware, BatchConsumeContext> AcknowledgeBatch( +// IServiceProvider provider) => provider.GetRequiredService().Invoke; +// } +// +// internal sealed class AcknowledgeMiddleware +// { +// private readonly ImmutableDictionary _clients; +// +// public AcknowledgeMiddleware(IEnumerable clients) +// { +// _clients = clients.ToImmutableDictionary(client => client.Name, client => client); +// } +// +// public Handler> Invoke(Handler> next) => async (context, ct) => +// { +// await next(context, ct); +// _clients[context.ClientName].StoreOffset(context.Message); +// }; +// +// public Handler> Invoke(Handler> next) => async (context, ct) => +// { +// await next(context, ct); +// _clients[context.ConsumerName].StoreOffset(context.Latest); +// }; +// } + +internal sealed class DeserializationMiddleware : + IHandlerMiddleware, BatchConsumeContext>, + IHandlerMiddleware, ConsumeContext> +{ + public required IAsyncDeserializer Deserializer { get; init; } + + public Handler> Invoke(Handler> next) 
=> + async (context, ct) => await next(await Deserialize(context), ct); + + public Handler> Invoke(Handler> next) => + async (context, ct) => + { + var messages = await Task.WhenAll(context.Messages.Select(Deserialize)); + await next(context.Transform(messages), ct); + }; + + private async Task> Deserialize(ConsumeContext context) + { + var payload = await Deserializer.DeserializeAsync(context.Payload, false, new SerializationContext( + MessageComponentType.Value, context.Topic, context.Message.Headers)); + + return context.Transform(payload); + } +} diff --git a/src/LocalPost.KafkaConsumer/OffsetManager.cs b/src/LocalPost.KafkaConsumer/OffsetManager.cs new file mode 100644 index 0000000..1e0d06c --- /dev/null +++ b/src/LocalPost.KafkaConsumer/OffsetManager.cs @@ -0,0 +1,58 @@ +using Confluent.Kafka; + +namespace LocalPost.KafkaConsumer; + +internal class OffsetManager +{ + // partition -> next offset + private readonly Dictionary _offsets = new(); + + public void Register(TopicPartitionOffset topicPartitionOffset) + { + lock (_offsets) + { + if (!_offsets.ContainsKey(topicPartitionOffset.TopicPartition)) + _offsets[topicPartitionOffset.TopicPartition] = NextOffset.From(topicPartitionOffset); + } + } + + public async ValueTask WaitToStore(TopicPartitionOffset topicPartitionOffset) + { + var offset = topicPartitionOffset.Offset; + var topicPartition = topicPartitionOffset.TopicPartition; +// if (!_offsets.ContainsKey(topicPartition)) +// throw new ArgumentOutOfRangeException(nameof(topicPartitionOffset), "Unknown topic partition"); + + var completed = false; + while (!completed) + { + NextOffset nextOffset; + lock (_offsets) + { + nextOffset = _offsets[topicPartition]; + completed = nextOffset.Offset >= offset; + if (completed) + _offsets[topicPartition] = nextOffset.Next(); + } + + if (completed) + nextOffset.Complete(); + else + await nextOffset.Completed; + } + } +} + +internal readonly record struct NextOffset(Offset Offset) +{ + // See 
https://devblogs.microsoft.com/premier-developer/the-danger-of-taskcompletionsourcet-class/#conclusion + private readonly TaskCompletionSource _completionSource = new(TaskCreationOptions.RunContinuationsAsynchronously); + + public Task Completed => _completionSource.Task; + + public NextOffset Next() => new(Offset + 1); + + public void Complete() => _completionSource.SetResult(true); + + public static NextOffset From(TopicPartitionOffset topicPartitionOffset) => new(topicPartitionOffset.Offset); +} diff --git a/src/LocalPost.KafkaConsumer/Options.cs b/src/LocalPost.KafkaConsumer/Options.cs index 5ea44cd..8d8b3b7 100644 --- a/src/LocalPost.KafkaConsumer/Options.cs +++ b/src/LocalPost.KafkaConsumer/Options.cs @@ -1,16 +1,39 @@ using System.ComponentModel.DataAnnotations; using Confluent.Kafka; +using JetBrains.Annotations; namespace LocalPost.KafkaConsumer; -public sealed class Options +[PublicAPI] +public record Options { - public ConsumerConfig Kafka { get; set; } = new(); - - [Required] public string TopicName { get; set; } = null!; - /// - /// How many messages to process in parallel. + /// Group ID, auth and other options should be set directly. /// - [Required] public ushort MaxConcurrency { get; set; } = ushort.MaxValue; + public ConsumerConfig Kafka { get; set; } = new() + { + EnableAutoOffsetStore = false, // We will store offsets manually, see ConsumeContext class + }; + + [Required] public string Topic { get; set; } = null!; + + // TODO Implement (via ApplicationLifecycle) +// public bool ShutdownAppOnFatalError { get; set; } = true; + + // Implement later?.. +// /// +// /// How many (parallel) consumers to spawn. 
+// /// +// [Range(1, 10)] +// public byte Instances { get; set; } = 1; +} + +[PublicAPI] +public record BatchedOptions : Options +{ + [Range(1, ushort.MaxValue)] + public ushort BatchMaxSize { get; set; } = 100; + + [Range(1, ushort.MaxValue)] + public int BatchTimeWindowMilliseconds { get; set; } = 1_000; } diff --git a/src/LocalPost.KafkaConsumer/README.md b/src/LocalPost.KafkaConsumer/README.md new file mode 100644 index 0000000..9d4a213 --- /dev/null +++ b/src/LocalPost.KafkaConsumer/README.md @@ -0,0 +1,23 @@ +# LocalPost Kafka Consumer + +## librdkafka's background prefetching + +The Kafka client automatically prefetches messages in the background. This is done by the background thread that is +started when the client is created. The background thread will fetch messages from the broker and enqueue them on the +internal queue, so `Consume()` calls will return faster. + +Because of this behavior, there is no need to maintain our own in memory queue (channel). + + + + + + + + + +## Immutability (actually lack of it) + +Because Kafka messages are meant to be processed sequentially (parallelism is achieved by having multiple +partitions / consumers), `ConsumeContext`/`BatchConsumeContext` objects are not immutable and are reused for each +handler's call. 
diff --git a/src/LocalPost.Polly/HandlerStack.cs b/src/LocalPost.Polly/HandlerStack.cs new file mode 100644 index 0000000..7ad9517 --- /dev/null +++ b/src/LocalPost.Polly/HandlerStack.cs @@ -0,0 +1,24 @@ +using JetBrains.Annotations; +using Polly; + +namespace LocalPost.Polly; + +[PublicAPI] +public static class PollyHandlerStack +{ + public static HandlerFactory UsePollyPipeline(this HandlerFactory handlerStack, + ResiliencePipeline pipeline) => + handlerStack.Map(next => async (context, ct) => + { + await pipeline.ExecuteAsync(ct => next(context, ct), ct); + }); + + public static HandlerFactory UsePollyPipeline(this HandlerFactory handlerStack, + Action configure) + { + var builder = new ResiliencePipelineBuilder(); + configure(builder); + + return handlerStack.UsePollyPipeline(builder.Build()); + } +} diff --git a/src/LocalPost.RabbitMqConsumer/LocalPost.RabbitMqConsumer.csproj b/src/LocalPost.Polly/LocalPost.Polly.csproj similarity index 77% rename from src/LocalPost.RabbitMqConsumer/LocalPost.RabbitMqConsumer.csproj rename to src/LocalPost.Polly/LocalPost.Polly.csproj index b2a3faf..12ae501 100644 --- a/src/LocalPost.RabbitMqConsumer/LocalPost.RabbitMqConsumer.csproj +++ b/src/LocalPost.Polly/LocalPost.Polly.csproj @@ -6,10 +6,11 @@ false - LocalPost.RabbitMqConsumer - background;task;queue;rabbitmq - Local (in-process) background queue for sending to Amazon SNS. 
+ LocalPost.Polly Alexey Shokov + Polly integration for LocalPost + https://github.com/alexeyshockov/LocalPost/v$(Version) + background;task;queue;retry;timeout README.md MIT @@ -19,7 +20,7 @@ - + @@ -40,10 +41,10 @@ - - - + + + diff --git a/src/LocalPost.Polly/README.md b/src/LocalPost.Polly/README.md new file mode 100644 index 0000000..ef80096 --- /dev/null +++ b/src/LocalPost.Polly/README.md @@ -0,0 +1 @@ +# LocalPost Polly integration diff --git a/src/LocalPost.SqsConsumer/ConsumeContext.cs b/src/LocalPost.SqsConsumer/ConsumeContext.cs index a8cdd10..471c3b5 100644 --- a/src/LocalPost.SqsConsumer/ConsumeContext.cs +++ b/src/LocalPost.SqsConsumer/ConsumeContext.cs @@ -1,10 +1,104 @@ +using System.Collections.Immutable; using Amazon.SQS.Model; +using JetBrains.Annotations; +using LocalPost.AsyncEnumerable; namespace LocalPost.SqsConsumer; -public readonly record struct ConsumeContext(string QueueName, string QueueUrl, Message Message) +internal static class ConsumeContext { - public readonly DateTimeOffset ReceivedAt = DateTimeOffset.Now; + public static BatchBuilderFactory, BatchConsumeContext> BatchBuilder( + BatchSize batchMaxSizeSize, TimeSpan timeWindow) => ct => + new BatchConsumeContext.Builder(batchMaxSizeSize, timeWindow, ct); +} + +[PublicAPI] +public readonly record struct ConsumeContext +{ + internal readonly QueueClient Client; + internal readonly Message Message; + public readonly T Payload; + + public DateTimeOffset ReceivedAt { get; init; } = DateTimeOffset.Now; + + internal ConsumeContext(QueueClient client, Message message, T payload) + { + Client = client; + Payload = payload; + Message = message; + } + + // TODO Headers instead of the message + public void Deconstruct(out T payload, out Message message) + { + payload = Payload; + message = Message; + } + + public string MessageId => Message.MessageId; + + public string ReceiptHandle => Message.ReceiptHandle; + + public IReadOnlyDictionary Attributes => Message.Attributes; + + public 
IReadOnlyDictionary MessageAttributes => Message.MessageAttributes; public bool IsStale => false; // TODO Check the visibility timeout + + public ConsumeContext Transform(TOut payload) => + new(Client, Message, payload) + { + ReceivedAt = ReceivedAt + }; + + public ConsumeContext Transform(Func, TOut> transform) => Transform(transform(this)); + + public async Task> Transform(Func, Task> transform) => + Transform(await transform(this)); + + public static implicit operator T(ConsumeContext context) => context.Payload; +} + +[PublicAPI] +public readonly record struct BatchConsumeContext +{ + internal sealed class Builder : BoundedBatchBuilderBase, BatchConsumeContext> + { + public Builder(BatchSize batchMaxSizeSize, TimeSpan timeWindow, CancellationToken ct = default) : + base(batchMaxSizeSize, timeWindow, ct) + { + } + + public override BatchConsumeContext Build() => new(Batch); + } + + public readonly IReadOnlyList> Messages; + + internal BatchConsumeContext(IReadOnlyList> messages) + { + Messages = messages; + } + + public BatchConsumeContext Transform(ConsumeContext[] payload) => new(payload); + + public BatchConsumeContext Transform(IEnumerable> payload) => + Transform(payload.ToArray()); + + public BatchConsumeContext Transform(IEnumerable batchPayload) => + Transform(Messages.Zip(batchPayload, (message, payload) => message.Transform(payload))); + + public BatchConsumeContext Transform(Func, TOut> transform) + { + // TODO Parallel LINQ + var messages = Messages.Select(transform); + return Transform(messages); + } + + public async Task> Transform(Func, Task> transform) + { + var messages = await Task.WhenAll(Messages.Select(transform)); + return Transform(messages); + } + + internal QueueClient Client => Messages[0].Client; } diff --git a/src/LocalPost.SqsConsumer/ConsumerMiddleware.cs b/src/LocalPost.SqsConsumer/ConsumerMiddleware.cs deleted file mode 100644 index 8b71982..0000000 --- a/src/LocalPost.SqsConsumer/ConsumerMiddleware.cs +++ /dev/null @@ -1,25 +0,0 
@@ -using Amazon.SQS; - -namespace LocalPost.SqsConsumer; - -internal sealed class ProcessedMessageHandler : IMiddleware -{ - private readonly IAmazonSQS _sqs; - - public ProcessedMessageHandler(IAmazonSQS sqs) - { - _sqs = sqs; - } - - public Handler Invoke(Handler next) => async (context, ct) => - { - if (context.IsStale) - return; - - // TODO Processing timeout from the visibility timeout - await next(context, ct); // Extend message's VisibilityTimeout in case of long processing?.. - - // Won't be deleted in case of an exception in the handler - await _sqs.DeleteMessageAsync(context.QueueUrl, context.Message.ReceiptHandle, ct); - }; -} diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecks.cs b/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecks.cs deleted file mode 100644 index 22b2fba..0000000 --- a/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecks.cs +++ /dev/null @@ -1,21 +0,0 @@ -using System.Diagnostics.CodeAnalysis; -using LocalPost.DependencyInjection; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Diagnostics.HealthChecks; - -namespace LocalPost.SqsConsumer.DependencyInjection; - -public static class HealthChecks -{ - [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] - public static IHealthChecksBuilder AddAmazonSqsConsumerReadinessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder - .Add(SqsConsumerService.QueueReadinessCheck(name, failureStatus, tags)) - .Add(SqsConsumerService.ConsumerGroupReadinessCheck(name, failureStatus, tags)); - - [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] - public static IHealthChecksBuilder AddAmazonSqsConsumerLivenessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? 
tags = default) => builder - .Add(SqsConsumerService.QueueLivenessCheck(name, failureStatus, tags)) - .Add(SqsConsumerService.ConsumerGroupLivenessCheck(name, failureStatus, tags)); -} diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceHealthCheckRegistration.cs b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceHealthCheckRegistration.cs new file mode 100644 index 0000000..7b88cd7 --- /dev/null +++ b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceHealthCheckRegistration.cs @@ -0,0 +1,19 @@ +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; + +namespace LocalPost.SqsConsumer.DependencyInjection; + +public static class ServiceHealthCheckRegistration +{ + // Check if the same check is added twice?.. + public static IHealthChecksBuilder AddKafkaConsumerLivenessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder + .Add(HealthChecks.LivenessCheckForNamed(name, failureStatus, tags)) + .AddConsumerGroupLivenessCheck>(); + + public static IHealthChecksBuilder AddKafkaBatchConsumerLivenessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? 
tags = default) => builder + .Add(HealthChecks.LivenessCheckForNamed(name, failureStatus, tags)) + .AddConsumerGroupLivenessCheck>(); +} diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs index e48fc0a..4d7eed1 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs @@ -1,99 +1,111 @@ +using JetBrains.Annotations; using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.DependencyInjection.Extensions; using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; using Microsoft.Extensions.Options; namespace LocalPost.SqsConsumer.DependencyInjection; +[PublicAPI] public static class ServiceRegistration { - // TODO Implement -// public static OptionsBuilder AddAmazonSqsJsonConsumer(this IServiceCollection services, -// string name, Action? configure = null) where THandler : IHandler => -// services.AddAmazonSqsConsumer(name, builder => -// { -// builder.MiddlewareStackBuilder.SetHandler(); -// configure?.Invoke(builder); -// }); + public static SqsBuilder AddSqsConsumers(this IServiceCollection services) => + new(services, null); - public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, - string name, Action>? configure = null) - where THandler : IHandler => - services.AddAmazonSqsConsumer(name, builder => - { - builder.SetHandler(); - configure?.Invoke(builder); - }); + internal static bool TryAddQueueClient(this IServiceCollection services, string name) + where TOptions : Options => + services.TryAddNamedSingleton(name, provider => + ActivatorUtilities.CreateInstance(provider, name)); +} -// public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, -// string name, Handler handler, Action? 
configure = null) => -// services.AddAmazonSqsConsumer(name, builder => -// { -// builder.MiddlewareStackBuilder.SetHandler(handler); -// configure?.Invoke(builder); -// }); -// public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, -// string name, Func handler) where TDep1 : notnull => -// services.AddAmazonSqsConsumer(name, provider => (context, ct) => -// { -// var dep1 = provider.GetRequiredService(); +// TODO Remove +// [PublicAPI] +// public static class ServiceRegistration +// { +// // TODO Implement +// // public static OptionsBuilder AddAmazonSqsJsonConsumer(this IServiceCollection services, +// // string name, Action? configure = null) where THandler : IHandler => +// // services.AddAmazonSqsConsumer(name, builder => +// // { +// // builder.MiddlewareStackBuilder.SetHandler(); +// // configure?.Invoke(builder); +// // }); // -// return handler(dep1, context, ct); -// }); +// public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, +// string name, Action>? configure = null) +// where THandler : IHandler => +// services.AddAmazonSqsConsumer(name, builder => +// { +// builder.SetHandler(); +// configure?.Invoke(builder); +// }); // -// public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, -// string name, Func handler) -// where TDep1 : notnull -// where TDep2 : notnull => -// services.AddAmazonSqsConsumer(name, provider => (context, ct) => -// { -// var dep1 = provider.GetRequiredService(); -// var dep2 = provider.GetRequiredService(); +// // public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, +// // string name, Handler handler, Action? 
configure = null) => +// // services.AddAmazonSqsConsumer(name, builder => +// // { +// // builder.MiddlewareStackBuilder.SetHandler(handler); +// // configure?.Invoke(builder); +// // }); // -// return handler(dep1, dep2, context, ct); -// }); +// // public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, +// // string name, Func handler) where TDep1 : notnull => +// // services.AddAmazonSqsConsumer(name, provider => (context, ct) => +// // { +// // var dep1 = provider.GetRequiredService(); +// // +// // return handler(dep1, context, ct); +// // }); +// // +// // public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, +// // string name, Func handler) +// // where TDep1 : notnull +// // where TDep2 : notnull => +// // services.AddAmazonSqsConsumer(name, provider => (context, ct) => +// // { +// // var dep1 = provider.GetRequiredService(); +// // var dep2 = provider.GetRequiredService(); +// // +// // return handler(dep1, dep2, context, ct); +// // }); +// // +// // public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, +// // string name, Func handler) +// // where TDep1 : notnull +// // where TDep2 : notnull +// // where TDep3 : notnull => +// // services.AddAmazonSqsConsumer(name, provider => (context, ct) => +// // { +// // var dep1 = provider.GetRequiredService(); +// // var dep2 = provider.GetRequiredService(); +// // var dep3 = provider.GetRequiredService(); +// // +// // return handler(dep1, dep2, dep3, context, ct); +// // }); // -// public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, -// string name, Func handler) -// where TDep1 : notnull -// where TDep2 : notnull -// where TDep3 : notnull => -// services.AddAmazonSqsConsumer(name, provider => (context, ct) => -// { -// var dep1 = provider.GetRequiredService(); -// var dep2 = provider.GetRequiredService(); -// var dep3 = provider.GetRequiredService(); +// // 
public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, +// // string name) where THandler : IMessageHandler => +// // services +// // .AddAmazonSqsConsumer(name, provider => provider.GetRequiredService().Process); // -// return handler(dep1, dep2, dep3, context, ct); -// }); - -// public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, -// string name) where THandler : IMessageHandler => -// services -// .AddAmazonSqsConsumer(name, provider => provider.GetRequiredService().Process); - - public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, - string name, Action> configure) - { - services.TryAddConcurrentHostedServices(); - - var handleStackBuilder = new MiddlewareStackBuilder(); - services.TryAddSingleton(); - handleStackBuilder.Append(); - configure(handleStackBuilder); - var handlerStack = handleStackBuilder.Build(); - - services.TryAddSingleton(provider => SqsConsumerService.Create(provider, name, handlerStack)); - - services.AddSingleton(provider => - provider.GetRequiredService(name).Reader); - services.AddSingleton(provider => - provider.GetRequiredService(name).ConsumerGroup); - - // Extend ServiceDescriptor for better comparison and implement custom TryAddSingleton later... 
- - return services.AddOptions(name).Configure(options => options.QueueName = name); - } -} +// public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, +// string name, Action> configure) +// { +// services.AddConcurrentHostedServices(); +// +// services.TryAddSingleton(); +// +// if (services.TryAddNamedSingleton(name, provider => SqsConsumerService.Create(provider, name, configure))) +// throw new InvalidOperationException($"SQS consumer is already registered: {name}"); +// +// services.AddSingleton(provider => +// provider.GetRequiredService(name).Reader); +// services.AddSingleton(provider => +// provider.GetRequiredService(name).ConsumerGroup); +// +// return services.AddOptions(name).Configure(options => options.QueueName = name); +// } +// } diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs new file mode 100644 index 0000000..23fb64a --- /dev/null +++ b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs @@ -0,0 +1,73 @@ +using JetBrains.Annotations; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Options; + +namespace LocalPost.SqsConsumer.DependencyInjection; + +[PublicAPI] +public sealed class SqsBuilder : OptionsBuilder +{ + internal SqsBuilder(IServiceCollection services, string? name) : base(services, name) + { + } + + public OptionsBuilder AddConsumer(string name, HandlerFactory> configure) + { + if (string.IsNullOrEmpty(name)) // TODO Just default empty name... 
+ throw new ArgumentException("A proper (non empty) name is required", nameof(name)); + + // Services.AddSingleton(); + + if (!Services.TryAddQueueClient(name)) + throw new ArgumentException("SQS consumer is already registered", nameof(name)); + + Services.TryAddNamedSingleton(name, provider => + new MessageSource(provider.GetRequiredService(name))); + Services.AddBackgroundServiceForNamed(name); + + Services.TryAddConsumerGroup, MessageSource>(name, configure); + + return Services.AddOptions(name).Configure>((options, commonConfigs) => + { + var commonConfig = commonConfigs.Get(Name); + + options.UpdateFrom(commonConfig); + options.QueueName = name; + }); + } + + public OptionsBuilder AddBatchConsumer(string name, + HandlerFactory> configure) + { + if (string.IsNullOrEmpty(name)) // TODO Just default empty name... + throw new ArgumentException("A proper (non empty) name is required", nameof(name)); + + // Services.AddSingleton(); + + if (!Services.TryAddQueueClient(name)) + throw new ArgumentException("SQS consumer is already registered", nameof(name)); + + Services.TryAddNamedSingleton(name, provider => + { + var options = provider.GetOptions(name); + + return new BatchMessageSource(provider.GetRequiredService(name), + ConsumeContext.BatchBuilder( + options.BatchMaxSize, TimeSpan.FromMilliseconds(options.BatchTimeWindowMilliseconds))); + }); + Services.AddBackgroundServiceForNamed(name); + + Services.TryAddConsumerGroup, BatchMessageSource>(name, configure); +// Services.TryAddConsumerGroup(name, provider => BackgroundQueue.ConsumerGroupFor( +// provider.GetRequiredService(name), configure(provider), 1)); + + return Services.AddOptions(name).Configure>((options, commonConfigs) => + { + var commonConfig = commonConfigs.Get(Name); + + options.UpdateFrom(commonConfig); + options.QueueName = name; + }); + } +} diff --git a/src/LocalPost.SqsConsumer/HandlerStack.cs b/src/LocalPost.SqsConsumer/HandlerStack.cs new file mode 100644 index 0000000..ea35841 --- /dev/null 
+++ b/src/LocalPost.SqsConsumer/HandlerStack.cs @@ -0,0 +1,83 @@ +using System.Diagnostics; +using Amazon.Runtime.Internal; +using JetBrains.Annotations; + +namespace LocalPost.SqsConsumer; + +[PublicAPI] +public static class SqsHandlerStack +{ + public static HandlerFactory> Trace( + this HandlerFactory> handlerStack) => + handlerStack.Map, ConsumeContext>(next => + async (context, ct) => + { + using var activity = SqsActivitySource.StartProcessing(context); + try + { + await next(context, ct); + activity?.Success(); + } + catch (Exception ex) + { + activity?.Error(ex); + } + }); + + public static HandlerFactory> Trace( + this HandlerFactory> handlerStack) => + handlerStack.Map, BatchConsumeContext>(next => + async (context, ct) => + { + using var activity = SqsActivitySource.StartProcessing(context); + try + { + await next(context, ct); + activity?.Success(); + } + catch (Exception ex) + { + activity?.Error(ex); + } + }); + + public static HandlerFactory> Acknowledge( + this HandlerFactory> handlerStack) => + // handlerStack.Map(Middlewares.Acknowledge); + handlerStack.Map, ConsumeContext>(next => + async (context, ct) => + { + await next(context, ct); + await context.Client.DeleteMessageAsync(context); // TODO Instrument + }); + + public static HandlerFactory> Acknowledge( + this HandlerFactory> handlerStack) => + // handlerStack.Map(Middlewares.AcknowledgeBatch); + handlerStack.Map, BatchConsumeContext>(next => + async (context, ct) => + { + await next(context, ct); + await context.Client.DeleteMessagesAsync(context); // TODO Instrument + }); + + public static HandlerFactory> Deserialize( + this HandlerFactory> handlerStack, Func, T> deserialize) => + handlerStack.Map, ConsumeContext>(next => + async (context, ct) => await next(context.Transform(deserialize), ct)); + + public static HandlerFactory> Deserialize( + this HandlerFactory> handlerStack, Func, Task> deserialize) => + handlerStack.Map, ConsumeContext>(next => + async (context, ct) => await next(await 
context.Transform(deserialize), ct)); + + public static HandlerFactory> Deserialize( + this HandlerFactory> handlerStack, Func, T> deserialize) => + handlerStack.Map, BatchConsumeContext>(next => + async (context, ct) => await next(context.Transform(deserialize), ct)); + + public static HandlerFactory> Deserialize( + this HandlerFactory> handlerStack, Func, Task> deserialize) => + handlerStack.Map, BatchConsumeContext>(next => + async (context, ct) => await next(await context.Transform(deserialize), ct)); +} diff --git a/src/LocalPost.SqsConsumer/IMessageHandler.cs b/src/LocalPost.SqsConsumer/IMessageHandler.cs deleted file mode 100644 index 5cceb8d..0000000 --- a/src/LocalPost.SqsConsumer/IMessageHandler.cs +++ /dev/null @@ -1,8 +0,0 @@ -using Amazon.SQS.Model; -using LocalPost; - -namespace LocalPost.SqsConsumer; - -public interface IMessageHandler : IHandler -{ -} diff --git a/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj b/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj index 55ecf76..e509fa6 100644 --- a/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj +++ b/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj @@ -7,9 +7,10 @@ false LocalPost.SqsConsumer - background;task;queue;amazon;sqs;aws - Local (in-process) background queue for sending to Amazon SNS. Alexey Shokov + Local (in-process) background queue for sending to Amazon SNS. 
+ https://github.com/alexeyshockov/LocalPost/releases/v$(Version) + background;task;queue;amazon;sqs;aws README.md MIT @@ -19,7 +20,7 @@ - + @@ -44,6 +45,8 @@ + + diff --git a/src/LocalPost.SqsConsumer/MessageSource.cs b/src/LocalPost.SqsConsumer/MessageSource.cs index a4a0e65..820495f 100644 --- a/src/LocalPost.SqsConsumer/MessageSource.cs +++ b/src/LocalPost.SqsConsumer/MessageSource.cs @@ -1,67 +1,73 @@ -using System.Threading.Channels; +using System.Runtime.CompilerServices; +using LocalPost.AsyncEnumerable; +using LocalPost.DependencyInjection; namespace LocalPost.SqsConsumer; -internal sealed class MessageSource : IBackgroundService, IAsyncEnumerable +internal sealed class MessageSource : MessageSourceBase, IAsyncEnumerable> { - private readonly QueueClient _client; - private readonly Channel _queue; + private readonly ConcurrentAsyncEnumerable> _source; - public MessageSource(QueueClient client) + public MessageSource(QueueClient client) : base(client) { - _client = client; - _queue = Channel.CreateBounded(new BoundedChannelOptions(1) - { - SingleWriter = true, // Spawn multiple readers later?.. 
- SingleReader = false - }); + _source = ConsumeAsync().ToConcurrent(); } - public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) - { - // Track full or not later - while (await _queue.Reader.WaitToReadAsync(ct)) - while (_queue.Reader.TryRead(out var item)) - yield return item; - } + public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); - public static implicit operator ChannelReader(MessageSource that) => that._queue.Reader; + public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => + _source.GetAsyncEnumerator(ct); +} - public static implicit operator ChannelWriter(MessageSource that) => that._queue.Writer; +internal sealed class BatchMessageSource : MessageSourceBase, IAsyncEnumerable> +{ + private readonly ConcurrentAsyncEnumerable> _source; - public async Task StartAsync(CancellationToken ct) + public BatchMessageSource(QueueClient client, + BatchBuilderFactory, BatchConsumeContext> factory) : base(client) { - await _client.ConnectAsync(ct); + _source = ConsumeAsync().Batch(factory).ToConcurrent(); } - public async Task ExecuteAsync(CancellationToken ct) + public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); + + public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => + _source.GetAsyncEnumerator(ct); +} + +internal abstract class MessageSourceBase : IBackgroundService, INamedService +{ + private readonly QueueClient _client; + + private bool _stopped; + + protected MessageSourceBase(QueueClient client) { - try - { - while (!ct.IsCancellationRequested) - { - await _queue.Writer.WaitToWriteAsync(ct); // Wait for the buffer capacity - - await Consume(ct); - } - } - finally - { - } + _client = client; } - public Task StopAsync(CancellationToken ct) + public string Name => _client.Name; + + public async Task StartAsync(CancellationToken ct) => await _client.ConnectAsync(ct); + + public abstract Task ExecuteAsync(CancellationToken ct); + + 
protected async IAsyncEnumerable> ConsumeAsync( + [EnumeratorCancellation] CancellationToken ct = default) { - _queue.Writer.Complete(); + while (!ct.IsCancellationRequested && !_stopped) + foreach (var message in await _client.PullMessagesAsync(ct)) + yield return message; - return Task.CompletedTask; + ct.ThrowIfCancellationRequested(); } - private async Task Consume(CancellationToken stoppingToken) + // Run on a separate thread, as Confluent Kafka API is blocking + public Task StopAsync(CancellationToken ct) { - var messages = await _client.PullMessagesAsync(stoppingToken); + _stopped = true; +// _client.Close(); - foreach (var message in messages) - await _queue.Writer.WriteAsync(message, stoppingToken); + return Task.CompletedTask; } } diff --git a/src/LocalPost.SqsConsumer/MetricsReporter.cs b/src/LocalPost.SqsConsumer/MetricsReporter.cs new file mode 100644 index 0000000..862e5b9 --- /dev/null +++ b/src/LocalPost.SqsConsumer/MetricsReporter.cs @@ -0,0 +1,9 @@ +using System.Diagnostics; +using System.Diagnostics.Metrics; +using System.Reflection; + +namespace LocalPost.SqsConsumer; + +// TODO Metrics +// .NET docs on metric instrumentation: https://learn.microsoft.com/en-us/dotnet/core/diagnostics/metrics-instrumentation +// OpenTelemetry semantic conventions: https://opentelemetry.io/docs/specs/semconv/messaging/messaging-metrics/#consumer-metrics diff --git a/src/LocalPost.SqsConsumer/Options.cs b/src/LocalPost.SqsConsumer/Options.cs index 7108032..e702dbc 100644 --- a/src/LocalPost.SqsConsumer/Options.cs +++ b/src/LocalPost.SqsConsumer/Options.cs @@ -1,40 +1,27 @@ using System.ComponentModel.DataAnnotations; +using JetBrains.Annotations; namespace LocalPost.SqsConsumer; /// -/// General SQS consumer settings +/// General SQS settings. /// -public sealed record Options +[PublicAPI] +public record EndpointOptions { + // AWS SDK requires List... 
No way to make it readonly / immutable :( internal static readonly List AllAttributes = new() { "All" }; internal static readonly List AllMessageAttributes = new() { "All" }; - public const int DefaultTimeout = 30; - /// /// How many messages to process in parallel. Default is 10. /// [Required] public ushort MaxConcurrency { get; set; } = 10; - [Required] public string QueueName { get; set; } = null!; - - private string? _queueUrl; /// - /// If not set, IAmazonSQS.GetQueueUrlAsync(QueueName) will be used once, to get the actual URL of the queue. + /// How many messages to prefetch from the queue. Default is 10. /// - [Url] public string? QueueUrl - { - get => _queueUrl; - set - { - _queueUrl = value; - - // Extract name (MyQueue) from an URL (https://sqs.us-east-2.amazonaws.com/123456789012/MyQueue) - if (Uri.TryCreate(value, UriKind.Absolute, out var url) && url.Segments.Length >= 3) - QueueName = url.Segments[2]; - } - } + public byte Prefetch { get; set; } = 10; /// /// Time to wait for available messages in the queue. 0 is short pooling, where 1..20 activates long pooling. @@ -46,7 +33,8 @@ [Url] public string? QueueUrl /// /// Setting up long polling /// - [Range(0, 20)] public byte WaitTimeSeconds { get; set; } = 20; + [Range(0, 20)] + public byte WaitTimeSeconds { get; set; } = 20; /// /// The maximum number of messages to return. Amazon SQS never returns more messages than this value (however, @@ -58,15 +46,66 @@ [Url] public string? QueueUrl /// /// Setting up long polling /// - [Range(1, 10)] public byte MaxNumberOfMessages { get; set; } = 10; + [Range(1, 10)] + public byte MaxNumberOfMessages { get; set; } = 10; + + // User specific thing... +// /// +// /// Message processing timeout. If not set, IAmazonSQS.GetQueueAttributesAsync() will be used once, +// /// to get VisibilityTimeout for the queue. If it is not available, default value of 10 seconds will be used. 
+// /// +// /// +// /// Amazon SQS visibility timeout +// /// +// [Range(1, int.MaxValue)] +// public int? TimeoutMilliseconds { get; set; } + + internal void UpdateFrom(EndpointOptions other) + { + MaxConcurrency = other.MaxConcurrency; + Prefetch = other.Prefetch; + WaitTimeSeconds = other.WaitTimeSeconds; + MaxNumberOfMessages = other.MaxNumberOfMessages; + } +} + +/// +/// SQS queue consumer settings. +/// +[PublicAPI] +public record Options : EndpointOptions +{ + [Required] + public string QueueName { get; set; } = null!; + private string? _queueUrl; /// - /// Message processing timeout, in seconds. If not set, IAmazonSQS.GetQueueAttributesAsync() will be used once, - /// to get VisibilityTimeout for the queue. + /// If not set, IAmazonSQS.GetQueueUrlAsync(QueueName) will be used once, to get the actual URL of the queue. /// - /// - /// Amazon SQS visibility timeout - /// - [Range(1, 43200)] - public int Timeout { get; set; } = DefaultTimeout; + [Url] + public string? QueueUrl + { + get => _queueUrl; + set + { + _queueUrl = value; + + // Extract name (MyQueue) from an URL (https://sqs.us-east-2.amazonaws.com/123456789012/MyQueue) + if (Uri.TryCreate(value, UriKind.Absolute, out var url) && url.Segments.Length >= 3) + QueueName = url.Segments[2]; + } + } +} + +/// +/// SQS queue batch consumer settings. 
+/// +[PublicAPI] +public record BatchedOptions : Options +{ + [Range(1, ushort.MaxValue)] + public ushort BatchMaxSize { get; set; } = 10; + + [Range(1, ushort.MaxValue)] + public int BatchTimeWindowMilliseconds { get; set; } = 1_000; } diff --git a/src/LocalPost.SqsConsumer/QueueClient.cs b/src/LocalPost.SqsConsumer/QueueClient.cs index 2781412..e32635b 100644 --- a/src/LocalPost.SqsConsumer/QueueClient.cs +++ b/src/LocalPost.SqsConsumer/QueueClient.cs @@ -1,36 +1,42 @@ -using System.Diagnostics; using Amazon.SQS; using Amazon.SQS.Model; +using LocalPost.DependencyInjection; using Microsoft.Extensions.Logging; using Microsoft.Extensions.Options; namespace LocalPost.SqsConsumer; -internal sealed class QueueClient +internal sealed class QueueClient : INamedService { - // TODO Add more details - // See https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlActivitySource.cs#LL61C31-L61C49 - // See https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlCommand.cs#L1639-L1644 - private static readonly ActivitySource Tracer = new(typeof(QueueClient).Namespace); - private readonly ILogger _logger; + private readonly IAmazonSQS _sqs; private readonly Options _options; - public QueueClient(ILogger logger, Options options, IAmazonSQS sqs) + public QueueClient(ILogger logger, IAmazonSQS sqs, IOptionsMonitor options, string name) : + this(logger, sqs, options.Get(name), name) + { + } + + public QueueClient(ILogger logger, IAmazonSQS sqs, Options options, string name) { _logger = logger; _sqs = sqs; _options = options; + Name = name; } + public string Name { get; } + + public string QueueName => _options.QueueName; + private GetQueueAttributesResponse? _queueAttributes; // TODO Use - public int MessageVisibilityTimeout => _queueAttributes?.VisibilityTimeout switch + public TimeSpan? 
MessageVisibilityTimeout => _queueAttributes?.VisibilityTimeout switch { - > 0 => _queueAttributes.VisibilityTimeout, - _ => _options.Timeout + > 0 => TimeSpan.FromSeconds(_queueAttributes.VisibilityTimeout), + _ => null }; private string? _queueUrl; @@ -50,7 +56,7 @@ private async Task FetchQueueAttributesAsync(CancellationToken ct) try { // Checking for a possible error in the response would be also good... - _queueAttributes = await _sqs.GetQueueAttributesAsync(QueueUrl, Options.AllAttributes, ct); + _queueAttributes = await _sqs.GetQueueAttributesAsync(QueueUrl, EndpointOptions.AllAttributes, ct); } catch (OperationCanceledException e) when (e.CancellationToken == ct) { @@ -62,51 +68,56 @@ private async Task FetchQueueAttributesAsync(CancellationToken ct) } } - public async Task DeleteMessageAsync(Message message, CancellationToken ct) + public async Task>> PullMessagesAsync(CancellationToken ct) { - using var span = Tracer.StartActivity(); + using var activity = SqsActivitySource.StartReceiving(this); - var response = await _sqs.DeleteMessageAsync(QueueUrl, message.ReceiptHandle, ct); + var attributeNames = EndpointOptions.AllAttributes; // Make configurable, later + var messageAttributeNames = EndpointOptions.AllMessageAttributes; // Make configurable, later - // TODO Log failures?.. 
- } + // AWS SDK handles network failures, see + // https://docs.aws.amazon.com/sdkref/latest/guide/feature-retry-behavior.html + var response = await _sqs.ReceiveMessageAsync(new ReceiveMessageRequest + { + QueueUrl = QueueUrl, + WaitTimeSeconds = _options.WaitTimeSeconds, + MaxNumberOfMessages = _options.MaxNumberOfMessages, + AttributeNames = attributeNames, + MessageAttributeNames = messageAttributeNames, + }, ct); - public async Task> PullMessagesAsync(CancellationToken ct) - { - using var span = Tracer.StartActivity(); + activity?.SetTagsFor(response); - var attributeNames = Options.AllAttributes; // TODO Configurable - var messageAttributeNames = Options.AllMessageAttributes; // TODO Configurable + return response.Messages.Select(message => new ConsumeContext(this, message, message.Body)); + + // TODO Log failures?.. - try - { - // AWS SDK handles network failures, see - // https://docs.aws.amazon.com/sdkref/latest/guide/feature-retry-behavior.html - var response = await _sqs.ReceiveMessageAsync(new ReceiveMessageRequest - { - QueueUrl = QueueUrl, - WaitTimeSeconds = _options.WaitTimeSeconds, - MaxNumberOfMessages = _options.MaxNumberOfMessages, - AttributeNames = attributeNames, - MessageAttributeNames = messageAttributeNames, - }, ct); - - // TODO Add number of received messages to the diagnostics span - return response.Messages - .Select(message => new ConsumeContext(_options.QueueName, QueueUrl, message)).ToArray(); - } // catch (OverLimitException) // { -// // TODO Handle +// // Log and try again?.. 
// } - catch (OperationCanceledException e) when (e.CancellationToken == ct) - { - throw; - } - catch (Exception) - { - // Just bubble up, so the supervisor can report the error and the whole app can be restarted (Kubernetes) - throw; - } + } + + public async Task DeleteMessageAsync(ConsumeContext context) + { + using var activity = SqsActivitySource.StartSettling(context); + await _sqs.DeleteMessageAsync(QueueUrl, context.ReceiptHandle); + + // TODO Log failures?.. + } + + public async Task DeleteMessagesAsync(BatchConsumeContext context) + { + using var activity = SqsActivitySource.StartSettling(context); + + var requests = context.Messages + .Select((message, i) => new DeleteMessageBatchRequestEntry(i.ToString(), message.ReceiptHandle)) + .Chunk(10) + .Select(entries => entries.ToList()); + + await Task.WhenAll(requests.Select(entries => + _sqs.DeleteMessageBatchAsync(QueueUrl, entries))); + + // TODO Log failures?.. } } diff --git a/src/LocalPost.SqsConsumer/README.md b/src/LocalPost.SqsConsumer/README.md new file mode 100644 index 0000000..8ff15e3 --- /dev/null +++ b/src/LocalPost.SqsConsumer/README.md @@ -0,0 +1 @@ +# LocalPost SQS Consumer diff --git a/src/LocalPost.SqsConsumer/SqsActivitySource.cs b/src/LocalPost.SqsConsumer/SqsActivitySource.cs new file mode 100644 index 0000000..76ea0ce --- /dev/null +++ b/src/LocalPost.SqsConsumer/SqsActivitySource.cs @@ -0,0 +1,148 @@ +using System.Diagnostics; +using System.Reflection; +using Amazon.SQS.Model; + +namespace LocalPost.SqsConsumer; + +internal static class MessageUtils +{ + public static void ExtractTraceField(object? carrier, string fieldName, + out string? fieldValue, out IEnumerable? fieldValues) + { + fieldValues = default; + fieldValue = null; + if (carrier is not Message message) + return; + + fieldValue = message.MessageAttributes.TryGetValue(fieldName, out var attribute) + ? 
attribute.StringValue + : null; + } +} + +internal static class SqsActivityExtensions +{ + public static void AcceptDistributedTracingFrom(this Activity activity, Message message) + { + var propagator = DistributedContextPropagator.Current; + propagator.ExtractTraceIdAndState(message, MessageUtils.ExtractTraceField, + out var traceParent, out var traceState); + + if (string.IsNullOrEmpty(traceParent)) + return; + activity.SetParentId(traceParent!); + if (!string.IsNullOrEmpty(traceState)) + activity.TraceStateString = traceState; + + var baggage = propagator.ExtractBaggage(message, MessageUtils.ExtractTraceField); + if (baggage is null) + return; + foreach (var baggageItem in baggage) + activity.AddBaggage(baggageItem.Key, baggageItem.Value); + } + + public static void SetDefaultTags(this Activity? activity, QueueClient client) + { + // See https://opentelemetry.io/docs/specs/semconv/messaging/messaging-spans/#messaging-attributes + activity?.SetTag("messaging.system", "sqs"); + + activity?.SetTag("messaging.destination.name", client.QueueName); + + // activity?.SetTag("messaging.client_id", "service_name"); + // activity?.SetTag("server.address", client.QueueUrl.Host); + // activity?.SetTag("server.port", client.QueueUrl.Port); + } + + public static Activity? SetTagsFor(this Activity? activity, ConsumeContext context) => + activity?.SetTag("messaging.message.id", context.MessageId); + + public static Activity? SetTagsFor(this Activity? activity, BatchConsumeContext context) => + activity?.SetTag("messaging.batch.message_count", context.Messages.Count); + + public static Activity? SetTagsFor(this Activity? 
activity, ReceiveMessageResponse response) => + activity?.SetTag("messaging.batch.message_count", response.Messages.Count); +} + +// Npgsql as an inspiration: +// - https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlActivitySource.cs#LL61C31-L61C49 +// - https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlCommand.cs#L1639-L1644 +// Also OTEL semantic convention: https://opentelemetry.io/docs/specs/semconv/messaging/messaging-spans/ +internal static class SqsActivitySource +{ + private static readonly System.Diagnostics.ActivitySource Source; + + public static bool IsEnabled => Source.HasListeners(); + + static SqsActivitySource() + { + // See https://stackoverflow.com/a/909583/322079 + var assembly = Assembly.GetExecutingAssembly(); + var version = AssemblyName.GetAssemblyName(assembly.Location).Version; + Source = new System.Diagnostics.ActivitySource(assembly.FullName, version.ToString()); + } + + public static Activity? StartProcessing(ConsumeContext context) + { + var activity = Source.CreateActivity($"{context.Client.QueueName} process", ActivityKind.Consumer); + if (activity is { IsAllDataRequested: true }) + { + activity.SetDefaultTags(context.Client); + activity.SetTagsFor(context); + activity.AcceptDistributedTracingFrom(context.Message); + } + + activity?.Start(); + + return activity; + } + + + public static Activity? StartProcessing(BatchConsumeContext context) + { + var activity = Source.StartActivity($"{context.Client.QueueName} process", ActivityKind.Consumer); + if (activity is not { IsAllDataRequested: true }) + return activity; + + activity.SetDefaultTags(context.Client); + activity.SetTagsFor(context); + + // TODO Accept distributed tracing headers, per each message... + + return activity; + } + + public static Activity? 
StartSettling(ConsumeContext context) + { + var activity = Source.StartActivity($"{context.Client.QueueName} settle", ActivityKind.Consumer); + if (activity is not { IsAllDataRequested: true }) + return activity; + + activity.SetDefaultTags(context.Client); + activity.SetTag("messaging.message.id", context.MessageId); + + return activity; + } + + public static Activity? StartSettling(BatchConsumeContext context) + { + var activity = Source.StartActivity($"{context.Client.QueueName} settle", ActivityKind.Consumer); + if (activity is not { IsAllDataRequested: true }) + return activity; + + activity.SetDefaultTags(context.Client); + activity.SetTag("messaging.batch.message_count", context.Messages.Count); + + return activity; + } + + public static Activity? StartReceiving(QueueClient client) + { + var activity = Source.StartActivity($"{client.QueueName} receive", ActivityKind.Consumer); + if (activity is not { IsAllDataRequested: true }) + return activity; + + activity.SetDefaultTags(client); + + return activity; + } +} diff --git a/src/LocalPost.SqsConsumer/SqsConsumerService.cs b/src/LocalPost.SqsConsumer/SqsConsumerService.cs index 7ddd4f0..504c505 100644 --- a/src/LocalPost.SqsConsumer/SqsConsumerService.cs +++ b/src/LocalPost.SqsConsumer/SqsConsumerService.cs @@ -1,82 +1,83 @@ -using System.Collections.Immutable; using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Diagnostics.HealthChecks; -using Microsoft.Extensions.Hosting; namespace LocalPost.SqsConsumer; -internal sealed class SqsConsumerService : INamedService -{ - public static SqsConsumerService Create(IServiceProvider provider, string name, - MiddlewareStack handlerStack) - { - var options = provider.GetOptions(name); - - var client = ActivatorUtilities.CreateInstance(provider, options); - var messageSource = new MessageSource(client); - var reader = new BackgroundServiceSupervisor(messageSource); - - HandlerFactory handlerFactory = 
handlerStack.Resolve; - Handler handler = ActivatorUtilities.CreateInstance>(provider, - name, handlerFactory).InvokeAsync; - - var consumer = new BackgroundQueue.Consumer(messageSource, handler); - var consumerGroup = new ConsumerGroup(consumer.Run, options.MaxConcurrency); - - return new SqsConsumerService(name, reader, consumerGroup); - } - - public SqsConsumerService(string name, IBackgroundServiceSupervisor reader, - IBackgroundServiceSupervisor consumerGroup) - { - Name = name; - - Reader = reader; - _readerReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(reader); - _readerLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(reader); - - ConsumerGroup = consumerGroup; - _consumerGroupReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(consumerGroup); - _consumerGroupLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(consumerGroup); - } - - public string Name { get; } - - // Expose only the root supervisor to the host, to avoid deadlocks (.NET runtime handles background services - // synchronously by default, so if consumers are stopped first, they will block the reader from completing the - // channel). -// public IHostedService Supervisor { get; } - - public IConcurrentHostedService Reader { get; } - private readonly IHealthCheck _readerReadinessCheck; - private readonly IHealthCheck _readerLivenessCheck; - - public IConcurrentHostedService ConsumerGroup { get; } - private readonly IHealthCheck _consumerGroupReadinessCheck; - private readonly IHealthCheck _consumerGroupLivenessCheck; - - public static HealthCheckRegistration QueueReadinessCheck(string name, HealthStatus? failureStatus = default, - IEnumerable? tags = default) => new(name, - provider => provider.GetRequiredService(name)._readerReadinessCheck, - failureStatus, - tags); - - public static HealthCheckRegistration QueueLivenessCheck(string name, HealthStatus? failureStatus = default, - IEnumerable? 
tags = default) => new(name, - provider => provider.GetRequiredService(name)._readerLivenessCheck, - failureStatus, - tags); - - public static HealthCheckRegistration ConsumerGroupReadinessCheck(string name, HealthStatus? failureStatus = default, - IEnumerable? tags = default) => new(name, - provider => provider.GetRequiredService(name)._consumerGroupReadinessCheck, - failureStatus, - tags); - - public static HealthCheckRegistration ConsumerGroupLivenessCheck(string name, HealthStatus? failureStatus = default, - IEnumerable? tags = default) => new(name, - provider => provider.GetRequiredService(name)._consumerGroupLivenessCheck, - failureStatus, - tags); -} +// TODO Remove +// internal sealed class SqsConsumerService : INamedService +// { +// public static SqsConsumerService Create(IServiceProvider provider, string name, +// Action> configure) +// { +// var options = provider.GetOptions(name); +// +// var client = ActivatorUtilities.CreateInstance(provider, options); +// var messageSource = new MessageSource(client, options.Prefetch); +// var reader = new BackgroundServiceSupervisor(messageSource); +// +// var middlewares = new HandlerStackBuilder(); +// middlewares.Append(); +// configure(middlewares); +// +// var handler = ScopedHandlerFactory.Wrap(middlewares.Build())(provider); +// +// var consumer = BackgroundQueue.ConsumerFor(messageSource, handler); +// var consumerGroup = BackgroundQueue.ConsumerGroupSupervisorFor(consumer, options.MaxConcurrency); +// +// return new SqsConsumerService(name, reader, consumerGroup); +// } +// +// private SqsConsumerService(string name, IBackgroundServiceSupervisor reader, +// IBackgroundServiceSupervisor consumerGroup) +// { +// Name = name; +// +// Reader = reader; +// _readerReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(reader); +// _readerLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(reader); +// +// ConsumerGroup = consumerGroup; +// _consumerGroupReadinessCheck = new 
IBackgroundServiceSupervisor.ReadinessCheck(consumerGroup); +// _consumerGroupLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(consumerGroup); +// } +// +// public string Name { get; } +// +// // Expose only the root supervisor to the host, to avoid deadlocks (.NET runtime handles background services +// // synchronously by default, so if consumers are stopped first, they will block the reader from completing the +// // channel). +// // public IHostedService Supervisor { get; } +// +// public IConcurrentHostedService Reader { get; } +// private readonly IHealthCheck _readerReadinessCheck; +// private readonly IHealthCheck _readerLivenessCheck; +// +// public IConcurrentHostedService ConsumerGroup { get; } +// private readonly IHealthCheck _consumerGroupReadinessCheck; +// private readonly IHealthCheck _consumerGroupLivenessCheck; +// +// public static HealthCheckRegistration QueueReadinessCheck(string name, HealthStatus? failureStatus = default, +// IEnumerable? tags = default) => new(name, +// provider => provider.GetRequiredService(name)._readerReadinessCheck, +// failureStatus, +// tags); +// +// public static HealthCheckRegistration QueueLivenessCheck(string name, HealthStatus? failureStatus = default, +// IEnumerable? tags = default) => new(name, +// provider => provider.GetRequiredService(name)._readerLivenessCheck, +// failureStatus, +// tags); +// +// public static HealthCheckRegistration ConsumerGroupReadinessCheck(string name, HealthStatus? failureStatus = default, +// IEnumerable? tags = default) => new(name, +// provider => provider.GetRequiredService(name)._consumerGroupReadinessCheck, +// failureStatus, +// tags); +// +// public static HealthCheckRegistration ConsumerGroupLivenessCheck(string name, HealthStatus? failureStatus = default, +// IEnumerable? 
tags = default) => new(name, +// provider => provider.GetRequiredService(name)._consumerGroupLivenessCheck, +// failureStatus, +// tags); +// } diff --git a/src/LocalPost/ActivityEx.cs b/src/LocalPost/ActivityEx.cs new file mode 100644 index 0000000..933ce2e --- /dev/null +++ b/src/LocalPost/ActivityEx.cs @@ -0,0 +1,27 @@ +using System.Diagnostics; + +namespace LocalPost; + +internal static class ActivityEx +{ + // See https://github.com/open-telemetry/opentelemetry-dotnet/blob/core-1.8.1/src/OpenTelemetry.Api/Trace/ActivityExtensions.cs#L81-L105 + public static Activity? Error(this Activity? activity, Exception ex, bool escaped = true) + { + var tags = new ActivityTagsCollection + { + { "exception.type", ex.GetType().FullName }, + { "exception.message", ex.Message }, + { "exception.stacktrace", ex.ToString() }, + { "exception.escaped", escaped } + }; + activity?.AddEvent(new ActivityEvent("exception", tags: tags)); + + activity?.SetTag("otel.status_code", "ERROR"); + // activity.SetTag("otel.status_description", ex is PostgresException pgEx ? pgEx.SqlState : ex.Message); + activity?.SetTag("otel.status_description", ex.Message); + + return activity; + } + + public static Activity? Success(this Activity? activity) => activity?.SetTag("otel.status_code", "OK"); +} diff --git a/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs b/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs index 1459ede..ec0d6b1 100644 --- a/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs +++ b/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs @@ -2,6 +2,10 @@ namespace LocalPost.AsyncEnumerable; internal static class AsyncEnumerableEx { + // TODO Better name... 
+ public static ConcurrentAsyncEnumerable ToConcurrent(this IAsyncEnumerable source, int bufferMaxSize = 1) => + new(source, bufferMaxSize); + public static IAsyncEnumerable Batch(this IAsyncEnumerable source, BatchBuilderFactory factory) => new BatchingAsyncEnumerable(source, factory); diff --git a/src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs b/src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs new file mode 100644 index 0000000..f2b945f --- /dev/null +++ b/src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs @@ -0,0 +1,30 @@ +using Nito.AsyncEx; + +namespace LocalPost.AsyncEnumerable; + +internal static class AsyncEnumeratorEx +{ + public static async ValueTask Consume(this IAsyncEnumerator source, CancellationToken ct = default) + { + var waitTrigger = source.MoveNextAsync(); + var completed = waitTrigger.IsCompleted switch + { + true => await waitTrigger, + _ => await waitTrigger.AsTask().WaitAsync(ct) + }; + + if (completed) + // Ideally there should be a better way to communicate the completion... + throw new EndOfEnumeratorException("Source is empty"); + + return source.Current; + } +} + +internal sealed class EndOfEnumeratorException : Exception +{ + public EndOfEnumeratorException(string message) : base(message) + { + } +} + diff --git a/src/LocalPost/AsyncEnumerable/BatchBuilder.cs b/src/LocalPost/AsyncEnumerable/BatchBuilder.cs index 85880e1..d850fc2 100644 --- a/src/LocalPost/AsyncEnumerable/BatchBuilder.cs +++ b/src/LocalPost/AsyncEnumerable/BatchBuilder.cs @@ -1,70 +1,149 @@ +using JetBrains.Annotations; using Nito.AsyncEx; namespace LocalPost.AsyncEnumerable; -internal delegate IBatchBuilder BatchBuilderFactory(); +[PublicAPI] +public readonly record struct BatchSize // TODO Rename to Size?.. 
+{ + public static implicit operator int(BatchSize batchSize) => batchSize.Value; + + public static implicit operator BatchSize(int batchSize) => new(batchSize); + + public int Value { get; } + + public BatchSize(int value) + { + if (value <= 0) + throw new ArgumentOutOfRangeException(nameof(value), value, "Batch size must be positive."); + + Value = value; + } + + public void Deconstruct(out int value) => value = Value; +} + +internal delegate IBatchBuilder BatchBuilderFactory(CancellationToken ct = default); internal interface IBatchBuilder : IDisposable { CancellationToken TimeWindow { get; } + bool TimeWindowClosed { get; } Task TimeWindowTrigger { get; } bool IsEmpty { get; } + bool Full { get; } bool TryAdd(T entry); TBatch Build(); + void Reset(); + TBatch Flush(); } -internal abstract class BatchBuilder : IBatchBuilder +internal abstract class BatchBuilderBase : IBatchBuilder { - private readonly CancellationTokenSource _timeWindow; - private readonly CancellationTokenTaskSource _timeWindowTrigger; + private readonly TimeSpan _timeWindowLength; + private readonly CancellationToken _ct; - protected BatchBuilder(TimeSpan timeWindow) + private CancellationTokenSource _timeWindow; + private CancellationTokenTaskSource? 
_timeWindowTrigger; + + protected BatchBuilderBase(TimeSpan timeWindow, CancellationToken ct = default) { - _timeWindow = new CancellationTokenSource(timeWindow); - _timeWindowTrigger = new CancellationTokenTaskSource(_timeWindow.Token); + _timeWindowLength = timeWindow; + _ct = ct; // TODO Rename to globalCancellation or something like that + + _timeWindow = StartTimeWindow(); } public CancellationToken TimeWindow => _timeWindow.Token; - public Task TimeWindowTrigger => _timeWindowTrigger.Task; + public bool TimeWindowClosed => TimeWindow.IsCancellationRequested; + public Task TimeWindowTrigger => + (_timeWindowTrigger ??= new CancellationTokenTaskSource(_timeWindow.Token)).Task; + public abstract bool IsEmpty { get; } + public abstract bool Full { get; } public abstract bool TryAdd(T entry); + public abstract TBatch Build(); + private CancellationTokenSource StartTimeWindow() + { + if (_ct == CancellationToken.None) + return new CancellationTokenSource(_timeWindowLength); + + var timeWindow = CancellationTokenSource.CreateLinkedTokenSource(_ct); + timeWindow.CancelAfter(_timeWindowLength); + + return timeWindow; + } + + // Should be overwritten in derived classes, to reset their state also + public virtual void Reset() + { + _timeWindow.Cancel(); + _timeWindow.Dispose(); + _timeWindow = StartTimeWindow(); + + _timeWindowTrigger?.Dispose(); + _timeWindowTrigger = null; + } + + public TBatch Flush() + { + var batch = Build(); + Reset(); + return batch; + } + public void Dispose() { _timeWindow.Dispose(); - _timeWindowTrigger.Dispose(); + _timeWindowTrigger?.Dispose(); } } -internal sealed class BoundedBatchBuilder : BatchBuilder> +internal abstract class BoundedBatchBuilderBase : BatchBuilderBase { - public static BatchBuilderFactory> Factory(int maxSize, int timeWindow) => - () => new BoundedBatchBuilder(maxSize, TimeSpan.FromMilliseconds(timeWindow)); - - private readonly int _max; - private readonly List _batch = new(); + private readonly BatchSize 
_batchMaxSizeSize; + protected List Batch; - public BoundedBatchBuilder(int max, TimeSpan timeWindow) : base(timeWindow) + protected BoundedBatchBuilderBase(BatchSize batchMaxSizeSize, TimeSpan timeWindow, CancellationToken ct = default) : + base(timeWindow, ct) { - _max = max; + _batchMaxSizeSize = batchMaxSizeSize; + Batch = new List(_batchMaxSizeSize); } - public override bool IsEmpty => _batch.Count == 0; + public override bool IsEmpty => Batch.Count == 0; + + public override bool Full => Batch.Count >= _batchMaxSizeSize; public override bool TryAdd(T entry) { - if (_batch.Count >= _max) + if (Full) return false; - _batch.Add(entry); + Batch.Add(entry); return true; } - public override IReadOnlyList Build() => _batch; + public override void Reset() + { + base.Reset(); + Batch = new List(_batchMaxSizeSize); + } +} + +internal sealed class BoundedBatchBuilder : BoundedBatchBuilderBase> +{ + public BoundedBatchBuilder(BatchSize batchMaxSizeSize, TimeSpan timeWindow, CancellationToken ct = default) : + base(batchMaxSizeSize, timeWindow, ct) + { + } + + public override IReadOnlyList Build() => Batch; } diff --git a/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs b/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs index da98eb7..5cb7d80 100644 --- a/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs +++ b/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs @@ -12,85 +12,115 @@ public BatchingAsyncEnumerable(IAsyncEnumerable source, BatchBuilderFactory GetAsyncEnumerator_old(CancellationToken ct = default) +// { +// // FIXME To static builder... +// using var batchBuilder = _factory(ct); +// +// var source = _reader.GetAsyncEnumerator(ct); +// var completed = false; +// var waitTrigger = source.MoveNextAsync(); +// Task? 
waitTask = null; +// while (!completed && !ct.IsCancellationRequested) +// { +// var shift = false; +// try +// { +// if (waitTask is null && waitTrigger.IsCompleted) +// completed = !await waitTrigger; +// else +// { +// waitTask ??= waitTrigger.AsTask(); +// // To save some allocations?.. +//// completed = !await (await Task.WhenAny(waitTask, batchBuilder.TimeWindowTrigger)); +// completed = !await waitTask.WaitAsync(batchBuilder.TimeWindow); +// waitTask = null; +// } +// +// if (completed) +// continue; +// } +// catch (OperationCanceledException e) when (e.CancellationToken == batchBuilder.TimeWindow) +// { +// shift = true; +// } +// catch (OperationCanceledException) // User (global) cancellation +// { +// continue; +// } +// +// if (shift) +// { // C# doesn't allow "yield return" in a try/catch block... +// if (!batchBuilder.IsEmpty) +// yield return batchBuilder.Flush(); +// +// continue; +// } +// +// if (!batchBuilder.TryAdd(source.Current)) +// if (!batchBuilder.IsEmpty) +// { +// // Flush the current buffer and start a fresh one +// yield return batchBuilder.Flush(); +// if (!batchBuilder.TryAdd(source.Current)) +// HandleSkipped(source.Current); // Even an empty batch cannot fit it... +// } +// else +// HandleSkipped(source.Current); // Even an empty buffer cannot fit it... +// +// waitTrigger = source.MoveNextAsync(); +// } +// +// // Flush on completion or error... +// if (!batchBuilder.IsEmpty) +// yield return batchBuilder.Flush(); +// +// ct.ThrowIfCancellationRequested(); +// } public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) { - var batch = _factory(); - IBatchBuilder ShiftBatch() + await using var source = _reader.GetAsyncEnumerator(ct); + using var batchBuilder = _factory(ct); + while (!ct.IsCancellationRequested) { - batch.Dispose(); - return batch = _factory(); - } - - try - { - var source = _reader.GetAsyncEnumerator(ct); - var completed = false; - var waitTrigger = source.MoveNextAsync(); - Task? 
waitTask = null; - while (!completed) + TOut completedBatch; + try { - var shift = false; - try - { - if (waitTask is null && waitTrigger.IsCompleted) - completed = !await waitTrigger; - else - { - waitTask ??= waitTrigger.AsTask(); - // The same as Task.WaitAsync(batch.TimeWindow), but saves some allocations - completed = !await (await Task.WhenAny(waitTask, batch.TimeWindowTrigger)); - waitTask = null; - } - - if (completed) - continue; - } - catch (OperationCanceledException e) when (e.CancellationToken == batch.TimeWindow) + var consumeResult = await source.Consume(ct); // FIXME batchBuilder.TimeWindow + var added = batchBuilder.TryAdd(consumeResult); + if (!added) { - shift = true; + completedBatch = batchBuilder.Flush(); + batchBuilder.TryAdd(consumeResult); // TODO If a message does not fit in an empty batch... } - catch (OperationCanceledException) // User cancellation + else { - completed = true; - continue; + if (batchBuilder.Full) + completedBatch = batchBuilder.Flush(); + else + continue; } - - if (shift) - { - if (!batch.IsEmpty) - yield return batch.Build(); - - ShiftBatch(); + } + catch (EndOfEnumeratorException) + { + break; + } + // Batch time window is closed, or the cancellation token is triggered + catch (OperationCanceledException) + { + if (batchBuilder.IsEmpty) continue; - } - if (!batch.TryAdd(source.Current)) - if (!batch.IsEmpty) - { - // Flush the current buffer and start a fresh one - yield return batch.Build(); - if (!ShiftBatch().TryAdd(source.Current)) - HandleSkipped(source.Current); // Even an empty buffer cannot fit it... - } - else - HandleSkipped(source.Current); // Even an empty buffer cannot fit it... - - waitTrigger = source.MoveNextAsync(); + completedBatch = batchBuilder.Flush(); } - // Flush on completion or error... 
- if (!batch.IsEmpty) - yield return batch.Build(); - - ct.ThrowIfCancellationRequested(); - } - finally - { - batch.Dispose(); + yield return completedBatch; } + + if (!batchBuilder.IsEmpty) + yield return batchBuilder.Flush(); + + ct.ThrowIfCancellationRequested(); } } diff --git a/src/LocalPost/AsyncEnumerable/ConcurrentAsyncEnumerable.cs b/src/LocalPost/AsyncEnumerable/ConcurrentAsyncEnumerable.cs new file mode 100644 index 0000000..ce3912a --- /dev/null +++ b/src/LocalPost/AsyncEnumerable/ConcurrentAsyncEnumerable.cs @@ -0,0 +1,43 @@ +using System.Threading.Channels; + +namespace LocalPost.AsyncEnumerable; + +internal sealed class ConcurrentAsyncEnumerable : IAsyncEnumerable +{ + private readonly IAsyncEnumerable _reader; + private readonly Channel _buffer; + + public ConcurrentAsyncEnumerable(IAsyncEnumerable source, int bufferMaxSize = 1) + { + _reader = source; + _buffer = Channel.CreateBounded(new BoundedChannelOptions(bufferMaxSize) + { + SingleReader = false, + SingleWriter = true, + FullMode = BoundedChannelFullMode.Wait, + }); + } + + public async Task Run(CancellationToken ct) + { + var buffer = _buffer.Writer; + try + { + await foreach (var item in _reader.WithCancellation(ct)) + await buffer.WriteAsync(item, ct); + } + finally + { + buffer.Complete(); + } + } + + public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) + { + var buffer = _buffer.Reader; + // Like ReadAllAsync() from netstandard2.1 + while (await buffer.WaitToReadAsync(ct).ConfigureAwait(false)) + while (buffer.TryRead(out var item)) + yield return item; + } +} diff --git a/src/LocalPost/BackgroundJobQueue.cs b/src/LocalPost/BackgroundJobQueue.cs index d59db53..30f0073 100644 --- a/src/LocalPost/BackgroundJobQueue.cs +++ b/src/LocalPost/BackgroundJobQueue.cs @@ -1,3 +1,5 @@ +using JetBrains.Annotations; + namespace LocalPost; public delegate Task Job(CancellationToken ct); @@ -9,11 +11,12 @@ public interface IBackgroundJobQueue : IBackgroundQueue { } 
+[UsedImplicitly] internal sealed class BackgroundJobQueue : IBackgroundJobQueue { - private readonly BackgroundQueue _queue; + private readonly BackgroundQueue _queue; - public BackgroundJobQueue(BackgroundQueue queue) + public BackgroundJobQueue(BackgroundQueue queue) { _queue = queue; } diff --git a/src/LocalPost/BackgroundQueue.cs b/src/LocalPost/BackgroundQueue.cs index 5d885bd..d78375c 100644 --- a/src/LocalPost/BackgroundQueue.cs +++ b/src/LocalPost/BackgroundQueue.cs @@ -1,100 +1,116 @@ using System.Threading.Channels; -using Microsoft.Extensions.Options; +using JetBrains.Annotations; +using LocalPost.AsyncEnumerable; namespace LocalPost; -public partial interface IBackgroundQueue +[PublicAPI] +public interface IBackgroundQueue { // TODO Custom exception when closed?.. Or just return true/false?.. ValueTask Enqueue(T item, CancellationToken ct = default); } -// TODO Open to public later -internal interface IBackgroundQueueManager +internal static partial class BackgroundQueue { - // Implement later for a better health check -// bool IsFull { get; } - - bool IsClosed { get; } - - ValueTask CompleteAsync(CancellationToken ct = default); -} - -public interface IHandler -{ - Task InvokeAsync(TOut payload, CancellationToken ct); -} - -public interface IMiddleware -{ - Handler Invoke(Handler next); -} - -public delegate Task Handler(T context, CancellationToken ct); - -public delegate Handler HandlerFactory(IServiceProvider provider); - -public delegate Handler Middleware(Handler next); -//public delegate Task Middleware(T context, Handler next, CancellationToken ct); - -public delegate Middleware MiddlewareFactory(IServiceProvider provider); - - - -internal sealed partial class BackgroundQueue : IBackgroundQueue, IBackgroundQueueManager, IAsyncEnumerable -{ - private readonly TimeSpan _completionTimeout; - - public BackgroundQueue(BackgroundQueueOptions options) : this( - options.MaxSize switch + public static BackgroundQueue Create(BackgroundQueueOptions 
options) => + Create(options, reader => reader.ReadAllAsync()); + + public static BackgroundQueue> CreateBatched(BatchedBackgroundQueueOptions options) => + Create>(options, + reader => reader + .ReadAllAsync() + .Batch(ct => new BoundedBatchBuilder(options.BatchMaxSize, options.BatchTimeWindow, ct)), + true); + + // To make the pipeline linear (single consumer), just add .ToConcurrent() to the end + public static BackgroundQueue Create(BackgroundQueueOptions options, + Func, IAsyncEnumerable> configure, + bool proxy = false) // TODO Rename this parameter somehow... + { + var channel = options.MaxSize switch { not null => Channel.CreateBounded(new BoundedChannelOptions(options.MaxSize.Value) { - SingleReader = options.MaxConcurrency == 1, - SingleWriter = false, + SingleReader = proxy || options.MaxConcurrency == 1, + SingleWriter = false, // We do not know how it will be used + FullMode = options.FullMode, }), _ => Channel.CreateUnbounded(new UnboundedChannelOptions { - SingleReader = options.MaxConcurrency == 1, - SingleWriter = false, + SingleReader = proxy || options.MaxConcurrency == 1, + SingleWriter = false, // We do not know how it will be used }) - }, - TimeSpan.FromMilliseconds(options.CompletionTimeout ?? 0)) - { - } + }; - public BackgroundQueue(Channel messages, TimeSpan completionTimeout) - { - _completionTimeout = completionTimeout; - Messages = messages; + var pipeline = configure(channel.Reader); + if (proxy) + pipeline = pipeline.ToConcurrent(); + + return new BackgroundQueue(channel, pipeline, + TimeSpan.FromMilliseconds(options.CompletionTimeout ?? 
0)); } +} - internal Channel Messages { get; } +internal static partial class BackgroundQueue +{ + public static readonly string Name = "BackgroundQueue/" + Reflection.FriendlyNameOf(); +} - public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) +internal sealed class BackgroundQueue : IBackgroundQueue, IAsyncEnumerable, + IBackgroundService +{ + private readonly TimeSpan _completionTimeout; + private readonly ChannelWriter _messages; + private readonly IAsyncEnumerable _pipeline; + + public BackgroundQueue(ChannelWriter input, IAsyncEnumerable pipeline, TimeSpan completionTimeout) { - // Track full or not later - while (await Messages.Reader.WaitToReadAsync(ct)) - while (Messages.Reader.TryRead(out var item)) - yield return item; + _completionTimeout = completionTimeout; + _messages = input; + _pipeline = pipeline; } - public static implicit operator ChannelReader(BackgroundQueue that) => that.Messages.Reader; + public IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct) => _pipeline.GetAsyncEnumerator(ct); - public static implicit operator ChannelWriter(BackgroundQueue that) => that.Messages.Writer; + // Track full or not later + public ValueTask Enqueue(T item, CancellationToken ct = default) => _messages.WriteAsync(item, ct); - public ValueTask Enqueue(T item, CancellationToken ct = default) => Messages.Writer.WriteAsync(item, ct); + public bool IsClosed { get; private set; } // TODO Use - public bool IsClosed { get; private set; } - - public async ValueTask CompleteAsync(CancellationToken ct = default) + private async ValueTask CompleteAsync(CancellationToken ct = default) { if (IsClosed) return; await Task.Delay(_completionTimeout, ct); - Messages.Writer.Complete(); // TODO Handle exceptions + _messages.Complete(); IsClosed = true; } + + public Task StartAsync(CancellationToken ct) => Task.CompletedTask; + + public Task ExecuteAsync(CancellationToken ct) => _pipeline switch + { + ConcurrentAsyncEnumerable concurrent => 
concurrent.Run(ct), + _ => Task.CompletedTask + }; + + public async Task StopAsync(CancellationToken forceExitToken) => await CompleteAsync(forceExitToken); } + + + + + +//// Open to public later?.. +//internal interface IBackgroundQueueManager +//{ +// // Implement later for a better health check?.. +//// bool IsFull { get; } +// +// bool IsClosed { get; } +// +// ValueTask CompleteAsync(CancellationToken ct = default); +//} diff --git a/src/LocalPost/BackgroundQueueConsumer.cs b/src/LocalPost/BackgroundQueueConsumer.cs index 351e17e..207c1d9 100644 --- a/src/LocalPost/BackgroundQueueConsumer.cs +++ b/src/LocalPost/BackgroundQueueConsumer.cs @@ -1,186 +1,116 @@ -using System.Collections.Immutable; -using System.Threading.Channels; -using LocalPost.AsyncEnumerable; +using LocalPost.DependencyInjection; namespace LocalPost; -internal sealed class ConsumerSupervisor : IBackgroundServiceSupervisor -{ - private CancellationTokenSource? _executionCts; - private Task? _execution; - - private readonly Func _consumer; +// public async Task StopAsync(CancellationToken forceExitToken) +// { +// // Do not cancel the execution immediately, as it will finish gracefully itself (when the channel is closed) +// +// // TODO .NET 6 async... 
+// using var linked = forceExitToken.Register(() => _executionCts?.Cancel()); +// +// if (_execution is not null) +// await _execution; +// } - public ConsumerSupervisor(Func consumer) - { - _consumer = consumer; - } - public bool Started => _executionCts is not null && _execution is not null; - public bool Running => _execution is not null && !_execution.IsCompleted; +internal static partial class BackgroundQueue +{ +// public static BackgroundQueue.Consumer ConsumerFor(IAsyncEnumerable reader, Handler handler) => +// new(reader, handler); - public bool Crashed => Exception is not null; +// public static IBackgroundServiceSupervisor ConsumerSupervisorFor(BackgroundQueue.Consumer consumer) => +// new BackgroundServiceSupervisor(consumer); - public Exception? Exception { get; private set; } + public static ConsumerGroup ConsumerGroupFor(TQ queue, Handler handler, int maxConcurrency) + where TQ : IAsyncEnumerable => new(Consumer.Loop(queue, handler), maxConcurrency); - public Task StartAsync(CancellationToken ct) - { - if (_executionCts is not null) - throw new InvalidOperationException("Execution has been already started"); + public static ConsumerGroup ConsumerGroupOverDisposablesFor(TQ queue, Handler handler, int maxConcurrency) + where TQ : IAsyncEnumerable + where T : IDisposable => new(Consumer.LoopOverDisposables(queue, handler), maxConcurrency); - _executionCts = new CancellationTokenSource(); - _execution = ExecuteAsync(_executionCts.Token); + public static NamedConsumerGroup ConsumerGroupForNamed(TQ queue, Handler handler, int maxConcurrency) + where TQ : IAsyncEnumerable, INamedService => new(queue, Consumer.Loop(queue, handler), maxConcurrency); - return Task.CompletedTask; - } + public static NamedConsumerGroup ConsumerGroupOverDisposablesForNamed(TQ queue, Handler handler, int maxConcurrency) + where TQ : IAsyncEnumerable, INamedService + where T : IDisposable => new(queue, Consumer.Loop(queue, handler), maxConcurrency); - private async Task 
ExecuteAsync(CancellationToken ct) + internal class NamedConsumerGroup : ConsumerGroupBase, INamedService + where TQ : IAsyncEnumerable, INamedService { - if (ct.IsCancellationRequested) - return; - - try - { - await _consumer(ct); - - // Completed - } - catch (OperationCanceledException e) when (e.CancellationToken == ct) + public NamedConsumerGroup(TQ queue, Func loop, int maxConcurrency) : + base(loop, maxConcurrency) { - // Completed gracefully on request + Name = queue.Name; } - catch (Exception e) - { - Exception = e; - } - } - public async Task StopAsync(CancellationToken forceExitToken) - { - // Do not cancel the execution immediately, as it will finish gracefully itself (when the channel is closed) - - // TODO .NET 6 async... - using var linked = forceExitToken.Register(() => _executionCts?.Cancel()); - - if (_execution is not null) - await _execution; + public string Name { get; } } - public void Dispose() + // Parametrized class, to be used with the Dependency Injection container + internal class ConsumerGroup : ConsumerGroupBase where TQ : IAsyncEnumerable { - _executionCts?.Dispose(); - _execution?.Dispose(); + public ConsumerGroup(Func loop, int maxConcurrency) : base(loop, maxConcurrency) + { + } } -} -// With predefined static size -internal sealed class ConsumerGroup : IBackgroundServiceSupervisor -{ - private readonly ImmutableArray _services; - - public ConsumerGroup(Func consumer, int maxConcurrency) + // Parametrized class, to be used with the Dependency Injection container + internal class ConsumerGroupBase : IBackgroundService { - _services = Enumerable.Range(1, maxConcurrency) - .Select(_ => new ConsumerSupervisor(consumer)) - .ToImmutableArray(); - } + private readonly List _consumers; - public Task StartAsync(CancellationToken ct) => - Task.WhenAll(_services.Select(service => service.StartAsync(ct))); - // TODO Log info + protected ConsumerGroupBase(Func loop, int maxConcurrency) + { + _consumers = Enumerable.Range(1, maxConcurrency) + 
.Select(_ => new Consumer(loop)) + .ToList(); + } - public Task StopAsync(CancellationToken ct) => - Task.WhenAll(_services.Select(service => service.StopAsync(ct))); - // TODO Log info + public Task StartAsync(CancellationToken ct) => Task.CompletedTask; - public bool Started => _services.All(c => c.Started); - public bool Running => _services.All(c => c.Running); - public bool Crashed => _services.Any(c => c.Crashed); - public Exception? Exception => null; // TODO Implement + public Task ExecuteAsync(CancellationToken ct) => + Task.WhenAll(_consumers.Select(c => c.ExecuteAsync(ct))); - public void Dispose() - { - foreach (var disposable in _services) - disposable.Dispose(); + public Task StopAsync(CancellationToken ct) => + Task.WhenAll(_consumers.Select(c => c.StopAsync(ct))); } -} -internal sealed partial class BackgroundQueue -{ internal sealed class Consumer { - private readonly IAsyncEnumerable _reader; - private readonly Handler _handler; - - public Consumer(IAsyncEnumerable reader, Handler handler) - { - _reader = reader; - _handler = handler; - } - - public async Task Run(CancellationToken ct) - { - await foreach (var message in _reader.WithCancellation(ct)) - await _handler(message, ct); - } - } - - internal sealed class BatchConsumer - { - private readonly ChannelReader _source; - private readonly ChannelWriter _destination; - private readonly BatchBuilderFactory _factory; - - private IBatchBuilder _batch; - - public BatchConsumer(ChannelReader source, ChannelWriter destination, BatchBuilderFactory factory) - { - _source = source; - _destination = destination; - _factory = factory; - - _batch = factory(); - } - - public async Task Run(CancellationToken ct) - { - while (true) + public static Func Loop(IAsyncEnumerable queue, Handler handler) => + async (CancellationToken ct) => { - using var cts = CancellationTokenSource.CreateLinkedTokenSource(ct, _batch.TimeWindow); - try - { - if (!await _source.WaitToReadAsync(cts.Token)) - break; + await foreach 
(var message in queue.WithCancellation(ct)) + await handler(message, ct); + }; - while (_source.TryRead(out var message)) + public static Func LoopOverDisposables + (IAsyncEnumerable queue, Handler handler) where T : IDisposable => + async (CancellationToken ct) => + { + await foreach (var message in queue.WithCancellation(ct)) + try { - if (_batch.TryAdd(message)) continue; - if (_batch.IsEmpty) - throw new Exception("Cannot fit a message in a batch"); - - await ProcessBatch(ct); - - if (!_batch.TryAdd(message)) - throw new Exception("Cannot fit a message in a batch"); + await handler(message, ct); } - } - catch (OperationCanceledException e) when (e.CancellationToken == _batch.TimeWindow) - { - // Just process the current batch - if (!_batch.IsEmpty) - await ProcessBatch(ct); - } - } - - _destination.Complete(); - } + finally + { + message.Dispose(); + } + }; - private async Task ProcessBatch(CancellationToken ct) + private readonly Func _loop; + + public Consumer(Func loop) { - await _destination.WriteAsync(_batch.Build(), ct); - _batch.Dispose(); - _batch = _factory(); + _loop = loop; } + + public Task ExecuteAsync(CancellationToken ct) => _loop(ct); + + public Task StopAsync(CancellationToken ct) => _loop(ct); // Process the rest (leftovers) } } diff --git a/src/LocalPost/BackgroundQueueOptions.cs b/src/LocalPost/BackgroundQueueOptions.cs index 13dcac9..aab061b 100644 --- a/src/LocalPost/BackgroundQueueOptions.cs +++ b/src/LocalPost/BackgroundQueueOptions.cs @@ -3,16 +3,42 @@ namespace LocalPost; +// For the DI container and, to distinguish between different queues +public sealed record BackgroundQueueOptions : BackgroundQueueOptions; + +// For the DI container and, to distinguish between different queues +public sealed record BatchedBackgroundQueueOptions : BatchedBackgroundQueueOptions; + +public record BatchedBackgroundQueueOptions : BackgroundQueueOptions +{ + [Range(1, ushort.MaxValue)] + public ushort BatchMaxSize { get; set; } = 10; + + [Range(1, 
ushort.MaxValue)] + public int BatchTimeWindowMilliseconds { get; set; } = 1_000; + + internal TimeSpan BatchTimeWindow => TimeSpan.FromMilliseconds(BatchTimeWindowMilliseconds); +} + /// /// Background queue configuration. /// public record BackgroundQueueOptions { - // TODO Use + /// + /// How to handle new messages when the queue (channel) is full. Default is to drop the oldest message (to not + /// block the producer). + /// public BoundedChannelFullMode FullMode { get; set; } = BoundedChannelFullMode.DropOldest; + /// + /// Maximum queue (channel) length, after which writes are blocked. Default is unlimited. + /// public ushort? MaxSize { get; set; } = null; + /// + /// How long to wait before closing the queue (channel) on app shutdown. Default is 1 second. + /// public ushort? CompletionTimeout { get; set; } = 1_000; // Milliseconds /// diff --git a/src/LocalPost/BackgroundQueueService.cs b/src/LocalPost/BackgroundQueueService.cs index d944cf1..90b74bc 100644 --- a/src/LocalPost/BackgroundQueueService.cs +++ b/src/LocalPost/BackgroundQueueService.cs @@ -5,81 +5,76 @@ namespace LocalPost; -internal sealed class BackgroundQueueService -{ - public static readonly string Name = Reflection.FriendlyNameOf(); - - public static BackgroundQueueService Create(IServiceProvider provider, MiddlewareStack handlerStack) - { - var options = provider.GetOptions>(); - - var queue = new BackgroundQueue(options); - - HandlerFactory handlerFactory = handlerStack.Resolve; - Handler handler = ActivatorUtilities.CreateInstance>(provider, - Name, handlerFactory).InvokeAsync; - - var consumer = new BackgroundQueue.Consumer(queue, handler); - var consumerGroup = new ConsumerGroup(consumer.Run, options.MaxConcurrency); - - return new BackgroundQueueService(queue, consumerGroup); - } - - public static BackgroundQueueService CreateBatched(IServiceProvider provider, - MiddlewareStack> handlerStack, - int maxBatchSize = 10, int batchCompletionTimeWindow = 1_000) => CreateBatched(provider, 
handlerStack, - BoundedBatchBuilder.Factory(maxBatchSize, batchCompletionTimeWindow)); - - public static BackgroundQueueService CreateBatched(IServiceProvider provider, - MiddlewareStack handlerStack, BatchBuilderFactory batchFactory) - { - var options = provider.GetOptions>(); - - var queue = new BackgroundQueue(options); - var batchQueue = new BackgroundQueue(options); - - // Just a single consumer, to do the batching properly - var consumer = new BackgroundQueue.BatchConsumer(queue, batchQueue, batchFactory); - var consumerSupervisor = new ConsumerSupervisor(consumer.Run); - - HandlerFactory handlerFactory = handlerStack.Resolve; - Handler handler = ActivatorUtilities.CreateInstance>(provider, - Name, handlerFactory).InvokeAsync; - var batchConsumer = new BackgroundQueue.Consumer(batchQueue, handler); - var batchConsumerGroup = new ConsumerGroup(batchConsumer.Run, options.MaxConcurrency); - - return new BackgroundQueueService(queue, - new IBackgroundServiceSupervisor.Combined(consumerSupervisor, batchConsumerGroup)); - } - - public BackgroundQueueService(BackgroundQueue queue, IBackgroundServiceSupervisor consumerGroup) - { - Queue = queue; - QueueSupervisor = new BackgroundQueue.Supervisor(queue); - - ConsumerGroup = consumerGroup; - _consumerGroupReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(consumerGroup); - _consumerGroupLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(consumerGroup); - } - - public IBackgroundQueue Queue { get; } - - public IConcurrentHostedService QueueSupervisor { get; } - - public IConcurrentHostedService ConsumerGroup { get; } - private readonly IHealthCheck _consumerGroupReadinessCheck; - private readonly IHealthCheck _consumerGroupLivenessCheck; - - public static HealthCheckRegistration ConsumerGroupReadinessCheck(HealthStatus? failureStatus = default, - IEnumerable? 
tags = default) => new(Name, - provider => provider.GetRequiredService>()._consumerGroupReadinessCheck, - failureStatus, - tags); - - public static HealthCheckRegistration ConsumerGroupLivenessCheck(HealthStatus? failureStatus = default, - IEnumerable? tags = default) => new(Name, - provider => provider.GetRequiredService>()._consumerGroupLivenessCheck, - failureStatus, - tags); - -} +//internal sealed class BackgroundQueueService +//{ +// public static readonly string Name = Reflection.FriendlyNameOf(); +// +// public static BackgroundQueueService Create(IServiceProvider provider, HandlerStack handlerStack) +// { +// var options = provider.GetOptions>(); +// +// var queue = new BackgroundQueue(options); +// +// HandlerFactory handlerFactory = handlerStack.Resolve; +// Handler handler = ActivatorUtilities.CreateInstance(provider, +// Name, handlerFactory).InvokeAsync; +// +// var consumer = new BackgroundQueue.Consumer(queue, handler); +// var consumerGroup = BackgroundQueue.ConsumerGroupSupervisorFor(consumer, options.MaxConcurrency); +// +// return new BackgroundQueueService(queue, consumerGroup); +// } +// +// // TODO Use +// public static BackgroundQueueService CreateBatched(IServiceProvider provider, +// HandlerStack handlerStack, BatchBuilderFactory batchFactory) +// { +// var options = provider.GetOptions>(); +// +// var queue = new BackgroundQueue(options); +// var batchQueue = new BackgroundQueue(options); +// +// // Just a single consumer, to do the batching properly +// var consumer = new BackgroundQueue.BatchBuilder(queue, batchQueue, batchFactory); +// var consumerSupervisor = new ConsumerSupervisor(consumer.Run); +// +// HandlerFactory handlerFactory = handlerStack.Resolve; +// Handler handler = ActivatorUtilities.CreateInstance(provider, +// Name, handlerFactory).InvokeAsync; +// var batchConsumer = new BackgroundQueue.Consumer(batchQueue, handler); +// var batchConsumerGroup = BackgroundQueue.ConsumerGroupSupervisorFor(batchConsumer, 
options.MaxConcurrency); +// +// return new BackgroundQueueService(queue, +// new IBackgroundServiceSupervisor.Combined(consumerSupervisor, batchConsumerGroup)); +// } +// +// private BackgroundQueueService(BackgroundQueue queue, IBackgroundServiceSupervisor consumerGroup) +// { +// Queue = queue; +// QueueSupervisor = new BackgroundQueue.Supervisor(queue); +// +// ConsumerGroup = consumerGroup; +// _consumerGroupReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(consumerGroup); +// _consumerGroupLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(consumerGroup); +// } +// +// public IBackgroundQueue Queue { get; } +// +// public IConcurrentHostedService QueueSupervisor { get; } +// +// public IConcurrentHostedService ConsumerGroup { get; } +// private readonly IHealthCheck _consumerGroupReadinessCheck; +// private readonly IHealthCheck _consumerGroupLivenessCheck; +// +// public static HealthCheckRegistration ConsumerGroupReadinessCheck(HealthStatus? failureStatus = default, +// IEnumerable? tags = default) => new(Name, +// provider => provider.GetRequiredService>()._consumerGroupReadinessCheck, +// failureStatus, +// tags); +// +// public static HealthCheckRegistration ConsumerGroupLivenessCheck(HealthStatus? failureStatus = default, +// IEnumerable? tags = default) => new(Name, +// provider => provider.GetRequiredService>()._consumerGroupLivenessCheck, +// failureStatus, +// tags); +//} diff --git a/src/LocalPost/BackgroundQueueSupervisor.cs b/src/LocalPost/BackgroundQueueSupervisor.cs index c08dcfa..5ad9231 100644 --- a/src/LocalPost/BackgroundQueueSupervisor.cs +++ b/src/LocalPost/BackgroundQueueSupervisor.cs @@ -1,20 +1,21 @@ namespace LocalPost; -internal sealed partial class BackgroundQueue -{ - internal sealed class Supervisor : IConcurrentHostedService - { - // Health checks later?.. Like full or not. 
- - private readonly IBackgroundQueueManager _queue; - - public Supervisor(IBackgroundQueueManager queue) - { - _queue = queue; - } - - public Task StartAsync(CancellationToken ct) => Task.CompletedTask; - - public async Task StopAsync(CancellationToken forceExitToken) => await _queue.CompleteAsync(forceExitToken); - } -} +//internal sealed partial class BackgroundQueue +//{ +// internal sealed class Supervisor : IConcurrentHostedService +// { +// // Health checks later?.. Like full or not. +// +// private readonly IBackgroundQueueManager _queue; +// +// public Supervisor(IBackgroundQueueManager queue) +// { +// _queue = queue; +// } +// +// // TODO Run the enumarable... Like Batched. +// public Task StartAsync(CancellationToken ct) => Task.CompletedTask; +// +// public async Task StopAsync(CancellationToken forceExitToken) => await _queue.CompleteAsync(forceExitToken); +// } +//} diff --git a/src/LocalPost/BackgroundService.cs b/src/LocalPost/BackgroundService.cs deleted file mode 100644 index b11aaa0..0000000 --- a/src/LocalPost/BackgroundService.cs +++ /dev/null @@ -1,10 +0,0 @@ -namespace LocalPost; - -internal interface IBackgroundService -{ - Task StartAsync(CancellationToken ct); - - Task ExecuteAsync(CancellationToken ct); - - Task StopAsync(CancellationToken ct); -} diff --git a/src/LocalPost/BackgroundServiceSupervisor.cs b/src/LocalPost/BackgroundServiceSupervisor.cs deleted file mode 100644 index 8b618b8..0000000 --- a/src/LocalPost/BackgroundServiceSupervisor.cs +++ /dev/null @@ -1,187 +0,0 @@ -using System.Collections.Immutable; -using System.Diagnostics.CodeAnalysis; -using Microsoft.Extensions.Diagnostics.HealthChecks; -using Nito.AsyncEx; -using static Microsoft.Extensions.Diagnostics.HealthChecks.HealthCheckResult; - -namespace LocalPost; - -internal interface IBackgroundServiceSupervisor : IConcurrentHostedService, IDisposable -{ - // With predefined static size - // (IAsyncDisposable later?..) 
- internal sealed record Combined(ImmutableArray Supervisors) : - IBackgroundServiceSupervisor - { - public Combined(IBackgroundServiceSupervisor s1, IBackgroundServiceSupervisor s2) : this(new[] { s1, s2 }) - { - } - - public Combined(IEnumerable supervisors) : this(supervisors.ToImmutableArray()) - { - } - - public Task StartAsync(CancellationToken ct) => - Task.WhenAll(Supervisors.Select(service => service.StartAsync(ct))); - - public Task StopAsync(CancellationToken ct) => - Task.WhenAll(Supervisors.Select(service => service.StopAsync(ct))); - - public bool Started => Supervisors.All(c => c.Started); - public bool Running => Supervisors.All(c => c.Running); - public bool Crashed => Supervisors.Any(c => c.Crashed); - public Exception? Exception => null; // TODO Implement - - public void Dispose() - { - foreach (var disposable in Supervisors) - disposable.Dispose(); - } - } - - public sealed class LivenessCheck : IHealthCheck - { - private readonly IBackgroundServiceSupervisor _supervisor; - - public LivenessCheck(IBackgroundServiceSupervisor supervisor) - { - _supervisor = supervisor; - } - - public Task CheckHealthAsync(HealthCheckContext context, - CancellationToken cancellationToken = default) => Task.FromResult(CheckHealth(context)); - - private HealthCheckResult CheckHealth(HealthCheckContext _) - { - if (_supervisor.Crashed) - return Unhealthy("Crashed", _supervisor.Exception); - - if (_supervisor is { Started: true, Running: false }) - return Unhealthy("Not running"); - - // Starting or running - return Healthy("Alive"); - } - } - - public sealed class ReadinessCheck : IHealthCheck - { - private readonly IBackgroundServiceSupervisor _supervisor; - - public ReadinessCheck(IBackgroundServiceSupervisor supervisor) - { - _supervisor = supervisor; - } - - public Task CheckHealthAsync(HealthCheckContext context, - CancellationToken cancellationToken = default) => Task.FromResult(CheckHealth(context)); - - private HealthCheckResult CheckHealth(HealthCheckContext 
context) - { - if (!_supervisor.Started) - return Unhealthy("Has not been started yet", _supervisor.Exception); - - if (_supervisor.Crashed) - return Unhealthy("Crashed", _supervisor.Exception); - - return Healthy("Running or completed"); - } - } - - public bool Started { get; } - - public bool Running { get; } - - [MemberNotNullWhen(true, nameof(Exception))] - public bool Crashed { get; } - - public Exception? Exception { get; } -} - -internal sealed class BackgroundServiceSupervisor : IBackgroundServiceSupervisor -{ - private CancellationTokenSource? _executionCts; - private Task? _execution; - - public BackgroundServiceSupervisor(IBackgroundService service) - { - Service = service; - } - - public IBackgroundService Service { get; } - - // TODO StartedSuccessfully - public bool Started => _executionCts is not null && _execution is not null; - - public bool Running => _execution is not null && !_execution.IsCompleted; - - public bool Crashed => Exception is not null; - - public Exception? 
Exception { get; private set; } - - public async Task StartAsync(CancellationToken ct) - { - if (_executionCts is not null) - throw new InvalidOperationException("Execution has been already started"); - - _executionCts = new CancellationTokenSource(); - - try - { - await Service.StartAsync(ct); - - // Store the task we're executing - _execution = ExecuteAsync(_executionCts.Token); - } - catch (Exception e) - { - Exception = e; - } - } - - private async Task ExecuteAsync(CancellationToken ct) - { - if (ct.IsCancellationRequested) - return; - - try - { - await Service.ExecuteAsync(ct); - - // Completed - } - catch (OperationCanceledException e) when (e.CancellationToken == ct) - { - // Completed gracefully on request - } - catch (Exception e) - { - Exception = e; - } - } - - public async Task StopAsync(CancellationToken forceExitToken) - { - if (_executionCts is null || _executionCts.IsCancellationRequested) - return; - - // Signal cancellation to the executing method - _executionCts.Cancel(); - - if (_execution is null) - return; - - // Wait until the execution completes or the app is forced to exit - await _execution.WaitAsync(forceExitToken); - - await Service.StopAsync(forceExitToken); - } - - public void Dispose() - { - _executionCts?.Dispose(); - // ReSharper disable once SuspiciousTypeConversion.Global - if (Service is IDisposable disposable) - disposable.Dispose(); - } -} diff --git a/src/LocalPost/ConcurrentHostedServices.cs b/src/LocalPost/ConcurrentHostedServices.cs new file mode 100644 index 0000000..366899b --- /dev/null +++ b/src/LocalPost/ConcurrentHostedServices.cs @@ -0,0 +1,194 @@ +using System.Collections.Immutable; +using System.Diagnostics.CodeAnalysis; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; +using Microsoft.Extensions.Hosting; +using Nito.AsyncEx; +using static Microsoft.Extensions.Diagnostics.HealthChecks.HealthCheckResult; + +namespace LocalPost; + +internal interface IBackgroundService +{ + 
Task StartAsync(CancellationToken ct); + + Task ExecuteAsync(CancellationToken ct); + + Task StopAsync(CancellationToken ct); +} + +internal interface IBackgroundServiceMonitor +{ + public sealed class LivenessCheck : IHealthCheck + { + public required IBackgroundServiceMonitor Service { get; init; } + + public Task CheckHealthAsync(HealthCheckContext context, + CancellationToken cancellationToken = default) => Task.FromResult(CheckHealth(context)); + + private HealthCheckResult CheckHealth(HealthCheckContext _) => Service switch + { + { Crashed: true } => Unhealthy("Crashed", Service.Exception), +// { Running: false } => Degraded("Not (yet) running"), + // Started and running + _ => Healthy("Alive") + }; + } + + // Readiness like "ready to handle requests" is the same a liveness check here. At least at the moment. + public sealed class ReadinessCheck : IHealthCheck + { + public required IBackgroundServiceMonitor Service { get; init; } + + public Task CheckHealthAsync(HealthCheckContext context, + CancellationToken cancellationToken = default) => Task.FromResult(CheckHealth(context)); + + private HealthCheckResult CheckHealth(HealthCheckContext _) => Service switch + { + { Running: true } => Healthy("Running"), + _ => Unhealthy("Not (yet) running") + }; + } + + public bool Started { get; } + + public bool Running { get; } + + [MemberNotNullWhen(true, nameof(Exception))] + public bool Crashed { get; } + + public Exception? Exception { get; } +} + +internal class NamedBackgroundServiceRunner : BackgroundServiceRunner, INamedService + where T : class, IBackgroundService, INamedService +{ + public NamedBackgroundServiceRunner(T service, IHostApplicationLifetime appLifetime) : base(service, appLifetime) + { + Name = service.Name; + } + + public string Name { get; } +} + +internal class BackgroundServiceRunner : IConcurrentHostedService, IBackgroundServiceMonitor, IDisposable + where T : class, IBackgroundService +{ + private Task? 
_start; + private CancellationTokenSource? _executionCts; + private Task? _execution; + + private readonly T _service; + private readonly IHostApplicationLifetime _appLifetime; + + public BackgroundServiceRunner(T service, IHostApplicationLifetime appLifetime) + { + _service = service; + _appLifetime = appLifetime; + } + + public bool Starting => _start is not null && !_start.IsCompleted; + + // StartedSuccessfully?.. + public bool Started => _start is not null && _start.Status == TaskStatus.RanToCompletion; + + public bool Running => _execution is not null && !_execution.IsCompleted; + + public bool StartCrashed => _start is not null && _start.Status == TaskStatus.Faulted; + public bool RunCrashed => _execution is not null && _execution.Status == TaskStatus.Faulted; + public bool Crashed => StartCrashed || RunCrashed; + + // TODO Test + public Exception? Exception => (StartCrashed ? _start?.Exception : _execution?.Exception)?.InnerException; + + private async Task WaitAppStartAsync(CancellationToken ct) + { + try + { + // Wait until all other services are started + await Task.Delay(Timeout.Infinite, _appLifetime.ApplicationStarted).WaitAsync(ct); + } + catch (OperationCanceledException e) when (e.CancellationToken == _appLifetime.ApplicationStarted) + { + // Startup completed, continue + } + } + + public async Task StartAsync(CancellationToken ct) + { + // All the services are started from the same (main) thread, so there are no races + if (_start is not null) + throw new InvalidOperationException("Service is already started"); + + await (_start = _service.StartAsync(ct)); + + // Start execution in the background... 
+#pragma warning disable CS4014 + ExecuteAsync(); +#pragma warning restore CS4014 + } + + private async Task ExecuteAsync() + { + _executionCts = new CancellationTokenSource(); + var ct = _executionCts.Token; + + try + { + await WaitAppStartAsync(ct); + await (_execution = _service.ExecuteAsync(ct)); + } + catch (OperationCanceledException e) when (e.CancellationToken == ct) + { + // Normal case, we trigger this token ourselves when stopping the service + } + catch (Exception) + { + // Otherwise it's an error, but swallow it silently (this method is called in "fire and forget" mode, not + // awaited, so any unhandled exception will arrive in TaskScheduler.UnobservedTaskException, which is not + // what we want). + // See also: https://stackoverflow.com/a/59300076/322079. + } + } + + public async Task StopAsync(CancellationToken forceExitToken) + { + if (_executionCts is null) + // Or simply ignore and return?.. + throw new InvalidOperationException("Service has not been started"); + + if (!_executionCts.IsCancellationRequested) + _executionCts.Cancel(); // Signal cancellation to the service + + if (_execution is not null) + // Wait until the execution completes or the app is forced to exit + await _execution.WaitAsync(forceExitToken); + + await _service.StopAsync(forceExitToken); + } + + public void Dispose() + { + _executionCts?.Dispose(); + } +} + +internal interface IConcurrentHostedService : IHostedService +{ +} + +internal sealed class ConcurrentHostedServices : IHostedService +{ + private readonly ImmutableArray _services; + + public ConcurrentHostedServices(IEnumerable services) + { + _services = services.ToImmutableArray(); + } + + public Task StartAsync(CancellationToken cancellationToken) => + Task.WhenAll(_services.Select(c => c.StartAsync(cancellationToken))); + + public Task StopAsync(CancellationToken cancellationToken) => + Task.WhenAll(_services.Select(c => c.StopAsync(cancellationToken))); +} diff --git 
a/src/LocalPost/DependencyInjection/HealthChecks.cs b/src/LocalPost/DependencyInjection/HealthChecks.cs index 3b7fcc2..dfb1e1b 100644 --- a/src/LocalPost/DependencyInjection/HealthChecks.cs +++ b/src/LocalPost/DependencyInjection/HealthChecks.cs @@ -1,15 +1,31 @@ -using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Diagnostics.HealthChecks; +using Microsoft.Extensions.DependencyInjection; namespace LocalPost.DependencyInjection; -public static class HealthChecks +internal static class HealthChecks { - public static IHealthChecksBuilder AddBackgroundQueueReadinessCheck(this IHealthChecksBuilder builder, - HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder - .Add(BackgroundQueueService.ConsumerGroupReadinessCheck(failureStatus, tags)); + public static HealthCheckRegistration LivenessCheckFor(HealthStatus? failureStatus = null, + IEnumerable? tags = default) where T : class, IBackgroundService => new( + Reflection.FriendlyNameOf(), // Can be overwritten later + provider => new IBackgroundServiceMonitor.LivenessCheck + { Service = provider.GetRequiredService>() }, + failureStatus, // Can be overwritten later + tags); + + public static HealthCheckRegistration LivenessCheckForNamed(string name, HealthStatus? failureStatus = null, + IEnumerable? tags = default) where T : class, IBackgroundService, INamedService => new( + name, // Can be overwritten later + provider => new IBackgroundServiceMonitor.LivenessCheck + { Service = provider.GetRequiredService>(name) }, + failureStatus, // Can be overwritten later + tags); - public static IHealthChecksBuilder AddBackgroundQueueLivenessCheck(this IHealthChecksBuilder builder, - HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder - .Add(BackgroundQueueService.ConsumerGroupLivenessCheck(failureStatus, tags)); + public static HealthCheckRegistration ReadinessCheckForNamed(string name, HealthStatus? failureStatus = null, + IEnumerable? 
tags = default) where T : class, IBackgroundService, INamedService => new( + name, // Can be overwritten later + provider => new IBackgroundServiceMonitor.ReadinessCheck + { Service = provider.GetRequiredService>(name) }, + failureStatus, // Can be overwritten later + tags); } diff --git a/src/LocalPost/DependencyInjection/INamedService.cs b/src/LocalPost/DependencyInjection/INamedService.cs index 59c6dd2..4e9aecf 100644 --- a/src/LocalPost/DependencyInjection/INamedService.cs +++ b/src/LocalPost/DependencyInjection/INamedService.cs @@ -1,6 +1,32 @@ +using Microsoft.Extensions.DependencyInjection; + namespace LocalPost.DependencyInjection; internal interface INamedService { string Name { get; } } + +internal sealed class NamedServiceDescriptor : ServiceDescriptor +{ + public static NamedServiceDescriptor Singleton(string name, + Func implementationFactory) where TService : class, INamedService => + new(typeof(TService), name, implementationFactory, ServiceLifetime.Singleton); + + public string Name { get; init; } + + public NamedServiceDescriptor(Type serviceType, string name, Type implementationType, ServiceLifetime lifetime) : base(serviceType, implementationType, lifetime) + { + Name = name; + } + + public NamedServiceDescriptor(Type serviceType, string name, object instance) : base(serviceType, instance) + { + Name = name; + } + + public NamedServiceDescriptor(Type serviceType, string name, Func factory, ServiceLifetime lifetime) : base(serviceType, factory, lifetime) + { + Name = name; + } +} diff --git a/src/LocalPost/DependencyInjection/JobQueueRegistration.cs b/src/LocalPost/DependencyInjection/JobQueueRegistration.cs deleted file mode 100644 index 8b5a2ce..0000000 --- a/src/LocalPost/DependencyInjection/JobQueueRegistration.cs +++ /dev/null @@ -1,17 +0,0 @@ -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.DependencyInjection.Extensions; -using Microsoft.Extensions.Options; - -namespace LocalPost.DependencyInjection; - -public 
static class JobQueueRegistration -{ - public static OptionsBuilder> AddBackgroundJobQueue(this IServiceCollection services) - { - services.TryAddSingleton(); - services.TryAddSingleton(provider => provider.GetRequiredService()); - - return services.AddBackgroundQueue(builder => - builder.SetHandler((job, ct) => job(ct))); - } -} diff --git a/src/LocalPost/DependencyInjection/QueueRegistration.cs b/src/LocalPost/DependencyInjection/QueueRegistration.cs deleted file mode 100644 index 8122195..0000000 --- a/src/LocalPost/DependencyInjection/QueueRegistration.cs +++ /dev/null @@ -1,51 +0,0 @@ -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.DependencyInjection.Extensions; -using Microsoft.Extensions.Options; - -namespace LocalPost.DependencyInjection; - -public static class QueueRegistration -{ - // THandler has to be registered by the user - public static OptionsBuilder> AddBackgroundQueue(this IServiceCollection services, - Action>? configure = null) where THandler : IHandler => - services.AddBackgroundQueue(builder => - { - builder.SetHandler(); - configure?.Invoke(builder); - }); - - public static OptionsBuilder> AddBackgroundQueue(this IServiceCollection services, - Handler handler, Action>? 
configure = null) => - services.AddBackgroundQueue(builder => - { - builder.SetHandler(handler); - configure?.Invoke(builder); - }); - - public static OptionsBuilder> AddBackgroundQueue(this IServiceCollection services, - Action> configure) - { - services.TryAddConcurrentHostedServices(); - - var handleStackBuilder = new MiddlewareStackBuilder(); - configure(handleStackBuilder); - var handlerStack = handleStackBuilder.Build(); - - services.TryAddSingleton(provider => BackgroundQueueService.Create(provider, handlerStack)); - - services.TryAddSingleton>(provider => - provider.GetRequiredService>().Queue); - - services.AddSingleton(provider => - provider.GetRequiredService>().QueueSupervisor); - services.AddSingleton(provider => - provider.GetRequiredService>().ConsumerGroup); - - // Extend ServiceDescriptor for better comparison and implement custom TryAddSingleton later... - - return services.AddOptions>(); - } - - // TODO Batched queue consumer -} diff --git a/src/LocalPost/DependencyInjection/Registration.cs b/src/LocalPost/DependencyInjection/Registration.cs index e650636..a244e76 100644 --- a/src/LocalPost/DependencyInjection/Registration.cs +++ b/src/LocalPost/DependencyInjection/Registration.cs @@ -1,9 +1,148 @@ using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; +using Microsoft.Extensions.Hosting; namespace LocalPost.DependencyInjection; -public static class Registration +internal static class ConsumerGroupRegistration { - public static void TryAddConcurrentHostedServices(this IServiceCollection services) => - services.AddHostedService(); + internal static bool TryAddConsumerGroup(this IServiceCollection services, string name, + HandlerFactory configure) where TQ : IAsyncEnumerable, INamedService + { +// services.TryAddConsumerGroup(name, provider => BackgroundQueue.ConsumerGroupFor( +// provider.GetRequiredService(name), configure(provider), 1)); + if (!services.TryAddNamedSingleton(name, provider => 
BackgroundQueue.ConsumerGroupForNamed( + provider.GetRequiredService(name), configure(provider), 1))) // FIXME Config + return false; + + services.AddBackgroundServiceForNamed>(name); + + return true; + } + + internal static bool TryAddConsumerGroup(this IServiceCollection services, + HandlerFactory configure) where TQ : IAsyncEnumerable + { + if (!services.TryAddSingleton(provider => BackgroundQueue.ConsumerGroupFor( + provider.GetRequiredService(), configure(provider), 1))) // FIXME Config + return false; + + services.AddBackgroundServiceFor>(); + + return true; + } +} + +internal static class HealthChecksRegistration +{ + public static IHealthChecksBuilder AddConsumerGroupLivenessCheck(this IHealthChecksBuilder builder, + string? name = default, HealthStatus? failureStatus = default, IEnumerable? tags = default) + where TQ : IAsyncEnumerable + { + var check = HealthChecks.LivenessCheckFor>(failureStatus, tags); + if (name is not null) + check.Name = name; + + return builder.Add(check); + } + + public static IHealthChecksBuilder AddNamedConsumerGroupLivenessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) + where TQ : IAsyncEnumerable, INamedService + { + var check = HealthChecks.LivenessCheckForNamed>(name, failureStatus, tags); + + return builder.Add(check); + } +} + +internal static class Registration +{ + public static void AddConcurrentHostedServices(this IServiceCollection services) => services + .AddHostedService(); + + public static void AddBackgroundServiceForNamed(this IServiceCollection services, string name) + where T : class, IBackgroundService, INamedService + { + services.AddConcurrentHostedServices(); + + // We DO expect that this service is registered by the user already... 
+// services.AddSingleton(); +// services.AddSingleton(); + + var added = services.TryAddNamedSingleton>(name, provider => + new NamedBackgroundServiceRunner(provider.GetRequiredService(name), + provider.GetRequiredService())); + if (!added) + return; + + services.AddSingleton(provider => + provider.GetRequiredService>(name)); + services.AddSingleton(provider => + provider.GetRequiredService>(name)); + } + + public static void AddBackgroundServiceFor(this IServiceCollection services) + where T : class, IBackgroundService + { + services.AddConcurrentHostedServices(); + + // We DO expect that this service is registered by the user already... +// services.AddSingleton(); +// services.AddSingleton(); + + var added = services.TryAddSingleton>(provider => + new BackgroundServiceRunner(provider.GetRequiredService(), + provider.GetRequiredService())); + if (!added) + return; + + services.AddSingleton(provider => + provider.GetRequiredService>()); + services.AddSingleton(provider => + provider.GetRequiredService>()); + } + + public static bool TryAddNamedSingleton(this IServiceCollection services, string name, + Func implementationFactory) where TService : class, INamedService => + services.TryAdd(NamedServiceDescriptor.Singleton(name, implementationFactory)); + + public static bool TryAddSingleton(this IServiceCollection services) where TService : class => + services.TryAdd(ServiceDescriptor.Singleton()); + + public static bool TryAddSingleton(this IServiceCollection services, + Func implementationFactory) where TService : class => + services.TryAdd(ServiceDescriptor.Singleton(implementationFactory)); + + // "If binary compatibility were not a problem, then the TryAdd methods could return bool" + // from https://github.com/dotnet/runtime/issues/45114#issuecomment-733807639 + // See also: https://github.com/dotnet/runtime/issues/44728#issuecomment-831413792 + public static bool TryAdd(this IServiceCollection services, ServiceDescriptor descriptor) + { + if 
(services.Any(service => IsEqual(service, descriptor))) + return false; + + services.Add(descriptor); + return true; + + static bool IsEqual(ServiceDescriptor a, ServiceDescriptor b) + { + var equal = a.ServiceType == b.ServiceType; // && a.Lifetime == b.Lifetime; + if (equal && a is NamedServiceDescriptor namedA && b is NamedServiceDescriptor namedB) + return namedA.Name == namedB.Name; + + return equal; + } + } + + public static IServiceCollection AddSingletonAlias(this IServiceCollection services) + where TService : class + where TImplementation : class, TService => + services.AddSingleton(provider => provider.GetRequiredService()); + + public static IServiceCollection AddSingletonAlias(this IServiceCollection services, + string name) + where TService : class + where TImplementation : class, TService, INamedService => + services.AddSingleton(provider => provider.GetRequiredService(name)); } diff --git a/src/LocalPost/DependencyInjection/ServiceHealthCheckRegistration.cs b/src/LocalPost/DependencyInjection/ServiceHealthCheckRegistration.cs new file mode 100644 index 0000000..530d1b8 --- /dev/null +++ b/src/LocalPost/DependencyInjection/ServiceHealthCheckRegistration.cs @@ -0,0 +1,16 @@ +using JetBrains.Annotations; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; + +namespace LocalPost.DependencyInjection; + +[PublicAPI] +public static class ServiceHealthCheckRegistration +{ + // Not needed, as there is no complex logic inside. It's either working, or dead. +// public static IHealthChecksBuilder AddBackgroundQueueReadinessCheck(... + + public static IHealthChecksBuilder AddBackgroundQueueLivenessCheck(this IHealthChecksBuilder builder, + HealthStatus? failureStatus = default, IEnumerable? 
tags = default) => builder + .AddConsumerGroupLivenessCheck, T>(); +} diff --git a/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs b/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs index 869cc95..ef4f7ed 100644 --- a/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs +++ b/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs @@ -1,4 +1,5 @@ using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Logging; using Microsoft.Extensions.Options; namespace LocalPost.DependencyInjection; @@ -6,13 +7,14 @@ namespace LocalPost.DependencyInjection; internal static class ServiceProviderLookups { public static T GetRequiredService(this IServiceProvider provider, string name) - where T : INamedService - { - return provider.GetRequiredService>().First(x => x.Name == name); - } + where T : INamedService => + provider.GetRequiredService>().First(x => x.Name == name); public static T GetOptions(this IServiceProvider provider) => provider.GetOptions(Options.DefaultName); public static T GetOptions(this IServiceProvider provider, string name) => provider.GetRequiredService>().Get(name); + + public static ILogger GetLoggerFor(this IServiceProvider provider) => + provider.GetRequiredService>(); } diff --git a/src/LocalPost/DependencyInjection/ServiceRegistration.cs b/src/LocalPost/DependencyInjection/ServiceRegistration.cs new file mode 100644 index 0000000..55c33ec --- /dev/null +++ b/src/LocalPost/DependencyInjection/ServiceRegistration.cs @@ -0,0 +1,39 @@ +using JetBrains.Annotations; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Options; + +namespace LocalPost.DependencyInjection; + +[PublicAPI] +public static class ServiceRegistration +{ + public static OptionsBuilder> AddBackgroundJobQueue(this IServiceCollection services) + { + services.TryAddSingleton(); + services.TryAddSingleton(provider => provider.GetRequiredService()); + + return services.AddBackgroundQueue(_ => async (job, ct) => 
await job(ct)); + } + + // THandler has to be registered by the user + public static OptionsBuilder> AddBackgroundQueue( + this IServiceCollection services, + HandlerFactory? configure = null) where THandler : IHandler => + services.AddBackgroundQueue(HandlerStack.From().Scoped()); + + public static OptionsBuilder> AddBackgroundQueue(this IServiceCollection services, + HandlerFactory configure) + { + services.TryAddSingleton>(provider => provider.GetRequiredService>()); + services.TryAddSingleton(provider => + BackgroundQueue.Create(provider.GetOptions>())); + services.AddBackgroundServiceFor>(); + + // FIXME Prevent adding two services with different handlers... Do not allow calling this method twice for the same queue? + services.TryAddConsumerGroup>(configure); + + return services.AddOptions>(); + } + + // TODO Batched +} diff --git a/src/LocalPost/HandlerStack.cs b/src/LocalPost/HandlerStack.cs new file mode 100644 index 0000000..4e6dfef --- /dev/null +++ b/src/LocalPost/HandlerStack.cs @@ -0,0 +1,244 @@ +using JetBrains.Annotations; +using Microsoft.Extensions.DependencyInjection; + +namespace LocalPost; + + + +public interface IHandler +{ + ValueTask InvokeAsync(TOut payload, CancellationToken ct); +} + +//public interface IMiddleware +//{ +// Handler Invoke(Handler next); +//} + +public delegate ValueTask Handler(T context, CancellationToken ct); + +public delegate Handler HandlerFactory(IServiceProvider provider); + +//public delegate Handler Middleware(Handler next); +////public delegate Task Middleware(T context, Handler next, CancellationToken ct); +// +//public delegate Middleware MiddlewareFactory(IServiceProvider provider); + + + +public delegate Handler HandlerMiddleware(Handler next); +//public delegate Handler HandlerMiddleware(Handler next); + +public delegate HandlerMiddleware HandlerMiddlewareFactory(IServiceProvider provider); +//public delegate HandlerMiddleware HandlerMiddlewareFactory(IServiceProvider provider); + + +public delegate 
HandlerFactory HandlerFactoryMiddleware(HandlerFactory hf); +//public delegate HandlerFactory HandlerFactoryMiddleware(HandlerFactory hf); + +public interface IHandlerMiddleware +{ + Handler Invoke(Handler next); +} +//public interface IHandlerMiddleware : IMiddleware2 +//{ +//} + +[PublicAPI] +public static partial class HandlerStack +{ + public static HandlerFactory For(Handler handler) => _ => handler; + + public static HandlerFactory From() where THandler : IHandler => + provider => provider.GetRequiredService().InvokeAsync; + + + + public static HandlerFactory Map(this HandlerFactory handlerFactory, + HandlerFactoryMiddleware middleware) => middleware(handlerFactory); + + public static HandlerFactory Map(this HandlerFactory handlerFactory, + HandlerMiddlewareFactory middlewareFactory) => provider => + { + var h = handlerFactory(provider); + var m = middlewareFactory(provider); + + return m(h); + }; + +// public static HandlerFactory Map(this HandlerFactory handlerFactory, +// HandlerMiddlewareFactory middlewareFactory) => provider => +// { +// var h = handlerFactory(provider); +// var m = middlewareFactory(provider); +// +// return m(h); +// }; + + public static HandlerFactory Map(this HandlerFactory handlerFactory, + HandlerMiddleware middleware) => handlerFactory.Map(_ => middleware); + +// public static HandlerFactory Map(this HandlerFactory handlerFactory, +// HandlerMiddleware middleware) => handlerFactory.Map(_ => middleware); + + // Really no need... 
+// public static HandlerFactory Map(this HandlerFactory handlerFactory, +// IMiddleware2 middleware) => handlerFactory.Map(middleware.Invoke); + + public static HandlerFactory Scoped(this HandlerFactory hf) => hf.Map(ScopedHandler.Wrap); + + public static HandlerFactory SkipWhen(this HandlerFactory handlerStack, Func pred) => + handlerStack.Map(next => async (context, ct) => + { + if (pred(context)) + return; + + await next(context, ct); + }); + + +// public static HandlerFactory Append(this HandlerFactory handlerFactory, +// HandlerMiddlewareFactory handlerMiddlewareFactory) => +// provider => handlerMiddlewareFactory(provider)(handlerFactory(provider)); +// +// public static HandlerFactory Append(this HandlerFactory handlerFactory, +// HandlerMiddlewareFactory handlerMiddlewareFactory) => +// provider => handlerMiddlewareFactory(provider)(handlerFactory(provider)); +// +// // Because... +//// public static HandlerFactory Append(this HandlerFactory handlerFactory, +//// Func> middlewareFactory) => +//// handlerFactory.Append(provider => middlewareFactory(provider)); +// +// public static HandlerFactory AppendMiddleware(this HandlerFactory handlerFactory, +// HandlerMiddleware middleware) => +// handlerFactory.Append(_ => middleware); +// +// public static HandlerFactory Append(this HandlerFactory handlerFactory, +// IMiddleware2 middleware) => +// handlerFactory.Append(_ => middleware.Invoke); +// +// // C# can only infer ALL generics of a method... Or nothing, so you have to specify each one manually. Not very +// // convenient, but creating a wrapper class is even worse. +// public static HandlerFactory Append(this HandlerFactory handlerFactory) +// where TMiddleware : class, IMiddleware2 => +// handlerFactory.Append(provider => provider.GetRequiredService().Invoke); +// +// // C# can only infer ALL generics of a method... Or nothing, so you have to specify each one manually. Not very +// // convenient, but creating a wrapper class is even worse. 
+// public static HandlerFactory Append(this HandlerFactory handlerFactory) +// where TMiddleware : class, IMiddleware2 => handlerFactory.Append(); +} + +[PublicAPI] +public static class HandlerStack +{ + public static readonly HandlerFactory Empty = _ => (_, _) => default; + +// public static HandlerStack2 From(Handler handler) => new() +// { +// HandlerFactory = _ => handler +// }; +// +// public static HandlerStack2 From() where THandler : IHandler => new() +// { +// HandlerFactory = provider => provider.GetRequiredService().InvokeAsync +// }; +// +// public required HandlerFactory HandlerFactory { get; init; } +// +// public static implicit operator HandlerFactory(HandlerStack2 stack) => stack.HandlerFactory; +// +// public HandlerStack2 Append(MiddlewareFactory2 middlewareFactory) => new() +// { +// HandlerFactory = provider => middlewareFactory(provider)(HandlerFactory(provider)) +// }; +// +// public HandlerStack2 Append(Middleware2 middleware) => Append(_ => middleware); +// +// public HandlerStack2 Append() where TMiddleware : class, IMiddleware2 => +// Append(); +// +// public HandlerStack2 Append() where TMiddleware : class, IMiddleware2 => +// Append(provider => provider.GetRequiredService().Invoke); +// +// public HandlerStack2 Scoped() => new() +// { +// HandlerFactory = ScopedHandlerFactory.Wrap(HandlerFactory) +// }; +} + + + + + + +// TODO Remove +//[PublicAPI] +//public sealed class HandlerStackBuilder : HandlerStackBuilder> +//{ +//} +// +//[PublicAPI] +//public abstract class HandlerStackBuilder +// where TBuilder : HandlerStackBuilder +//{ +// protected readonly List> Middlewares = new(); +// protected HandlerFactory HandlerFactory = _ => (_, _) => Task.CompletedTask; +// +// public TBuilder SetHandler(Handler handler) => SetHandler(_ => handler); +// +// public TBuilder SetHandler() where THandler : IHandler => +// SetHandler(provider => provider.GetRequiredService().InvokeAsync); +// +// public TBuilder SetHandler(HandlerFactory factory) +// 
{ +// HandlerFactory = factory; +// +// return (TBuilder) this; +// } +// +//// public TBuilder Append() where TMiddleware : IHandler +//// { +//// Middlewares.Add(provider => next => ActivatorUtilities.CreateInstance(provider, next).InvokeAsync); +//// +//// return (TBuilder) this; +//// } +// +// public TBuilder Append(Middleware middleware) => +// Append(_ => middleware); +// +// public TBuilder Append() where TMiddleware : class, IMiddleware +// { +// Middlewares.Add(provider => provider.GetRequiredService().Invoke); +// +// return (TBuilder) this; +// } +// +// public TBuilder Append(MiddlewareFactory factory) +// { +// Middlewares.Add(factory); +// +// return (TBuilder) this; +// } +// +// internal HandlerStack Build() => new(HandlerFactory, Middlewares); +//} +// +//[PublicAPI] +//public sealed class HandlerStack +//{ +// private readonly HandlerFactory _handler; +// private readonly ImmutableArray> _middlewares; +// +// public HandlerStack(HandlerFactory handler, IEnumerable>? middlewares = null) +// { +// _handler = handler; +// _middlewares = middlewares?.ToImmutableArray() ?? 
ImmutableArray>.Empty; +// } +// +// public Handler Resolve(IServiceProvider provider) => _middlewares +// .Select(factory => factory(provider)) +// .Reverse() +// .Aggregate(_handler(provider), (next, middleware) => middleware(next)); +//} diff --git a/src/LocalPost/HostedServices.cs b/src/LocalPost/HostedServices.cs deleted file mode 100644 index 851ed03..0000000 --- a/src/LocalPost/HostedServices.cs +++ /dev/null @@ -1,31 +0,0 @@ -using System.Collections.Immutable; -using Microsoft.Extensions.Hosting; - -namespace LocalPost; - -internal interface IConcurrentHostedService : IHostedService -{ -} - -internal sealed class HostedServices : IHostedService, IDisposable -{ - private readonly ImmutableArray _services; - - public HostedServices(IEnumerable services) - { - _services = services.ToImmutableArray(); - } - - public Task StartAsync(CancellationToken cancellationToken) => - Task.WhenAll(_services.Select(c => c.StartAsync(cancellationToken))); - - public Task StopAsync(CancellationToken cancellationToken) => - Task.WhenAll(_services.Select(c => c.StopAsync(cancellationToken))); - - public void Dispose() - { - foreach (var service in _services) - if (service is IDisposable disposable) - disposable.Dispose(); - } -} diff --git a/src/LocalPost/LocalPost.csproj b/src/LocalPost/LocalPost.csproj index 8bb797d..25ddbc7 100644 --- a/src/LocalPost/LocalPost.csproj +++ b/src/LocalPost/LocalPost.csproj @@ -2,16 +2,16 @@ netstandard2.0 - - LocalPost true + LocalPost false LocalPost - background;task;queue;coravel;hangfire - Local (in-process) background queue. Alexey Shokov + Local (in-process) background queue. 
+ https://github.com/alexeyshockov/LocalPost/v$(Version) + background;task;queue;coravel;hangfire README.md MIT @@ -51,10 +51,11 @@ - + - + + @@ -69,6 +70,9 @@ <_Parameter1>$(MSBuildProjectName).KafkaConsumer + + <_Parameter1>$(MSBuildProjectName).RabbitMqConsumer + diff --git a/src/LocalPost/MiddlewareStack.cs b/src/LocalPost/MiddlewareStack.cs deleted file mode 100644 index 29f8c0e..0000000 --- a/src/LocalPost/MiddlewareStack.cs +++ /dev/null @@ -1,26 +0,0 @@ -using System.Collections.Immutable; - -namespace LocalPost; - -public sealed class MiddlewareStack -{ - private readonly HandlerFactory _handlerFactory; - private readonly ImmutableArray> _middlewares; - - public MiddlewareStack(HandlerFactory handlerFactory, IEnumerable>? middlewares = null) - { - _handlerFactory = handlerFactory; - _middlewares = middlewares?.ToImmutableArray() ?? ImmutableArray>.Empty; - } - - public Handler Resolve(IServiceProvider provider) - { - var middlewares = _middlewares.Select(factory => factory(provider)); - - var handler = _handlerFactory(provider); - foreach (var middleware in middlewares) // TODO Reverse? 
- handler = middleware(handler); - - return handler; - } -} diff --git a/src/LocalPost/MiddlewareStackBuilder.cs b/src/LocalPost/MiddlewareStackBuilder.cs deleted file mode 100644 index 3ee5f3e..0000000 --- a/src/LocalPost/MiddlewareStackBuilder.cs +++ /dev/null @@ -1,52 +0,0 @@ -using Microsoft.Extensions.DependencyInjection; - -namespace LocalPost; - -public sealed class MiddlewareStackBuilder : MiddlewareStackBuilder> -{ -} - -public abstract class MiddlewareStackBuilder - where TBuilder : MiddlewareStackBuilder -{ - protected readonly List> Middlewares = new(); - protected HandlerFactory HandlerFactory = _ => (c, ct) => Task.CompletedTask; - - public TBuilder SetHandler(Handler handler) => SetHandler(_ => handler); - - public TBuilder SetHandler() where THandler : IHandler => - SetHandler(provider => provider.GetRequiredService().InvokeAsync); - - public TBuilder SetHandler(HandlerFactory factory) - { - HandlerFactory = factory; - - return (TBuilder) this; - } - - // public TBuilder Append() where TMiddleware : IHandler -// { -// Middlewares.Add(provider => next => ActivatorUtilities.CreateInstance(provider, next).InvokeAsync); -// -// return (TBuilder) this; -// } - - public TBuilder Append(Middleware middleware) => - Append(_ => middleware); - - public TBuilder Append() where TMiddleware : class, IMiddleware - { - Middlewares.Add(provider => provider.GetRequiredService().Invoke); - - return (TBuilder) this; - } - - public TBuilder Append(MiddlewareFactory factory) - { - Middlewares.Add(factory); - - return (TBuilder) this; - } - - internal MiddlewareStack Build() => new(HandlerFactory, Middlewares); -} diff --git a/src/LocalPost/Middlewares.cs b/src/LocalPost/Middlewares.cs new file mode 100644 index 0000000..351ef2b --- /dev/null +++ b/src/LocalPost/Middlewares.cs @@ -0,0 +1,50 @@ +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Logging; + +namespace LocalPost; + +public static partial class HandlerStack +{ + public static 
HandlerFactory LogErrors(this HandlerFactory handlerStack) => + handlerStack.Map(provider => + ActivatorUtilities.CreateInstance>(provider).Invoke); +} + +internal class LoggingErrorHandler : IHandlerMiddleware +{ + private readonly ILogger> _logger; + + public LoggingErrorHandler(ILogger> logger) + { + _logger = logger; + } + + public Handler Invoke(Handler next) => async (context, ct) => + { + try + { + await next(context, ct); + } + catch (OperationCanceledException e) when (e.CancellationToken == ct) + { + throw; + } + catch (Exception e) + { + _logger.LogError(e, "Unhandled exception while processing a message"); + } + }; +} + +// TODO Just add it as an example, also using Polly +//[PublicAPI] +//public static class Middlewares +//{ +// public static Middleware Timeout(TimeSpan timeout) => next => async (context, ct) => +// { +// using var cts = CancellationTokenSource.CreateLinkedTokenSource(ct); +// cts.CancelAfter(timeout); +// +// await next(context, cts.Token); +// }; +//} diff --git a/src/LocalPost/ChannelReaderEx.cs b/src/LocalPost/Polyfills.cs similarity index 51% rename from src/LocalPost/ChannelReaderEx.cs rename to src/LocalPost/Polyfills.cs index 10ead5a..f4dfccb 100644 --- a/src/LocalPost/ChannelReaderEx.cs +++ b/src/LocalPost/Polyfills.cs @@ -1,3 +1,4 @@ +using System.Diagnostics.CodeAnalysis; using System.Runtime.CompilerServices; using System.Threading.Channels; @@ -5,7 +6,7 @@ namespace LocalPost; internal static class ChannelReaderEx { - // netstandard2.0 does not contain this overload, it's available only from netstandard2.1 (.NET Core 3.0) + // netstandard2.0 does not contain this overload, it's available only from netstandard2.1 (.NET Core 3.0+) public static async IAsyncEnumerable ReadAllAsync(this ChannelReader reader, [EnumeratorCancellation] CancellationToken cancellationToken = default) { @@ -14,3 +15,17 @@ public static async IAsyncEnumerable ReadAllAsync(this ChannelReader re yield return item; } } + +internal static class 
EnumerableEx +{ + // Can be removed on .NET 6+, see https://stackoverflow.com/a/6362642/322079 + [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] + public static IEnumerable> Chunk(this IEnumerable source, ushort size) + { + while (source.Any()) + { + yield return source.Take(size); + source = source.Skip(size); + } + } +} diff --git a/src/LocalPost/QueueOptions.cs b/src/LocalPost/QueueOptions.cs deleted file mode 100644 index 2864fbe..0000000 --- a/src/LocalPost/QueueOptions.cs +++ /dev/null @@ -1,4 +0,0 @@ -namespace LocalPost; - -// For the DI container and, to distinguish between different queues -public sealed record BackgroundQueueOptions : BackgroundQueueOptions; diff --git a/src/LocalPost/ScopedHandler.cs b/src/LocalPost/ScopedHandler.cs index faf282a..d8f7787 100644 --- a/src/LocalPost/ScopedHandler.cs +++ b/src/LocalPost/ScopedHandler.cs @@ -1,48 +1,36 @@ using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Logging; namespace LocalPost; +internal static class ScopedHandler +{ + public static HandlerFactory Wrap(HandlerFactory handlerFactory) => provider => + { + var scopeFactory = provider.GetRequiredService(); + return new ScopedHandler(scopeFactory, handlerFactory).InvokeAsync; + }; + +} + internal sealed class ScopedHandler : IHandler { - private readonly ILogger> _logger; private readonly IServiceScopeFactory _scopeFactory; private readonly HandlerFactory _handlerFactory; - private readonly string _name; - - public ScopedHandler(ILogger> logger, string name, IServiceScopeFactory scopeFactory, - HandlerFactory handlerFactory) + public ScopedHandler(IServiceScopeFactory scopeFactory, HandlerFactory handlerFactory) { - _logger = logger; _scopeFactory = scopeFactory; _handlerFactory = handlerFactory; - _name = name; } - public async Task InvokeAsync(T payload, CancellationToken ct) + public async ValueTask InvokeAsync(T payload, CancellationToken ct) { - // TODO Tracing... 
- // See https://andrewlock.net/exploring-dotnet-6-part-10-new-dependency-injection-features-in-dotnet-6/#handling-iasyncdisposable-services-with-iservicescope // And also https://devblogs.microsoft.com/dotnet/announcing-net-6/#microsoft-extensions-dependencyinjection-createasyncscope-apis await using var scope = _scopeFactory.CreateAsyncScope(); - // Make it specific for this queue somehow?.. var handler = _handlerFactory(scope.ServiceProvider); - try - { - // Await the handler, to keep the container scope alive - await handler(payload, ct); - } - catch (OperationCanceledException e) when (e.CancellationToken == ct) - { - throw; - } - catch (Exception e) - { - _logger.LogError(e, "{Queue}: unhandled exception while processing a message", _name); - } + await handler(payload, ct); } } diff --git a/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj b/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj new file mode 100644 index 0000000..f662938 --- /dev/null +++ b/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj @@ -0,0 +1,29 @@ + + + + net6;net8 + enable + + false + + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/LocalPost.KafkaConsumer.Tests/Usings.cs b/tests/LocalPost.KafkaConsumer.Tests/Usings.cs new file mode 100644 index 0000000..c802f44 --- /dev/null +++ b/tests/LocalPost.KafkaConsumer.Tests/Usings.cs @@ -0,0 +1 @@ +global using Xunit; diff --git a/tests/LocalPost.SnsPublisher.Tests/LocalPost.SnsPublisher.Tests.csproj b/tests/LocalPost.SnsPublisher.Tests/LocalPost.SnsPublisher.Tests.csproj index 3779dba..1decf69 100644 --- a/tests/LocalPost.SnsPublisher.Tests/LocalPost.SnsPublisher.Tests.csproj +++ b/tests/LocalPost.SnsPublisher.Tests/LocalPost.SnsPublisher.Tests.csproj @@ -1,7 +1,7 @@ - netcoreapp3.1;net6;net7 + net6;net8 enable false diff --git a/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj 
b/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj index cf24d63..cc357eb 100644 --- a/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj +++ b/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj @@ -1,7 +1,7 @@ - netcoreapp3.1;net6;net7 + net6;net8 enable false @@ -13,6 +13,8 @@ + + diff --git a/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs b/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs index 61234f6..e89ed27 100644 --- a/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs +++ b/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs @@ -1,8 +1,9 @@ using System.Threading.Channels; using FluentAssertions; using LocalPost.AsyncEnumerable; +using Nito.AsyncEx; -namespace LocalPost.Tests; +namespace LocalPost.Tests.AsyncEnumerable; public class BatchingAsyncEnumerableTests { @@ -15,7 +16,7 @@ internal async Task batches() SingleWriter = false }); var results = source.Reader.ReadAllAsync().Batch( - () => new BoundedBatchBuilder(10, TimeSpan.FromSeconds(2))); + (ct) => new BoundedBatchBuilder(10, TimeSpan.FromSeconds(2))); async Task Produce() { diff --git a/tests/LocalPost.Tests/LocalPost.Tests.csproj b/tests/LocalPost.Tests/LocalPost.Tests.csproj index 140b60e..15cfc3a 100644 --- a/tests/LocalPost.Tests/LocalPost.Tests.csproj +++ b/tests/LocalPost.Tests/LocalPost.Tests.csproj @@ -1,17 +1,18 @@ - netcoreapp3.1;net6;net7 + net6;net8 enable false - + - + + From f49193bad947f5d2ea7443ffa6706b3bc26ba6ad Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Sat, 25 May 2024 11:17:06 +0000 Subject: [PATCH 06/33] WIP --- .github/workflows/publish.yaml | 4 +- .github/workflows/qa.yml | 4 +- Directory.Build.props | 2 +- README.md | 20 +- docker-compose.yml | 200 ++++++--------- justfile | 7 + localstack_bootstrap/init/ready.d/sqs.sh | 13 + .../BackgroundQueueApp.csproj | 8 +- samples/BackgroundQueueApp/Program.cs | 13 +- 
.../Properties/launchSettings.json | 1 - .../KafkaConsumerApp/KafkaConsumerApp.csproj | 18 +- samples/KafkaConsumerApp/Program.cs | 105 ++++---- samples/KafkaConsumerApp/appsettings.json | 7 +- samples/SqsConsumerApp/Program.cs | 57 +++-- samples/SqsConsumerApp/SqsConsumerApp.csproj | 13 +- .../appsettings.Development.json | 7 +- src/LocalPost.KafkaConsumer/ConsumeContext.cs | 56 +++-- ...gistration.cs => HealthChecksBuilderEx.cs} | 9 +- .../DependencyInjection/KafkaBuilder.cs | 64 +++-- .../ServiceCollectionEx.cs | 26 ++ .../ServiceRegistration.cs | 29 --- src/LocalPost.KafkaConsumer/HandlerStack.cs | 76 ------ src/LocalPost.KafkaConsumer/HandlerStackEx.cs | 124 ++++++++++ .../LocalPost.KafkaConsumer.csproj | 12 +- src/LocalPost.KafkaConsumer/Middlewares.cs | 61 ----- src/LocalPost.KafkaConsumer/Options.cs | 37 ++- src/LocalPost.KafkaConsumer/README.md | 18 +- .../{HandlerStack.cs => HandlerStackEx.cs} | 14 +- src/LocalPost.Polly/LocalPost.Polly.csproj | 8 +- src/LocalPost.SqsConsumer/ConsumeContext.cs | 14 +- ...gistration.cs => HealthChecksBuilderEx.cs} | 13 +- .../ServiceCollectionEx.cs | 21 ++ .../ServiceRegistration.cs | 111 --------- .../DependencyInjection/SqsBuilder.cs | 67 ++--- .../{HandlerStack.cs => HandlerStackEx.cs} | 84 ++++--- .../LocalPost.SqsConsumer.csproj | 10 +- src/LocalPost.SqsConsumer/MessageSource.cs | 19 +- src/LocalPost.SqsConsumer/Middlewares.cs | 35 +++ src/LocalPost.SqsConsumer/Options.cs | 18 +- src/LocalPost.SqsConsumer/QueueClient.cs | 4 +- src/LocalPost.SqsConsumer/README.md | 10 +- src/LocalPost/AppHealthSupervisor.cs | 41 +++ .../AsyncEnumerable/AsyncEnumerableEx.cs | 4 +- src/LocalPost/AsyncEnumerable/BatchBuilder.cs | 39 +-- .../BatchingAsyncEnumerable.cs | 83 +------ .../ConcurrentAsyncEnumerable.cs | 24 +- src/LocalPost/BackgroundActivitySource.cs | 26 ++ src/LocalPost/BackgroundJobQueue.cs | 25 -- src/LocalPost/BackgroundQueue.cs | 116 +-------- src/LocalPost/BackgroundQueueConsumer.cs | 184 +++++++------- 
src/LocalPost/BackgroundQueueService.cs | 80 ------ src/LocalPost/BackgroundQueueSupervisor.cs | 21 -- .../BackgroundQueues/BackgroundJobQueue.cs | 15 ++ .../BackgroundQueues/BackgroundQueue.cs | 90 +++++++ .../BackgroundQueueOptions.cs | 13 +- .../BackgroundQueues/ConsumeContext.cs | 32 +++ .../BackgroundQueuesBuilder.cs | 55 +++++ .../DependencyInjection/HealthChecks.cs} | 8 +- .../ServiceCollectionEx.cs | 16 ++ .../BackgroundQueues/HandlerStackEx.cs | 34 +++ src/LocalPost/ConcurrentHostedServices.cs | 45 ++-- .../DependencyInjection/HealthChecks.cs | 96 ++++++-- .../ServiceCollectionEx.cs | 57 +++++ ...istration.cs => ServiceCollectionTools.cs} | 66 +---- .../ServiceProviderLookups.cs | 5 +- .../ServiceRegistration.cs | 39 --- src/LocalPost/Handler.cs | 24 ++ src/LocalPost/HandlerStack.cs | 233 +----------------- src/LocalPost/HandlerStackEx.cs | 131 ++++++++++ src/LocalPost/LocalPost.csproj | 14 +- src/LocalPost/Middlewares.cs | 64 +++-- src/LocalPost/Primitives.cs | 21 ++ src/LocalPost/QueuePublisher.cs | 10 + src/LocalPost/ScopedHandler.cs | 36 --- 74 files changed, 1569 insertions(+), 1597 deletions(-) create mode 100755 justfile create mode 100644 localstack_bootstrap/init/ready.d/sqs.sh rename src/LocalPost.KafkaConsumer/DependencyInjection/{ServiceHealthCheckRegistration.cs => HealthChecksBuilderEx.cs} (74%) create mode 100644 src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionEx.cs delete mode 100644 src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs delete mode 100644 src/LocalPost.KafkaConsumer/HandlerStack.cs create mode 100644 src/LocalPost.KafkaConsumer/HandlerStackEx.cs delete mode 100644 src/LocalPost.KafkaConsumer/Middlewares.cs rename src/LocalPost.Polly/{HandlerStack.cs => HandlerStackEx.cs} (53%) rename src/LocalPost.SqsConsumer/DependencyInjection/{ServiceHealthCheckRegistration.cs => HealthChecksBuilderEx.cs} (55%) create mode 100644 src/LocalPost.SqsConsumer/DependencyInjection/ServiceCollectionEx.cs delete 
mode 100644 src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs rename src/LocalPost.SqsConsumer/{HandlerStack.cs => HandlerStackEx.cs} (59%) create mode 100644 src/LocalPost.SqsConsumer/Middlewares.cs create mode 100644 src/LocalPost/AppHealthSupervisor.cs create mode 100644 src/LocalPost/BackgroundActivitySource.cs delete mode 100644 src/LocalPost/BackgroundJobQueue.cs delete mode 100644 src/LocalPost/BackgroundQueueService.cs delete mode 100644 src/LocalPost/BackgroundQueueSupervisor.cs create mode 100644 src/LocalPost/BackgroundQueues/BackgroundJobQueue.cs create mode 100644 src/LocalPost/BackgroundQueues/BackgroundQueue.cs rename src/LocalPost/{ => BackgroundQueues}/BackgroundQueueOptions.cs (78%) create mode 100644 src/LocalPost/BackgroundQueues/ConsumeContext.cs create mode 100644 src/LocalPost/BackgroundQueues/DependencyInjection/BackgroundQueuesBuilder.cs rename src/LocalPost/{DependencyInjection/ServiceHealthCheckRegistration.cs => BackgroundQueues/DependencyInjection/HealthChecks.cs} (72%) create mode 100644 src/LocalPost/BackgroundQueues/DependencyInjection/ServiceCollectionEx.cs create mode 100644 src/LocalPost/BackgroundQueues/HandlerStackEx.cs create mode 100644 src/LocalPost/DependencyInjection/ServiceCollectionEx.cs rename src/LocalPost/DependencyInjection/{Registration.cs => ServiceCollectionTools.cs} (65%) delete mode 100644 src/LocalPost/DependencyInjection/ServiceRegistration.cs create mode 100644 src/LocalPost/Handler.cs create mode 100644 src/LocalPost/HandlerStackEx.cs create mode 100644 src/LocalPost/Primitives.cs create mode 100644 src/LocalPost/QueuePublisher.cs delete mode 100644 src/LocalPost/ScopedHandler.cs diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml index dcbc521..674b0ba 100644 --- a/.github/workflows/publish.yaml +++ b/.github/workflows/publish.yaml @@ -1,4 +1,3 @@ ---- name: Publish on: @@ -17,9 +16,8 @@ jobs: - uses: actions/setup-dotnet@v3 with: dotnet-version: | - 3.1.x 6.0.x - 
7.0.x + 8.0.x - run: dotnet restore - run: dotnet build -c Release --no-restore - run: dotnet test -c Release --no-build --verbosity=minimal diff --git a/.github/workflows/qa.yml b/.github/workflows/qa.yml index 1f8e284..1dc50b6 100644 --- a/.github/workflows/qa.yml +++ b/.github/workflows/qa.yml @@ -1,4 +1,3 @@ ---- name: QA on: @@ -41,9 +40,8 @@ jobs: - uses: actions/setup-dotnet@v3 with: dotnet-version: | - 3.1.x 6.0.x - 7.0.x + 8.0.x - run: dotnet tool restore - run: dotnet gitversion /output buildserver - run: ./sonar-scan.sh diff --git a/Directory.Build.props b/Directory.Build.props index 8bb8702..f39cb87 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,7 +1,7 @@ - 11 + 12 enable enable true diff --git a/README.md b/README.md index 37dc6a8..04482eb 100644 --- a/README.md +++ b/README.md @@ -6,27 +6,19 @@ Simple .NET in-memory background queue ([System.Threading.Channels](https://lear There are multiple ways to run background tasks in .NET. The most common are: - -## Amazon SQS Consumer - -### Permissions - -To operate on a queue below [permissions](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-api-permissions-reference.html) are required: -- sqs:GetQueueUrl -- sqs:GetQueueAttributes -- sqs:ReceiveMessage -- sqs:ChangeMessageVisibility - ## Usage ### Installation ### .NET 8 asynchronous background services handling -Before version 8 .NET runtime handled start/stop of the services only synchronously, but now it is possible to enable concurrent handling of the services. This is done by setting `HostOptions` property `ConcurrentServiceExecution` to `true`: +Before version 8 .NET runtime handled start/stop of the services only synchronously, but now it is possible to enable +concurrent handling of the services. 
This is done by setting `HostOptions` property `ConcurrentServiceExecution` +to `true`: -See https://github.com/dotnet/runtime/blob/v8.0.0/src/libraries/Microsoft.Extensions.Hosting/src/Internal/Host.cs -See https://github.com/dotnet/runtime/blob/main/src/libraries/Microsoft.Extensions.Hosting/src/HostOptions.cs +See for details: +- https://github.com/dotnet/runtime/blob/v8.0.0/src/libraries/Microsoft.Extensions.Hosting/src/Internal/Host.cs +- https://github.com/dotnet/runtime/blob/main/src/libraries/Microsoft.Extensions.Hosting/src/HostOptions.cs ## Similar projects / Inspiration diff --git a/docker-compose.yml b/docker-compose.yml index 0e95e9a..8528f68 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,127 +1,89 @@ -# See https://github.com/conduktor/kafka-stack-docker-compose/blob/master/full-stack.yml - +name: localpost +networks: + redpanda_network: + driver: bridge +volumes: + redpanda: + driver: local + localstack: + driver: local services: - zoo1: - image: confluentinc/cp-zookeeper:7.3.2 - hostname: zoo1 - container_name: zoo1 + localstack: + # https://docs.localstack.cloud/getting-started/installation/#docker-compose + image: localstack/localstack:3.4 ports: - - "2181:2181" + - 127.0.0.1:4566:4566 # LocalStack Gateway + - 127.0.0.1:4510-4559:4510-4559 # External services port range environment: - ZOOKEEPER_CLIENT_PORT: 2181 - ZOOKEEPER_SERVER_ID: 1 - ZOOKEEPER_SERVERS: zoo1:2888:3888 - - kafka1: - image: confluentinc/cp-kafka:7.3.2 - hostname: kafka1 - container_name: kafka1 - ports: - - "9092:9092" - - "29092:29092" - - "9999:9999" - environment: - KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka1:19092,EXTERNAL://${DOCKER_HOST_IP:-127.0.0.1}:9092,DOCKER://host.docker.internal:29092 - KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT,DOCKER:PLAINTEXT - KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL - KAFKA_ZOOKEEPER_CONNECT: "zoo1:2181" - KAFKA_BROKER_ID: 1 - KAFKA_LOG4J_LOGGERS: 
"kafka.controller=INFO,kafka.producer.async.DefaultEventHandler=INFO,state.change.logger=INFO" - KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 - KAFKA_JMX_PORT: 9001 - KAFKA_JMX_HOSTNAME: ${DOCKER_HOST_IP:-127.0.0.1} - KAFKA_AUTHORIZER_CLASS_NAME: kafka.security.authorizer.AclAuthorizer - KAFKA_ALLOW_EVERYONE_IF_NO_ACL_FOUND: "true" - depends_on: - - zoo1 - - kafka-schema-registry: - image: confluentinc/cp-schema-registry:7.3.2 - hostname: kafka-schema-registry - container_name: kafka-schema-registry - ports: - - "8081:8081" - environment: - SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:19092 - SCHEMA_REGISTRY_HOST_NAME: kafka-schema-registry - SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 - depends_on: - - zoo1 - - kafka1 - - - kafka-rest-proxy: - image: confluentinc/cp-kafka-rest:7.3.2 - hostname: kafka-rest-proxy - container_name: kafka-rest-proxy - ports: - - "8082:8082" - environment: - # KAFKA_REST_ZOOKEEPER_CONNECT: zoo1:2181 - KAFKA_REST_LISTENERS: http://0.0.0.0:8082/ - KAFKA_REST_SCHEMA_REGISTRY_URL: http://kafka-schema-registry:8081/ - KAFKA_REST_HOST_NAME: kafka-rest-proxy - KAFKA_REST_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:19092 - depends_on: - - zoo1 - - kafka1 - - kafka-schema-registry - - - kafka-connect: - image: confluentinc/cp-kafka-connect:7.3.2 - hostname: kafka-connect - container_name: kafka-connect - ports: - - "8083:8083" - environment: - CONNECT_BOOTSTRAP_SERVERS: "kafka1:19092" - CONNECT_REST_PORT: 8083 - CONNECT_GROUP_ID: compose-connect-group - CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs - CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets - CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status - CONNECT_KEY_CONVERTER: io.confluent.connect.avro.AvroConverter - CONNECT_KEY_CONVERTER_SCHEMA_REGISTRY_URL: 'http://kafka-schema-registry:8081' - CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter - 
CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: 'http://kafka-schema-registry:8081' - CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter" - CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter" - CONNECT_REST_ADVERTISED_HOST_NAME: "kafka-connect" - CONNECT_LOG4J_ROOT_LOGLEVEL: "INFO" - CONNECT_LOG4J_LOGGERS: "org.apache.kafka.connect.runtime.rest=WARN,org.reflections=ERROR" - CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: "1" - CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: "1" - CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: "1" - CONNECT_PLUGIN_PATH: '/usr/share/java,/etc/kafka-connect/jars,/usr/share/confluent-hub-components' + # LocalStack configuration: https://docs.localstack.cloud/references/configuration/ + - DEBUG=${DEBUG:-0} volumes: - - ./connectors:/etc/kafka-connect/jars/ - depends_on: - - zoo1 - - kafka1 - - kafka-schema-registry - - kafka-rest-proxy + - localstack:/var/lib/localstack + # https://docs.localstack.cloud/references/init-hooks/ + - ./localstack/init/ready.d:/etc/localstack/init/ready.d" # SQS hooks + # Only needed for Lambdas +# - /var/run/docker.sock:/var/run/docker.sock + redpanda: + # https://docs.redpanda.com/current/deploy/deployment-option/self-hosted/docker-image/ + image: docker.redpanda.com/redpandadata/redpanda:v24.1.2 command: - - bash - - -c - - | - confluent-hub install --no-prompt debezium/debezium-connector-mysql:latest - confluent-hub install --no-prompt confluentinc/kafka-connect-datagen:0.4.0 - /etc/confluent/docker/run - - - ksqldb-server: - image: confluentinc/cp-ksqldb-server:7.3.2 - hostname: ksqldb-server - container_name: ksqldb-server + - redpanda start + - --smp 1 + - --overprovisioned + - --kafka-addr internal://0.0.0.0:9092,external://0.0.0.0:19092 + # Address the broker advertises to clients that connect to the Kafka API. + # Use the internal addresses to connect to the Redpanda brokers + # from inside the same Docker network. 
+ # Use the external addresses to connect to the Redpanda brokers + # from outside the Docker network. + - --advertise-kafka-addr internal://redpanda:9092,external://localhost:19092 + - --pandaproxy-addr internal://0.0.0.0:8082,external://0.0.0.0:18082 + # Address the broker advertises to clients that connect to the HTTP Proxy. + - --advertise-pandaproxy-addr internal://redpanda:8082,external://localhost:18082 + - --schema-registry-addr internal://0.0.0.0:8081,external://0.0.0.0:18081 + # Redpanda brokers use the RPC API to communicate with each other internally. + - --rpc-addr redpanda:33145 + - --advertise-rpc-addr redpanda:33145 + - --mode dev-container ports: - - "8088:8088" + - 18081:18081 + - 18082:18082 + - 19092:19092 + - 19644:9644 + volumes: + - redpanda:/var/lib/redpanda/data + networks: + - redpanda_network + healthcheck: + test: [ "CMD-SHELL", "rpk cluster health | grep -E 'Healthy:.+true' || exit 1" ] + interval: 15s + timeout: 3s + retries: 5 + start_period: 5s + redpanda-console: + image: docker.redpanda.com/redpandadata/console:v2.5.2 + entrypoint: /bin/sh + command: -c "echo \"$$CONSOLE_CONFIG_FILE\" > /tmp/config.yml; /app/console" environment: - KSQL_BOOTSTRAP_SERVERS: PLAINTEXT://kafka1:19092 - KSQL_LISTENERS: http://0.0.0.0:8088/ - KSQL_KSQL_SERVICE_ID: ksqldb-server_ + CONFIG_FILEPATH: /tmp/config.yml + CONSOLE_CONFIG_FILE: | + kafka: + brokers: ["redpanda:9092"] + schemaRegistry: + enabled: true + urls: ["http://redpanda:8081"] + redpanda: + adminApi: + enabled: true + urls: ["http://redpanda:9644"] + connect: + enabled: true + clusters: + - name: local-connect-cluster + url: http://connect:8083 + ports: + - 8080:8080 + networks: + - redpanda_network depends_on: - - zoo1 - - kafka1 + - redpanda diff --git a/justfile b/justfile new file mode 100755 index 0000000..993d5c0 --- /dev/null +++ b/justfile @@ -0,0 +1,7 @@ +#!/usr/bin/env just --justfile + +update-deps: + dotnet restore --force-evaluate + +install-deps: + dotnet restore diff --git 
a/localstack_bootstrap/init/ready.d/sqs.sh b/localstack_bootstrap/init/ready.d/sqs.sh new file mode 100644 index 0000000..d47ca0d --- /dev/null +++ b/localstack_bootstrap/init/ready.d/sqs.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# Enable debug +#set -x + +awslocal sqs create-queue --queue-name lp-test +QUEUE_URL=$(awslocal sqs get-queue-url --queue-name lp-test --query 'QueueUrl' --output text) + +awslocal sqs send-message \ + --queue-url "$QUEUE_URL" \ + --message-body '{"TemperatureC": 25, "TemperatureF": 77, "Summary": "not hot, not cold, perfect"}' diff --git a/samples/BackgroundQueueApp/BackgroundQueueApp.csproj b/samples/BackgroundQueueApp/BackgroundQueueApp.csproj index 4495560..48c18d7 100644 --- a/samples/BackgroundQueueApp/BackgroundQueueApp.csproj +++ b/samples/BackgroundQueueApp/BackgroundQueueApp.csproj @@ -2,13 +2,17 @@ net8 - enable - enable + + + + + + diff --git a/samples/BackgroundQueueApp/Program.cs b/samples/BackgroundQueueApp/Program.cs index 1036890..c711050 100644 --- a/samples/BackgroundQueueApp/Program.cs +++ b/samples/BackgroundQueueApp/Program.cs @@ -1,7 +1,8 @@ using BackgroundQueueApp; using LocalPost; +using LocalPost.BackgroundQueues; +using LocalPost.BackgroundQueues.DependencyInjection; using LocalPost.Polly; -using LocalPost.DependencyInjection; using Polly; using Polly.Retry; @@ -20,15 +21,19 @@ .Build(); // A background queue with an inline handler -builder.Services.AddBackgroundQueue( - HandlerStack.For(async (weather, ct) => +builder.Services.AddBackgroundQueues(bq => + bq.AddQueue(HandlerStack.For(async (weather, ct) => { await Task.Delay(TimeSpan.FromSeconds(2), ct); Console.WriteLine(weather.Summary); }) + .Scoped() + .UsePayload() + .Trace() .UsePollyPipeline(resiliencePipeline) - .LogErrors() + ) ); + builder.Services.AddControllers(); // Learn more about configuring Swagger/OpenAPI at https://aka.ms/aspnetcore/swashbuckle builder.Services.AddEndpointsApiExplorer(); diff --git 
a/samples/BackgroundQueueApp/Properties/launchSettings.json b/samples/BackgroundQueueApp/Properties/launchSettings.json index af9bed4..510119c 100644 --- a/samples/BackgroundQueueApp/Properties/launchSettings.json +++ b/samples/BackgroundQueueApp/Properties/launchSettings.json @@ -9,7 +9,6 @@ "applicationUrl": "https://localhost:7003;http://localhost:5103", "environmentVariables": { "ASPNETCORE_ENVIRONMENT": "Development", - "AWS_PROFILE": "kw-test" } } } diff --git a/samples/KafkaConsumerApp/KafkaConsumerApp.csproj b/samples/KafkaConsumerApp/KafkaConsumerApp.csproj index 623a387..79aacc5 100644 --- a/samples/KafkaConsumerApp/KafkaConsumerApp.csproj +++ b/samples/KafkaConsumerApp/KafkaConsumerApp.csproj @@ -2,21 +2,27 @@ net8 - enable - enable - + + + + + + + + + + + + - - - diff --git a/samples/KafkaConsumerApp/Program.cs b/samples/KafkaConsumerApp/Program.cs index a00ba14..beef3e6 100644 --- a/samples/KafkaConsumerApp/Program.cs +++ b/samples/KafkaConsumerApp/Program.cs @@ -4,62 +4,77 @@ using LocalPost.KafkaConsumer; using LocalPost.KafkaConsumer.DependencyInjection; -var host = Host.CreateDefaultBuilder(args) +await Host.CreateDefaultBuilder(args) .ConfigureServices((context, services) => { - services.AddOptions() - .Bind(context.Configuration.GetSection(KafkaOptions.ConfigSection)) - .ValidateDataAnnotations(); + services.AddScoped(); - services.AddScoped(); - services.AddKafkaConsumer("orders", - builder => - { - builder.SetHandler(); - }, - builder => - { - builder.SetValueDeserializer(new StringDeserializer()); - }).Configure((options, kafkaOptions) => - { - options.Kafka.GroupId = ""; - options.Kafka.AutoOffsetReset = AutoOffsetReset.Earliest; - options.Kafka.EnableAutoCommit = false; // TODO DryRun + services.AddKafkaConsumers(kafka => + { + // kafka.Defaults.Configure(options => + // { + // options.BootstrapServers = "localhost:9092"; + // options.SecurityProtocol = SecurityProtocol.SaslSsl; + // options.SaslMechanism = SaslMechanism.Plain; + // 
options.SaslUsername = "admin"; + // options.SaslPassword = ""; + // }); + kafka.Defaults + .Bind(context.Configuration.GetSection("Kafka")) + .ValidateDataAnnotations(); + kafka.AddConsumer("weather-forecasts", HandlerStack.From() + .UseKafkaPayload() + .DeserializeJson() + .Acknowledge() + .Scoped() + .Trace() + ) + .Bind(context.Configuration.GetSection("Kafka:Consumer")) + .Configure(options => + { + options.AutoOffsetReset = AutoOffsetReset.Earliest; + options.EnableAutoCommit = false; + }) + .ValidateDataAnnotations(); + }); - options.Kafka.BootstrapServers = "localhost:9092"; - options.Kafka.SecurityProtocol = SecurityProtocol.SaslSsl; - options.Kafka.SaslMechanism = SaslMechanism.Plain; - options.Kafka.SaslUsername = "admin"; - options.Kafka.SaslPassword = ""; - }); - // Only one consumer per name (topic) is allowed - services.AddBatchKafkaConsumer("orders", - builder => - { - }, - builder => - { - }); - }) - .Build(); -host.Run(); + // services.AddKafkaConsumer("orders", + // builder => { builder.SetHandler(); }, + // builder => { builder.SetValueDeserializer(new StringDeserializer()); }).Configure( + // (options, kafkaOptions) => + // { + // options.Kafka.GroupId = ""; + // options.Kafka.AutoOffsetReset = AutoOffsetReset.Earliest; + // options.Kafka.EnableAutoCommit = false; // TODO DryRun + // + // options.Kafka.BootstrapServers = "localhost:9092"; + // options.Kafka.SecurityProtocol = SecurityProtocol.SaslSsl; + // options.Kafka.SaslMechanism = SaslMechanism.Plain; + // options.Kafka.SaslUsername = "admin"; + // options.Kafka.SaslPassword = ""; + // }); -public sealed record KafkaOptions -{ - public const string ConfigSection = "Kafka"; - [Required] - public string BootstrapServers { get; init; } = null!; + // Only one consumer per name (topic) is allowed?.. 
+ // services.AddBatchKafkaConsumer("orders", + // builder => + // { + // }, + // builder => + // { + // }); + }) + .Build() + .RunAsync(); - public Dictionary Consumers { get; init; } = new(); -} +public record WeatherForecast(int TemperatureC, int TemperatureF, string Summary); -internal class KafkaTopicHandler : IHandler> +internal sealed class MessageHandler : IHandler { - public async Task InvokeAsync(ConsumeContext payload, CancellationToken ct) + public async ValueTask InvokeAsync(WeatherForecast payload, CancellationToken ct) { await Task.Delay(1_000, ct); - Console.WriteLine(payload.Payload); + Console.WriteLine(payload); } } diff --git a/samples/KafkaConsumerApp/appsettings.json b/samples/KafkaConsumerApp/appsettings.json index e4eb4e5..dedf07b 100644 --- a/samples/KafkaConsumerApp/appsettings.json +++ b/samples/KafkaConsumerApp/appsettings.json @@ -6,11 +6,10 @@ } }, "Kafka": { - "DryRun": true, "BootstrapServers": "localhost:9092", - "orders": { - "Topic": "orders", - "GroupId": "orders-group" + "Consumer": { + "Topic": "weather-forecasts", + "GroupId": "example-cs-group" } } } diff --git a/samples/SqsConsumerApp/Program.cs b/samples/SqsConsumerApp/Program.cs index 9b8bf4f..c7aa95b 100644 --- a/samples/SqsConsumerApp/Program.cs +++ b/samples/SqsConsumerApp/Program.cs @@ -2,25 +2,54 @@ using LocalPost; using LocalPost.SqsConsumer; using LocalPost.SqsConsumer.DependencyInjection; +using Serilog; +using Serilog.Sinks.FingersCrossed; -var host = Host.CreateDefaultBuilder(args) - .ConfigureServices(services => - { - services.AddAWSService(); - services.AddScoped(); - services.AddAmazonSqsConsumer("test"); - }) - .Build(); - -host.Run(); +await Host.CreateDefaultBuilder(args) + .UseSerilog() + .ConfigureServices((context, services) => services + .AddDefaultAWSOptions(context.Configuration.GetAWSOptions()) + .AddAWSService()) + .ConfigureServices(services => services + .AddScoped() + .AddSqsConsumers(sqs => + { + sqs.Defaults.Configure(options => 
options.MaxConcurrency = 100); + sqs.AddConsumer("weather-forecasts", + HandlerStack.From() + .UseSqsPayload() + .DeserializeJson() + .Acknowledge() + .Scoped() + .Touch(next => async (context, ct) => + { + using var logBuffer = LogBuffer.BeginScope(); + try + { + await next(context, ct); + } + catch (OperationCanceledException e) when (e.CancellationToken == ct) + { + throw; // Not a real error + } + catch (Exception) + { + logBuffer.Flush(); + throw; + } + }) + .Trace()); + })) + .Build() + .RunAsync(); +public record WeatherForecast(int TemperatureC, int TemperatureF, string Summary); -// FIXME System.Text.Json deserializer... -internal class SqsHandler : IHandler +public class MessageHandler : IHandler { - public async Task InvokeAsync(ConsumeContext payload, CancellationToken ct) + public async ValueTask InvokeAsync(WeatherForecast payload, CancellationToken ct) { await Task.Delay(1_000, ct); - Console.WriteLine(payload.Message.Body); + Console.WriteLine(payload); } } diff --git a/samples/SqsConsumerApp/SqsConsumerApp.csproj b/samples/SqsConsumerApp/SqsConsumerApp.csproj index d91229f..a076bff 100644 --- a/samples/SqsConsumerApp/SqsConsumerApp.csproj +++ b/samples/SqsConsumerApp/SqsConsumerApp.csproj @@ -2,15 +2,22 @@ net8 - enable - enable - + + + + + + + + + + diff --git a/samples/SqsConsumerApp/appsettings.Development.json b/samples/SqsConsumerApp/appsettings.Development.json index b2dcdb6..7758c3f 100644 --- a/samples/SqsConsumerApp/appsettings.Development.json +++ b/samples/SqsConsumerApp/appsettings.Development.json @@ -1,8 +1,5 @@ { - "Logging": { - "LogLevel": { - "Default": "Information", - "Microsoft.Hosting.Lifetime": "Information" - } + "AWS": { + "ServiceURL": "http://localhost:8000" } } diff --git a/src/LocalPost.KafkaConsumer/ConsumeContext.cs b/src/LocalPost.KafkaConsumer/ConsumeContext.cs index badafa8..3ee9d59 100644 --- a/src/LocalPost.KafkaConsumer/ConsumeContext.cs +++ b/src/LocalPost.KafkaConsumer/ConsumeContext.cs @@ -1,4 +1,3 @@ -using 
System.Diagnostics.CodeAnalysis; using Confluent.Kafka; using JetBrains.Annotations; using LocalPost.AsyncEnumerable; @@ -8,8 +7,8 @@ namespace LocalPost.KafkaConsumer; internal static class ConsumeContext { public static BatchBuilderFactory, BatchConsumeContext> BatchBuilder( - BatchSize batchMaxSizeSize, TimeSpan timeWindow) => ct => - new BatchConsumeContext.Builder(batchMaxSizeSize, timeWindow, ct); + MaxSize batchMaxSize, TimeSpan timeWindow) => ct => + new BatchConsumeContext.Builder(batchMaxSize, timeWindow, ct); } [PublicAPI] @@ -29,35 +28,46 @@ internal ConsumeContext(KafkaTopicClient client, TopicPartitionOffset offset, Me Payload = payload; } + public void Deconstruct(out T payload, out IReadOnlyList headers) + { + payload = Payload; + headers = Headers; + } + public string Topic => Client.Topic; public IReadOnlyList Headers => Message.Headers.BackingList; public ConsumeContext Transform(TOut payload) => new(Client, Offset, Message, payload); - public static implicit operator T(ConsumeContext context) => context.Payload; + public ConsumeContext Transform(Func, TOut> transform) => Transform(transform(this)); - public void Deconstruct(out T payload, out IReadOnlyList headers) - { - payload = Payload; - headers = Headers; - } + public async Task> Transform(Func, Task> transform) => + Transform(await transform(this)); + + public static implicit operator T(ConsumeContext context) => context.Payload; } [PublicAPI] public readonly record struct BatchConsumeContext { - internal sealed class Builder : - BoundedBatchBuilderBase, BatchConsumeContext> + internal sealed class Builder(MaxSize batchMaxSize, TimeSpan timeWindow, CancellationToken ct = default) + : BoundedBatchBuilderBase, BatchConsumeContext>(batchMaxSize, timeWindow, ct) { - public Builder(BatchSize batchMaxSizeSize, TimeSpan timeWindow, CancellationToken ct = default) : - base(batchMaxSizeSize, timeWindow, ct) + public override BatchConsumeContext Build() { +// #if NET6_0_OR_GREATER +// ReadOnlySpan s 
= CollectionsMarshal.AsSpan(Batch) +// var ia = s.ToImmutableArray(); +// return new BatchConsumeContext(Batch); +// #else +// return new BatchConsumeContext(Batch.ToImmutableArray()); +// #endif + return new BatchConsumeContext(Batch); } - - public override BatchConsumeContext Build() => new(Batch); } + // TODO ImmutableArray public readonly IReadOnlyList> Messages; internal BatchConsumeContext(IReadOnlyList> messages) @@ -73,6 +83,22 @@ internal BatchConsumeContext(IReadOnlyList> messages) public BatchConsumeContext Transform(IEnumerable> payload) => Transform(payload.ToArray()); + public BatchConsumeContext Transform(IEnumerable batchPayload) => + Transform(Messages.Zip(batchPayload, (message, payload) => message.Transform(payload))); + + public BatchConsumeContext Transform(Func, TOut> transform) + { + // TODO Parallel LINQ + var messages = Messages.Select(transform); + return Transform(messages); + } + + public async Task> Transform(Func, Task> transform) + { + var messages = await Task.WhenAll(Messages.Select(transform)); + return Transform(messages); + } + internal KafkaTopicClient Client => Messages[^1].Client; // Use .MaxBy() to not rely on the order?.. 
diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceHealthCheckRegistration.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecksBuilderEx.cs similarity index 74% rename from src/LocalPost.KafkaConsumer/DependencyInjection/ServiceHealthCheckRegistration.cs rename to src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecksBuilderEx.cs index 07af069..03e1f0c 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceHealthCheckRegistration.cs +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecksBuilderEx.cs @@ -6,16 +6,19 @@ namespace LocalPost.KafkaConsumer.DependencyInjection; [PublicAPI] -public static class ServiceHealthCheckRegistration +public static class HealthChecksBuilderEx { + // TODO AddKafkaConsumersLivenessCheck() — simply for all the registered consumers + // Check if the same check is added twice?.. + public static IHealthChecksBuilder AddKafkaConsumerLivenessCheck(this IHealthChecksBuilder builder, string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder .Add(HealthChecks.LivenessCheckForNamed(name, failureStatus, tags)) - .AddConsumerGroupLivenessCheck>(); + .AddNamedConsumerLivenessCheck>(name); public static IHealthChecksBuilder AddKafkaBatchConsumerLivenessCheck(this IHealthChecksBuilder builder, string name, HealthStatus? failureStatus = default, IEnumerable? 
tags = default) => builder .Add(HealthChecks.LivenessCheckForNamed(name, failureStatus, tags)) - .AddConsumerGroupLivenessCheck>(); + .AddNamedConsumerLivenessCheck>(name); } diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs index 82c118b..7893096 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs @@ -7,20 +7,20 @@ namespace LocalPost.KafkaConsumer.DependencyInjection; [PublicAPI] -public sealed class KafkaBuilder : OptionsBuilder +public sealed class KafkaBuilder(IServiceCollection services) { - public KafkaBuilder(IServiceCollection services, string? name) : base(services, name) - { - } + // public IServiceCollection Services { get; } + + public OptionsBuilder Defaults { get; } = services.AddOptions(); // public static OptionsBuilder AddKafkaConsumer(this IServiceCollection Services, string name, // Action>> configure, // Action> configureClient) => -// Services.AddKafkaConsumer(name, configure, configureClient); +// services.AddKafkaConsumer(name, configure, configureClient); // // JSON serializer is the default... But for Kafka it can be different?.. 
// public static OptionsBuilder AddKafkaConsumer(this IServiceCollection Services, -// string name) where THandler : IHandler> => Services.AddKafkaConsumer(name, provider => +// string name) where THandler : IHandler> => services.AddKafkaConsumer(name, provider => // { // // Keep .Scoped() as far as possible, as from that point all the middlewares will be resolved per request, not // // just once @@ -37,41 +37,36 @@ public OptionsBuilder AddConsumer(string name, HandlerFactory(); - - if (!Services.TryAddKafkaClient(name)) + if (!services.TryAddKafkaClient(name)) throw new ArgumentException("Kafka consumer is already registered", nameof(name)); - Services.TryAddNamedSingleton(name, provider => + services.TryAddNamedSingleton(name, provider => new MessageSource(provider.GetRequiredService(name))); - Services.AddBackgroundServiceForNamed(name); + services.AddBackgroundServiceForNamed(name); - Services.TryAddConsumerGroup, MessageSource>(name, configure); - - return Services.AddOptions(name).Configure>((options, commonConfigs) => + services.TryAddBackgroundConsumer, MessageSource>(name, configure, provider => { - var commonConfig = commonConfigs.Get(Name); + var options = provider.GetOptions(name); + return new ConsumerOptions(1, options.BreakOnException); + }); + return services.AddOptions(name).Configure>((options, commonConfig) => + { + options.EnrichFrom(commonConfig.Value); options.Topic = name; - options.Kafka = new ConsumerConfig(commonConfig) - { - EnableAutoOffsetStore = false, // We will store offsets manually, see ConsumeContext class - }; }); } public OptionsBuilder AddBatchConsumer(string name, HandlerFactory> configure) { - if (string.IsNullOrEmpty(name)) // TODO Just default (empty?) name... 
+ if (string.IsNullOrEmpty(name)) throw new ArgumentException("A proper (non empty) name is required", nameof(name)); - // Services.AddSingleton(); - - if (!Services.TryAddKafkaClient(name)) - throw new ArgumentException("Kafka consumer is already registered", nameof(name)); + if (!services.TryAddKafkaClient(name)) + throw new InvalidOperationException("Kafka consumer is already registered"); - Services.TryAddNamedSingleton(name, provider => + services.TryAddNamedSingleton(name, provider => { var options = provider.GetOptions(name); @@ -79,21 +74,18 @@ public OptionsBuilder AddBatchConsumer(string name, ConsumeContext.BatchBuilder( options.BatchMaxSize, TimeSpan.FromMilliseconds(options.BatchTimeWindowMilliseconds))); }); - Services.AddBackgroundServiceForNamed(name); - - Services.TryAddConsumerGroup, BatchMessageSource>(name, configure); -// Services.TryAddConsumerGroup(name, provider => BackgroundQueue.ConsumerGroupFor( -// provider.GetRequiredService(name), configure(provider), 1)); + services.AddBackgroundServiceForNamed(name); - return Services.AddOptions(name).Configure>((options, commonConfigs) => + services.TryAddBackgroundConsumer, BatchMessageSource>(name, configure, provider => { - var commonConfig = commonConfigs.Get(Name); + var options = provider.GetOptions(name); + return new ConsumerOptions(1, options.BreakOnException); + }); + return services.AddOptions(name).Configure>((options, commonConfig) => + { + options.EnrichFrom(commonConfig.Value); options.Topic = name; - options.Kafka = new ConsumerConfig(commonConfig) - { - EnableAutoOffsetStore = false, // We will store offsets manually, see ConsumeContext class - }; }); } } diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionEx.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionEx.cs new file mode 100644 index 0000000..2bdc981 --- /dev/null +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionEx.cs @@ -0,0 +1,26 @@ +using 
JetBrains.Annotations; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Logging; + +namespace LocalPost.KafkaConsumer.DependencyInjection; + +[PublicAPI] +public static class ServiceCollectionEx +{ + public static IServiceCollection AddKafkaConsumers(this IServiceCollection services, Action configure) + { + configure(new KafkaBuilder(services)); + + return services; + } + + internal static bool TryAddKafkaClient(this IServiceCollection services, string name) + where TOptions : Options => services.TryAddNamedSingleton(name, provider => + { + var options = provider.GetOptions(name); + + return new KafkaTopicClient(provider.GetRequiredService>(), + options, options.Topic, name); + }); +} diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs deleted file mode 100644 index 78db23a..0000000 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceRegistration.cs +++ /dev/null @@ -1,29 +0,0 @@ -using JetBrains.Annotations; -using LocalPost.DependencyInjection; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Logging; - -namespace LocalPost.KafkaConsumer.DependencyInjection; - -[PublicAPI] -public static class ServiceRegistration -{ - public static KafkaBuilder AddKafka(this IServiceCollection services, string? name = null) => - new(services, name); - - internal static bool TryAddKafkaClient(this IServiceCollection services, string name) - where TOptions : Options => - services.TryAddNamedSingleton(name, provider => - { - var options = provider.GetOptions(name); - - return new KafkaTopicClient(provider.GetRequiredService>(), - options.Kafka, options.Topic, name); - }); - - // Wrap it into an internal class, to avoid collision with other libraries?.. 
-// public static OptionsBuilder ConfigureKafkaConsumerDefaults(this IServiceCollection Services, -// Action configure) => -// // FIXME EnableAutoOffsetStore -// Services.AddOptions().Configure(configure); -} diff --git a/src/LocalPost.KafkaConsumer/HandlerStack.cs b/src/LocalPost.KafkaConsumer/HandlerStack.cs deleted file mode 100644 index ae66e0b..0000000 --- a/src/LocalPost.KafkaConsumer/HandlerStack.cs +++ /dev/null @@ -1,76 +0,0 @@ -using Confluent.Kafka; -using JetBrains.Annotations; - -namespace LocalPost.KafkaConsumer; - -[PublicAPI] -public static class KafkaHandlerStack -{ - public static HandlerFactory> Trace( - this HandlerFactory> handlerStack) => - handlerStack.Map, ConsumeContext>(next => - async (context, ct) => - { - using var activity = KafkaActivitySource.StartProcessing(context); - try - { - await next(context, ct); - activity?.Success(); - } - catch (Exception ex) - { - activity?.Error(ex); - } - }); - - public static HandlerFactory> Trace( - this HandlerFactory> handlerStack) => - handlerStack.Map, BatchConsumeContext>(next => - async (context, ct) => - { - using var activity = KafkaActivitySource.StartProcessing(context); - try - { - await next(context, ct); - activity?.Success(); - } - catch (Exception ex) - { - activity?.Error(ex); - } - }); - - public static HandlerFactory> Acknowledge( - this HandlerFactory> handlerStack) => - handlerStack.Map, ConsumeContext>(next => - async (context, ct) => - { - await next(context, ct); - context.Client.StoreOffset(context.Offset); - }); - - public static HandlerFactory> Acknowledge( - this HandlerFactory> handlerStack) => - handlerStack.Map, BatchConsumeContext>(next => - async (context, ct) => - { - await next(context, ct); - context.Client.StoreOffset(context.LatestOffset); - }); - - public static HandlerFactory> Deserialize( - this HandlerFactory> handlerStack, IAsyncDeserializer deserializer) - { - var middleware = new DeserializationMiddleware { Deserializer = deserializer }; - - return 
handlerStack.Map(middleware.Invoke); - } - - public static HandlerFactory> Deserialize( - this HandlerFactory> handlerStack, IAsyncDeserializer deserializer) - { - var middleware = new DeserializationMiddleware { Deserializer = deserializer }; - - return handlerStack.Map(middleware.Invoke); - } -} diff --git a/src/LocalPost.KafkaConsumer/HandlerStackEx.cs b/src/LocalPost.KafkaConsumer/HandlerStackEx.cs new file mode 100644 index 0000000..5857d4f --- /dev/null +++ b/src/LocalPost.KafkaConsumer/HandlerStackEx.cs @@ -0,0 +1,124 @@ +using System.Collections.Immutable; +using System.Text.Json; +using Confluent.Kafka; +using JetBrains.Annotations; + +namespace LocalPost.KafkaConsumer; + +[PublicAPI] +public static class HandlerStackEx +{ + public static HandlerFactory> UseKafkaPayload(this HandlerFactory hf) => + hf.Map, T>(next => async (context, ct) => await next(context.Payload, ct)); + + public static HandlerFactory> UseKafkaPayload(this HandlerFactory> hf) => + hf.Map, IReadOnlyList>(next => + async (context, ct) => await next(context.Messages.Select(m => m.Payload).ToImmutableList(), ct)); + + public static HandlerFactory> Trace(this HandlerFactory> hf) => + hf.Map, ConsumeContext>(next => + async (context, ct) => + { + using var activity = KafkaActivitySource.StartProcessing(context); + try + { + await next(context, ct); + activity?.Success(); + } + catch (Exception e) + { + activity?.Error(e); + throw; + } + }); + + public static HandlerFactory> Trace(this HandlerFactory> hf) => + hf.Map, BatchConsumeContext>(next => + async (context, ct) => + { + using var activity = KafkaActivitySource.StartProcessing(context); + try + { + await next(context, ct); + activity?.Success(); + } + catch (Exception e) + { + activity?.Error(e); + throw; + } + }); + + public static HandlerFactory> Acknowledge(this HandlerFactory> hf) => + hf.Map, ConsumeContext>(next => + async (context, ct) => + { + await next(context, ct); + context.Client.StoreOffset(context.Offset); + }); + + 
public static HandlerFactory> Acknowledge( + this HandlerFactory> hf) => + hf.Map, BatchConsumeContext>(next => + async (context, ct) => + { + await next(context, ct); + context.Client.StoreOffset(context.LatestOffset); + }); + + #region Deserialize() + + public static HandlerFactory> Deserialize( + this HandlerFactory> hf, Func, T> deserialize) => + hf.Map, ConsumeContext>(next => + async (context, ct) => await next(context.Transform(deserialize), ct)); + + public static HandlerFactory> Deserialize( + this HandlerFactory> hf, Func, T> deserialize) => + hf.Map, BatchConsumeContext>(next => + async (context, ct) => await next(context.Transform(deserialize), ct)); + + public static HandlerFactory> Deserialize( + this HandlerFactory> hf, Func, Task> deserialize) => + hf.Map, ConsumeContext>(next => + async (context, ct) => await next(await context.Transform(deserialize), ct)); + + public static HandlerFactory> Deserialize( + this HandlerFactory> hf, Func, Task> deserialize) => + hf.Map, BatchConsumeContext>(next => + async (context, ct) => await next(await context.Transform(deserialize), ct)); + + private static Func, Task> AsyncDeserializer(IAsyncDeserializer deserializer) => + context => deserializer.DeserializeAsync(context.Payload, false, new SerializationContext( + MessageComponentType.Value, context.Topic, context.Message.Headers)); + + public static HandlerFactory> Deserialize( + this HandlerFactory> hf, IAsyncDeserializer deserializer) => + hf.Deserialize(AsyncDeserializer(deserializer)); + + public static HandlerFactory> Deserialize( + this HandlerFactory> hf, IAsyncDeserializer deserializer) => + hf.Deserialize(AsyncDeserializer(deserializer)); + + private static Func, T> Deserializer(IDeserializer deserializer) => + context => deserializer.Deserialize(context.Payload, false, new SerializationContext( + MessageComponentType.Value, context.Topic, context.Message.Headers)); + + public static HandlerFactory> Deserialize( + this HandlerFactory> hf, IDeserializer 
deserializer) => + hf.Deserialize(Deserializer(deserializer)); + + public static HandlerFactory> Deserialize( + this HandlerFactory> hf, IDeserializer deserializer) => + hf.Deserialize(Deserializer(deserializer)); + + #endregion + + public static HandlerFactory> DeserializeJson( + this HandlerFactory> hf, JsonSerializerOptions? options = null) => + hf.Deserialize(context => JsonSerializer.Deserialize(context.Payload, options)!); + + public static HandlerFactory> DeserializeJson( + this HandlerFactory> hf, JsonSerializerOptions? options = null) => + hf.Deserialize(context => JsonSerializer.Deserialize(context.Payload, options)!); +} diff --git a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj index 83ff635..ea01d12 100644 --- a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj +++ b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj @@ -1,7 +1,7 @@ - netstandard2.0 + netstandard2.0;net6.0;net8.0 true false @@ -41,10 +41,14 @@ - - - + + + + + + + diff --git a/src/LocalPost.KafkaConsumer/Middlewares.cs b/src/LocalPost.KafkaConsumer/Middlewares.cs deleted file mode 100644 index a973b5b..0000000 --- a/src/LocalPost.KafkaConsumer/Middlewares.cs +++ /dev/null @@ -1,61 +0,0 @@ -using System.Collections.Immutable; -using Confluent.Kafka; -using Microsoft.Extensions.DependencyInjection; - -namespace LocalPost.KafkaConsumer; - -// internal static class Middlewares -// { -// public static HandlerMiddleware, ConsumeContext> Acknowledge(IServiceProvider provider) => -// provider.GetRequiredService().Invoke; -// -// public static HandlerMiddleware, BatchConsumeContext> AcknowledgeBatch( -// IServiceProvider provider) => provider.GetRequiredService().Invoke; -// } -// -// internal sealed class AcknowledgeMiddleware -// { -// private readonly ImmutableDictionary _clients; -// -// public AcknowledgeMiddleware(IEnumerable clients) -// { -// _clients = clients.ToImmutableDictionary(client => 
client.Name, client => client); -// } -// -// public Handler> Invoke(Handler> next) => async (context, ct) => -// { -// await next(context, ct); -// _clients[context.ClientName].StoreOffset(context.Message); -// }; -// -// public Handler> Invoke(Handler> next) => async (context, ct) => -// { -// await next(context, ct); -// _clients[context.ConsumerName].StoreOffset(context.Latest); -// }; -// } - -internal sealed class DeserializationMiddleware : - IHandlerMiddleware, BatchConsumeContext>, - IHandlerMiddleware, ConsumeContext> -{ - public required IAsyncDeserializer Deserializer { get; init; } - - public Handler> Invoke(Handler> next) => - async (context, ct) => await next(await Deserialize(context), ct); - - public Handler> Invoke(Handler> next) => - async (context, ct) => - { - var messages = await Task.WhenAll(context.Messages.Select(Deserialize)); - await next(context.Transform(messages), ct); - }; - - private async Task> Deserialize(ConsumeContext context) - { - var payload = await Deserializer.DeserializeAsync(context.Payload, false, new SerializationContext( - MessageComponentType.Value, context.Topic, context.Message.Headers)); - - return context.Transform(payload); - } -} diff --git a/src/LocalPost.KafkaConsumer/Options.cs b/src/LocalPost.KafkaConsumer/Options.cs index 8d8b3b7..53521c0 100644 --- a/src/LocalPost.KafkaConsumer/Options.cs +++ b/src/LocalPost.KafkaConsumer/Options.cs @@ -5,35 +5,32 @@ namespace LocalPost.KafkaConsumer; [PublicAPI] -public record Options +public class Options : ConsumerConfig { - /// - /// Group ID, auth and other options should be set directly. 
- /// - public ConsumerConfig Kafka { get; set; } = new() + public Options() { - EnableAutoOffsetStore = false, // We will store offsets manually, see ConsumeContext class - }; + EnableAutoOffsetStore = false; // We will store offsets manually, see Acknowledge middleware + } [Required] public string Topic { get; set; } = null!; - // TODO Implement (via ApplicationLifecycle) -// public bool ShutdownAppOnFatalError { get; set; } = true; + /// + /// Stop the consumer in case of an exception in the handler, or just log it and continue the processing loop. + /// Default is true. + /// + public bool BreakOnException { get; set; } = true; - // Implement later?.. -// /// -// /// How many (parallel) consumers to spawn. -// /// -// [Range(1, 10)] -// public byte Instances { get; set; } = 1; + internal void EnrichFrom(Config config) + { + foreach (var kv in config) + Set(kv.Key, kv.Value); + } } [PublicAPI] -public record BatchedOptions : Options +public class BatchedOptions : Options { - [Range(1, ushort.MaxValue)] - public ushort BatchMaxSize { get; set; } = 100; + [Range(1, ushort.MaxValue)] public ushort BatchMaxSize { get; set; } = 100; - [Range(1, ushort.MaxValue)] - public int BatchTimeWindowMilliseconds { get; set; } = 1_000; + [Range(1, ushort.MaxValue)] public int BatchTimeWindowMilliseconds { get; set; } = 1_000; } diff --git a/src/LocalPost.KafkaConsumer/README.md b/src/LocalPost.KafkaConsumer/README.md index 9d4a213..84e199c 100644 --- a/src/LocalPost.KafkaConsumer/README.md +++ b/src/LocalPost.KafkaConsumer/README.md @@ -8,16 +8,18 @@ internal queue, so `Consume()` calls will return faster. Because of this behavior, there is no need to maintain our own in memory queue (channel). +## Concurrent processing +A Kafka consumer is designed to handle messages _from one partition_ sequentially, as it commits the offset of the last +processed message. 
+One of the common ways to speed up things (increase throughput) is to have multiple partitions for a topic and multiple +parallel consumers. +Another way is to batch process messages. +## Message key ignorance - - - -## Immutability (actually lack of it) - -Because Kafka messages are meant to be processed sequentially (parallelism is achieved by having multiple -partitions / consumers), `ConsumeContext`/`BatchConsumeContext` objects are not immutable and are reused for each -handler's call. +Kafka's message key is used for almost one and only one purpose: to determine the partition for the message, when +publishing. And in almost all the cases this information is also available (serialized) in the message itself +(message value in Kafka terms). That's why we are ignoring the message key in this consumer. diff --git a/src/LocalPost.Polly/HandlerStack.cs b/src/LocalPost.Polly/HandlerStackEx.cs similarity index 53% rename from src/LocalPost.Polly/HandlerStack.cs rename to src/LocalPost.Polly/HandlerStackEx.cs index 7ad9517..9c5fa91 100644 --- a/src/LocalPost.Polly/HandlerStack.cs +++ b/src/LocalPost.Polly/HandlerStackEx.cs @@ -4,21 +4,21 @@ namespace LocalPost.Polly; [PublicAPI] -public static class PollyHandlerStack +public static class HandlerStackEx { - public static HandlerFactory UsePollyPipeline(this HandlerFactory handlerStack, - ResiliencePipeline pipeline) => - handlerStack.Map(next => async (context, ct) => + public static HandlerFactory UsePollyPipeline(this HandlerFactory hf, + ResiliencePipeline pipeline) => hf.Touch(next => + async (context, ct) => { - await pipeline.ExecuteAsync(ct => next(context, ct), ct); + await pipeline.ExecuteAsync(execCt => next(context, execCt), ct); }); - public static HandlerFactory UsePollyPipeline(this HandlerFactory handlerStack, + public static HandlerFactory UsePollyPipeline(this HandlerFactory hf, Action configure) { var builder = new ResiliencePipelineBuilder(); configure(builder); - return 
handlerStack.UsePollyPipeline(builder.Build()); + return hf.UsePollyPipeline(builder.Build()); } } diff --git a/src/LocalPost.Polly/LocalPost.Polly.csproj b/src/LocalPost.Polly/LocalPost.Polly.csproj index 12ae501..96e36f6 100644 --- a/src/LocalPost.Polly/LocalPost.Polly.csproj +++ b/src/LocalPost.Polly/LocalPost.Polly.csproj @@ -1,7 +1,7 @@ - netstandard2.0 + netstandard2.0;net6.0;net8.0 true false @@ -41,9 +41,11 @@ - - + + + + diff --git a/src/LocalPost.SqsConsumer/ConsumeContext.cs b/src/LocalPost.SqsConsumer/ConsumeContext.cs index 471c3b5..1ab7534 100644 --- a/src/LocalPost.SqsConsumer/ConsumeContext.cs +++ b/src/LocalPost.SqsConsumer/ConsumeContext.cs @@ -8,7 +8,7 @@ namespace LocalPost.SqsConsumer; internal static class ConsumeContext { public static BatchBuilderFactory, BatchConsumeContext> BatchBuilder( - BatchSize batchMaxSizeSize, TimeSpan timeWindow) => ct => + MaxSize batchMaxSizeSize, TimeSpan timeWindow) => ct => new BatchConsumeContext.Builder(batchMaxSizeSize, timeWindow, ct); } @@ -62,20 +62,20 @@ public async Task> Transform(Func, [PublicAPI] public readonly record struct BatchConsumeContext { - internal sealed class Builder : BoundedBatchBuilderBase, BatchConsumeContext> + internal sealed class Builder(MaxSize batchMaxSize, TimeSpan timeWindow, CancellationToken ct = default) + : BoundedBatchBuilderBase, BatchConsumeContext>(batchMaxSize, timeWindow, ct) { - public Builder(BatchSize batchMaxSizeSize, TimeSpan timeWindow, CancellationToken ct = default) : - base(batchMaxSizeSize, timeWindow, ct) - { - } - public override BatchConsumeContext Build() => new(Batch); } + // TODO ImmutableArray public readonly IReadOnlyList> Messages; internal BatchConsumeContext(IReadOnlyList> messages) { + if (messages.Count == 0) + throw new ArgumentException("Batch must contain at least one message", nameof(messages)); + Messages = messages; } diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceHealthCheckRegistration.cs 
b/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecksBuilderEx.cs similarity index 55% rename from src/LocalPost.SqsConsumer/DependencyInjection/ServiceHealthCheckRegistration.cs rename to src/LocalPost.SqsConsumer/DependencyInjection/HealthChecksBuilderEx.cs index 7b88cd7..dfd824e 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceHealthCheckRegistration.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecksBuilderEx.cs @@ -4,16 +4,19 @@ namespace LocalPost.SqsConsumer.DependencyInjection; -public static class ServiceHealthCheckRegistration +public static class HealthChecksBuilderEx { + // TODO AddSqsConsumersLivenessCheck() — simply for all the registered consumers + // Check if the same check is added twice?.. - public static IHealthChecksBuilder AddKafkaConsumerLivenessCheck(this IHealthChecksBuilder builder, + + public static IHealthChecksBuilder AddSqsConsumerLivenessCheck(this IHealthChecksBuilder builder, string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder .Add(HealthChecks.LivenessCheckForNamed(name, failureStatus, tags)) - .AddConsumerGroupLivenessCheck>(); + .AddNamedConsumerLivenessCheck>(name); - public static IHealthChecksBuilder AddKafkaBatchConsumerLivenessCheck(this IHealthChecksBuilder builder, + public static IHealthChecksBuilder AddSqsBatchConsumerLivenessCheck(this IHealthChecksBuilder builder, string name, HealthStatus? failureStatus = default, IEnumerable? 
tags = default) => builder .Add(HealthChecks.LivenessCheckForNamed(name, failureStatus, tags)) - .AddConsumerGroupLivenessCheck>(); + .AddNamedConsumerLivenessCheck>(name); } diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceCollectionEx.cs b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceCollectionEx.cs new file mode 100644 index 0000000..bf9804c --- /dev/null +++ b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceCollectionEx.cs @@ -0,0 +1,21 @@ +using JetBrains.Annotations; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; + +namespace LocalPost.SqsConsumer.DependencyInjection; + +[PublicAPI] +public static class ServiceCollectionEx +{ + public static IServiceCollection AddSqsConsumers(this IServiceCollection services, Action configure) + { + configure(new SqsBuilder(services)); + + return services; + } + + internal static bool TryAddQueueClient(this IServiceCollection services, string name) + where TOptions : Options => + services.TryAddNamedSingleton(name, provider => + ActivatorUtilities.CreateInstance(provider, name)); +} diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs deleted file mode 100644 index 4d7eed1..0000000 --- a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceRegistration.cs +++ /dev/null @@ -1,111 +0,0 @@ -using JetBrains.Annotations; -using LocalPost.DependencyInjection; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.DependencyInjection.Extensions; -using Microsoft.Extensions.Hosting; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Options; - -namespace LocalPost.SqsConsumer.DependencyInjection; - -[PublicAPI] -public static class ServiceRegistration -{ - public static SqsBuilder AddSqsConsumers(this IServiceCollection services) => - new(services, null); - - internal static bool TryAddQueueClient(this IServiceCollection services, 
string name) - where TOptions : Options => - services.TryAddNamedSingleton(name, provider => - ActivatorUtilities.CreateInstance(provider, name)); -} - - -// TODO Remove -// [PublicAPI] -// public static class ServiceRegistration -// { -// // TODO Implement -// // public static OptionsBuilder AddAmazonSqsJsonConsumer(this IServiceCollection services, -// // string name, Action? configure = null) where THandler : IHandler => -// // services.AddAmazonSqsConsumer(name, builder => -// // { -// // builder.MiddlewareStackBuilder.SetHandler(); -// // configure?.Invoke(builder); -// // }); -// -// public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, -// string name, Action>? configure = null) -// where THandler : IHandler => -// services.AddAmazonSqsConsumer(name, builder => -// { -// builder.SetHandler(); -// configure?.Invoke(builder); -// }); -// -// // public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, -// // string name, Handler handler, Action? 
configure = null) => -// // services.AddAmazonSqsConsumer(name, builder => -// // { -// // builder.MiddlewareStackBuilder.SetHandler(handler); -// // configure?.Invoke(builder); -// // }); -// -// // public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, -// // string name, Func handler) where TDep1 : notnull => -// // services.AddAmazonSqsConsumer(name, provider => (context, ct) => -// // { -// // var dep1 = provider.GetRequiredService(); -// // -// // return handler(dep1, context, ct); -// // }); -// // -// // public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, -// // string name, Func handler) -// // where TDep1 : notnull -// // where TDep2 : notnull => -// // services.AddAmazonSqsConsumer(name, provider => (context, ct) => -// // { -// // var dep1 = provider.GetRequiredService(); -// // var dep2 = provider.GetRequiredService(); -// // -// // return handler(dep1, dep2, context, ct); -// // }); -// // -// // public static OptionsBuilder AddAmazonSqsMinimalConsumer(this IServiceCollection services, -// // string name, Func handler) -// // where TDep1 : notnull -// // where TDep2 : notnull -// // where TDep3 : notnull => -// // services.AddAmazonSqsConsumer(name, provider => (context, ct) => -// // { -// // var dep1 = provider.GetRequiredService(); -// // var dep2 = provider.GetRequiredService(); -// // var dep3 = provider.GetRequiredService(); -// // -// // return handler(dep1, dep2, dep3, context, ct); -// // }); -// -// // public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, -// // string name) where THandler : IMessageHandler => -// // services -// // .AddAmazonSqsConsumer(name, provider => provider.GetRequiredService().Process); -// -// public static OptionsBuilder AddAmazonSqsConsumer(this IServiceCollection services, -// string name, Action> configure) -// { -// services.AddConcurrentHostedServices(); -// -// services.TryAddSingleton(); -// -// if 
(services.TryAddNamedSingleton(name, provider => SqsConsumerService.Create(provider, name, configure))) -// throw new InvalidOperationException($"SQS consumer is already registered: {name}"); -// -// services.AddSingleton(provider => -// provider.GetRequiredService(name).Reader); -// services.AddSingleton(provider => -// provider.GetRequiredService(name).ConsumerGroup); -// -// return services.AddOptions(name).Configure(options => options.QueueName = name); -// } -// } diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs index 23fb64a..a77f764 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs @@ -6,49 +6,48 @@ namespace LocalPost.SqsConsumer.DependencyInjection; [PublicAPI] -public sealed class SqsBuilder : OptionsBuilder +public sealed class SqsBuilder(IServiceCollection services) { - internal SqsBuilder(IServiceCollection services, string? name) : base(services, name) - { - } + // public IServiceCollection Services { get; } + + public OptionsBuilder Defaults { get; } = services.AddOptions(); - public OptionsBuilder AddConsumer(string name, HandlerFactory> configure) + public OptionsBuilder AddConsumer(string name, HandlerFactory> hf) { - if (string.IsNullOrEmpty(name)) // TODO Just default empty name... 
+ if (string.IsNullOrEmpty(name)) throw new ArgumentException("A proper (non empty) name is required", nameof(name)); - // Services.AddSingleton(); + if (!services.TryAddQueueClient(name)) + // return ob; // Already added, don't register twice + throw new InvalidOperationException("SQS consumer is already registered"); - if (!Services.TryAddQueueClient(name)) - throw new ArgumentException("SQS consumer is already registered", nameof(name)); - - Services.TryAddNamedSingleton(name, provider => + services.TryAddNamedSingleton(name, provider => new MessageSource(provider.GetRequiredService(name))); - Services.AddBackgroundServiceForNamed(name); - - Services.TryAddConsumerGroup, MessageSource>(name, configure); + services.AddBackgroundServiceForNamed(name); - return Services.AddOptions(name).Configure>((options, commonConfigs) => + services.TryAddBackgroundConsumer, MessageSource>(name, hf, provider => { - var commonConfig = commonConfigs.Get(Name); + var options = provider.GetOptions(name); + return new ConsumerOptions(options.MaxConcurrency, options.BreakOnException); + }); - options.UpdateFrom(commonConfig); + return services.AddOptions(name).Configure>((options, commonConfig) => + { + options.UpdateFrom(commonConfig.Value); options.QueueName = name; }); } - public OptionsBuilder AddBatchConsumer(string name, - HandlerFactory> configure) + public OptionsBuilder AddBatchConsumer(string name, HandlerFactory> hf) { - if (string.IsNullOrEmpty(name)) // TODO Just default empty name... 
+ if (string.IsNullOrEmpty(name)) throw new ArgumentException("A proper (non empty) name is required", nameof(name)); - // Services.AddSingleton(); - - if (!Services.TryAddQueueClient(name)) - throw new ArgumentException("SQS consumer is already registered", nameof(name)); + if (!services.TryAddQueueClient(name)) + // return ob; // Already added, don't register twice + throw new InvalidOperationException("SQS consumer is already registered"); - Services.TryAddNamedSingleton(name, provider => + services.TryAddNamedSingleton(name, provider => { var options = provider.GetOptions(name); @@ -56,17 +55,19 @@ public OptionsBuilder AddBatchConsumer(string name, ConsumeContext.BatchBuilder( options.BatchMaxSize, TimeSpan.FromMilliseconds(options.BatchTimeWindowMilliseconds))); }); - Services.AddBackgroundServiceForNamed(name); - - Services.TryAddConsumerGroup, BatchMessageSource>(name, configure); -// Services.TryAddConsumerGroup(name, provider => BackgroundQueue.ConsumerGroupFor( -// provider.GetRequiredService(name), configure(provider), 1)); + services.AddBackgroundServiceForNamed(name); - return Services.AddOptions(name).Configure>((options, commonConfigs) => + // services.TryAddConsumerGroup, BatchMessageSource>(name, hf, + // ConsumerOptions.From(o => new ConsumerOptions(o.MaxConcurrency, o.BreakOnException))); + services.TryAddBackgroundConsumer, BatchMessageSource>(name, hf, provider => { - var commonConfig = commonConfigs.Get(Name); + var options = provider.GetOptions(name); + return new ConsumerOptions(options.MaxConcurrency, options.BreakOnException); + }); - options.UpdateFrom(commonConfig); + return services.AddOptions(name).Configure>((options, commonConfig) => + { + options.UpdateFrom(commonConfig.Value); options.QueueName = name; }); } diff --git a/src/LocalPost.SqsConsumer/HandlerStack.cs b/src/LocalPost.SqsConsumer/HandlerStackEx.cs similarity index 59% rename from src/LocalPost.SqsConsumer/HandlerStack.cs rename to 
src/LocalPost.SqsConsumer/HandlerStackEx.cs index ea35841..2293812 100644 --- a/src/LocalPost.SqsConsumer/HandlerStack.cs +++ b/src/LocalPost.SqsConsumer/HandlerStackEx.cs @@ -1,45 +1,53 @@ -using System.Diagnostics; -using Amazon.Runtime.Internal; +using System.Collections.Immutable; +using System.Text.Json; using JetBrains.Annotations; namespace LocalPost.SqsConsumer; [PublicAPI] -public static class SqsHandlerStack +public static class HandlerStackEx { + public static HandlerFactory> UseSqsPayload(this HandlerFactory hf) => + hf.Map, T>(next => async (context, ct) => await next(context.Payload, ct)); + + public static HandlerFactory> UseSqsPayload(this HandlerFactory> hf) => + hf.Map, IReadOnlyList>(next => + async (context, ct) => await next(context.Messages.Select(m => m.Payload).ToImmutableList(), ct)); + public static HandlerFactory> Trace( this HandlerFactory> handlerStack) => - handlerStack.Map, ConsumeContext>(next => - async (context, ct) => + handlerStack.Map, ConsumeContext>(next => async (context, ct) => + { + using var activity = SqsActivitySource.StartProcessing(context); + try { - using var activity = SqsActivitySource.StartProcessing(context); - try - { - await next(context, ct); - activity?.Success(); - } - catch (Exception ex) - { - activity?.Error(ex); - } - }); + await next(context, ct); + activity?.Success(); + } + catch (Exception e) + { + activity?.Error(e); + throw; + } + }); public static HandlerFactory> Trace( this HandlerFactory> handlerStack) => - handlerStack.Map, BatchConsumeContext>(next => - async (context, ct) => + handlerStack.Map, BatchConsumeContext>(next => async (context, ct) => + { + // TODO Link distributed transactions from each message + using var activity = SqsActivitySource.StartProcessing(context); + try { - using var activity = SqsActivitySource.StartProcessing(context); - try - { - await next(context, ct); - activity?.Success(); - } - catch (Exception ex) - { - activity?.Error(ex); - } - }); + await next(context, 
ct); + activity?.Success(); + } + catch (Exception e) + { + activity?.Error(e); + throw; + } + }); public static HandlerFactory> Acknowledge( this HandlerFactory> handlerStack) => @@ -66,18 +74,26 @@ public static HandlerFactory> Deserialize( handlerStack.Map, ConsumeContext>(next => async (context, ct) => await next(context.Transform(deserialize), ct)); - public static HandlerFactory> Deserialize( - this HandlerFactory> handlerStack, Func, Task> deserialize) => - handlerStack.Map, ConsumeContext>(next => - async (context, ct) => await next(await context.Transform(deserialize), ct)); - public static HandlerFactory> Deserialize( this HandlerFactory> handlerStack, Func, T> deserialize) => handlerStack.Map, BatchConsumeContext>(next => async (context, ct) => await next(context.Transform(deserialize), ct)); + public static HandlerFactory> Deserialize( + this HandlerFactory> handlerStack, Func, Task> deserialize) => + handlerStack.Map, ConsumeContext>(next => + async (context, ct) => await next(await context.Transform(deserialize), ct)); + public static HandlerFactory> Deserialize( this HandlerFactory> handlerStack, Func, Task> deserialize) => handlerStack.Map, BatchConsumeContext>(next => async (context, ct) => await next(await context.Transform(deserialize), ct)); + + public static HandlerFactory> DeserializeJson( + this HandlerFactory> hf, JsonSerializerOptions? options = null) => + hf.Deserialize(context => JsonSerializer.Deserialize(context.Payload, options)!); + + public static HandlerFactory> DeserializeJson( + this HandlerFactory> hf, JsonSerializerOptions? 
options = null) => + hf.Deserialize(context => JsonSerializer.Deserialize(context.Payload, options)!); } diff --git a/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj b/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj index e509fa6..37c4c6e 100644 --- a/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj +++ b/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj @@ -1,7 +1,7 @@ - netstandard2.0 + netstandard2.0;net6.0;net8.0 true false @@ -42,9 +42,13 @@ - - + + + + + + diff --git a/src/LocalPost.SqsConsumer/MessageSource.cs b/src/LocalPost.SqsConsumer/MessageSource.cs index 820495f..bf9be7e 100644 --- a/src/LocalPost.SqsConsumer/MessageSource.cs +++ b/src/LocalPost.SqsConsumer/MessageSource.cs @@ -35,20 +35,13 @@ public IAsyncEnumerator> GetAsyncEnumerator(Cancella _source.GetAsyncEnumerator(ct); } -internal abstract class MessageSourceBase : IBackgroundService, INamedService +internal abstract class MessageSourceBase(QueueClient client) : IBackgroundService, INamedService { - private readonly QueueClient _client; - private bool _stopped; - protected MessageSourceBase(QueueClient client) - { - _client = client; - } - - public string Name => _client.Name; + public string Name => client.Name; - public async Task StartAsync(CancellationToken ct) => await _client.ConnectAsync(ct); + public async Task StartAsync(CancellationToken ct) => await client.ConnectAsync(ct); public abstract Task ExecuteAsync(CancellationToken ct); @@ -56,17 +49,15 @@ protected async IAsyncEnumerable> ConsumeAsync( [EnumeratorCancellation] CancellationToken ct = default) { while (!ct.IsCancellationRequested && !_stopped) - foreach (var message in await _client.PullMessagesAsync(ct)) - yield return message; + foreach (var message in await client.PullMessagesAsync(ct)) + yield return new ConsumeContext(client, message, message.Body); ct.ThrowIfCancellationRequested(); } - // Run on a separate thread, as Confluent Kafka API is blocking public Task StopAsync(CancellationToken ct) { 
_stopped = true; -// _client.Close(); return Task.CompletedTask; } diff --git a/src/LocalPost.SqsConsumer/Middlewares.cs b/src/LocalPost.SqsConsumer/Middlewares.cs new file mode 100644 index 0000000..4df5539 --- /dev/null +++ b/src/LocalPost.SqsConsumer/Middlewares.cs @@ -0,0 +1,35 @@ +using System.Collections.Immutable; +using Microsoft.Extensions.DependencyInjection; + +namespace LocalPost.SqsConsumer; + +// internal static class Middlewares +// { +// public static HandlerMiddleware, ConsumeContext> Acknowledge(IServiceProvider provider) => +// provider.GetRequiredService().Invoke; +// +// public static HandlerMiddleware, BatchConsumeContext> AcknowledgeBatch( +// IServiceProvider provider) => provider.GetRequiredService().Invoke; +// } +// +// internal sealed class AcknowledgeMiddleware +// { +// private readonly ImmutableDictionary _clients; +// +// public AcknowledgeMiddleware(IEnumerable clients) +// { +// _clients = clients.ToImmutableDictionary(client => client.Name, client => client); +// } +// +// public Handler> Invoke(Handler> next) => async (context, ct) => +// { +// await next(context, ct); +// await _clients[context.ClientName].DeleteMessageAsync(context); +// }; +// +// public Handler> Invoke(Handler> next) => async (context, ct) => +// { +// await next(context, ct); +// await _clients[context.ClientName].DeleteMessagesAsync(context); +// }; +// } diff --git a/src/LocalPost.SqsConsumer/Options.cs b/src/LocalPost.SqsConsumer/Options.cs index e702dbc..c266364 100644 --- a/src/LocalPost.SqsConsumer/Options.cs +++ b/src/LocalPost.SqsConsumer/Options.cs @@ -10,18 +10,24 @@ namespace LocalPost.SqsConsumer; public record EndpointOptions { // AWS SDK requires List... 
No way to make it readonly / immutable :( - internal static readonly List AllAttributes = new() { "All" }; - internal static readonly List AllMessageAttributes = new() { "All" }; + internal static readonly List AllAttributes = ["All"]; + internal static readonly List AllMessageAttributes = ["All"]; /// - /// How many messages to process in parallel. Default is 10. + /// How many messages to process concurrently. Default is 10. /// - [Required] public ushort MaxConcurrency { get; set; } = 10; + public ushort MaxConcurrency { get; set; } = 10; /// - /// How many messages to prefetch from the queue. Default is 10. + /// Stop the consumer in case of an exception in the handler, or just log it and continue the processing loop. + /// Default is true. /// - public byte Prefetch { get; set; } = 10; + public bool BreakOnException { get; set; } = true; + + /// + /// How many messages to prefetch from SQS. Default is 10. + /// + public byte Prefetch { get; set; } = 10; // FIXME Use /// /// Time to wait for available messages in the queue. 0 is short pooling, where 1..20 activates long pooling. diff --git a/src/LocalPost.SqsConsumer/QueueClient.cs b/src/LocalPost.SqsConsumer/QueueClient.cs index e32635b..3027c59 100644 --- a/src/LocalPost.SqsConsumer/QueueClient.cs +++ b/src/LocalPost.SqsConsumer/QueueClient.cs @@ -68,7 +68,7 @@ private async Task FetchQueueAttributesAsync(CancellationToken ct) } } - public async Task>> PullMessagesAsync(CancellationToken ct) + public async Task> PullMessagesAsync(CancellationToken ct) { using var activity = SqsActivitySource.StartReceiving(this); @@ -88,7 +88,7 @@ public async Task>> PullMessagesAsync(Cancell activity?.SetTagsFor(response); - return response.Messages.Select(message => new ConsumeContext(this, message, message.Body)); + return response.Messages; // TODO Log failures?.. 
diff --git a/src/LocalPost.SqsConsumer/README.md b/src/LocalPost.SqsConsumer/README.md index 8ff15e3..3924a11 100644 --- a/src/LocalPost.SqsConsumer/README.md +++ b/src/LocalPost.SqsConsumer/README.md @@ -1 +1,9 @@ -# LocalPost SQS Consumer +# LocalPost Amazon SQS Consumer + +## IAM Permissions + +To operate on a queue, below [permissions](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-api-permissions-reference.html) are required: +- `sqs:GetQueueUrl` +- `sqs:GetQueueAttributes` +- `sqs:ReceiveMessage` +- `sqs:ChangeMessageVisibility` diff --git a/src/LocalPost/AppHealthSupervisor.cs b/src/LocalPost/AppHealthSupervisor.cs new file mode 100644 index 0000000..49debf7 --- /dev/null +++ b/src/LocalPost/AppHealthSupervisor.cs @@ -0,0 +1,41 @@ +using System.Collections.Immutable; +using JetBrains.Annotations; +using Microsoft.Extensions.Diagnostics.HealthChecks; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; + +namespace LocalPost; + +[UsedImplicitly] +internal sealed class AppHealthSupervisor(ILogger logger, + HealthCheckService healthChecker, IHostApplicationLifetime appLifetime) : IBackgroundService +{ + public TimeSpan CheckInterval { get; init; } = TimeSpan.FromSeconds(1); + public int ExitCode { get; init; } = 1; + public IImmutableSet Tags { get; init; } = ImmutableHashSet.Empty; + + public Task StartAsync(CancellationToken ct) => Task.CompletedTask; + + public async Task ExecuteAsync(CancellationToken ct) + { + while (!ct.IsCancellationRequested) + { + var result = await Check(ct); + if (result.Status == HealthStatus.Unhealthy) + { + logger.LogError("Health check failed, stopping the application..."); + appLifetime.StopApplication(); + Environment.ExitCode = ExitCode; + break; + } + + await Task.Delay(CheckInterval, ct); + } + } + + private Task Check(CancellationToken ct = default) => Tags.Count == 0 + ? 
healthChecker.CheckHealthAsync(ct) + : healthChecker.CheckHealthAsync(hcr => Tags.IsSubsetOf(hcr.Tags), ct); + + public Task StopAsync(CancellationToken ct) => Task.CompletedTask; +} diff --git a/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs b/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs index ec0d6b1..153a020 100644 --- a/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs +++ b/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs @@ -3,8 +3,8 @@ namespace LocalPost.AsyncEnumerable; internal static class AsyncEnumerableEx { // TODO Better name... - public static ConcurrentAsyncEnumerable ToConcurrent(this IAsyncEnumerable source, int bufferMaxSize = 1) => - new(source, bufferMaxSize); + public static ConcurrentAsyncEnumerable ToConcurrent(this IAsyncEnumerable source, + MaxSize bufferMaxSize = default) => new(source, bufferMaxSize); public static IAsyncEnumerable Batch(this IAsyncEnumerable source, BatchBuilderFactory factory) => new BatchingAsyncEnumerable(source, factory); diff --git a/src/LocalPost/AsyncEnumerable/BatchBuilder.cs b/src/LocalPost/AsyncEnumerable/BatchBuilder.cs index d850fc2..7a3c9ce 100644 --- a/src/LocalPost/AsyncEnumerable/BatchBuilder.cs +++ b/src/LocalPost/AsyncEnumerable/BatchBuilder.cs @@ -1,28 +1,7 @@ -using JetBrains.Annotations; using Nito.AsyncEx; namespace LocalPost.AsyncEnumerable; -[PublicAPI] -public readonly record struct BatchSize // TODO Rename to Size?.. 
-{ - public static implicit operator int(BatchSize batchSize) => batchSize.Value; - - public static implicit operator BatchSize(int batchSize) => new(batchSize); - - public int Value { get; } - - public BatchSize(int value) - { - if (value <= 0) - throw new ArgumentOutOfRangeException(nameof(value), value, "Batch size must be positive."); - - Value = value; - } - - public void Deconstruct(out int value) => value = Value; -} - internal delegate IBatchBuilder BatchBuilderFactory(CancellationToken ct = default); internal interface IBatchBuilder : IDisposable @@ -107,19 +86,19 @@ public void Dispose() internal abstract class BoundedBatchBuilderBase : BatchBuilderBase { - private readonly BatchSize _batchMaxSizeSize; - protected List Batch; + private readonly MaxSize _batchMaxSize; + protected List Batch; // FIXME ImmutableArrayBuilder - protected BoundedBatchBuilderBase(BatchSize batchMaxSizeSize, TimeSpan timeWindow, CancellationToken ct = default) : + protected BoundedBatchBuilderBase(MaxSize batchMaxSize, TimeSpan timeWindow, CancellationToken ct = default) : base(timeWindow, ct) { - _batchMaxSizeSize = batchMaxSizeSize; - Batch = new List(_batchMaxSizeSize); + _batchMaxSize = batchMaxSize; + Batch = new List(_batchMaxSize); } public override bool IsEmpty => Batch.Count == 0; - public override bool Full => Batch.Count >= _batchMaxSizeSize; + public override bool Full => Batch.Count >= _batchMaxSize; public override bool TryAdd(T entry) { @@ -134,14 +113,14 @@ public override bool TryAdd(T entry) public override void Reset() { base.Reset(); - Batch = new List(_batchMaxSizeSize); + Batch = new List(_batchMaxSize); } } internal sealed class BoundedBatchBuilder : BoundedBatchBuilderBase> { - public BoundedBatchBuilder(BatchSize batchMaxSizeSize, TimeSpan timeWindow, CancellationToken ct = default) : - base(batchMaxSizeSize, timeWindow, ct) + public BoundedBatchBuilder(MaxSize batchMaxSize, TimeSpan timeWindow, CancellationToken ct = default) : + base(batchMaxSize, 
timeWindow, ct) { } diff --git a/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs b/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs index 5cb7d80..d816288 100644 --- a/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs +++ b/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs @@ -1,87 +1,12 @@ namespace LocalPost.AsyncEnumerable; -internal sealed class BatchingAsyncEnumerable : IAsyncEnumerable +internal sealed class BatchingAsyncEnumerable( + IAsyncEnumerable reader, BatchBuilderFactory factory) : IAsyncEnumerable { - private readonly IAsyncEnumerable _reader; - private readonly BatchBuilderFactory _factory; - - public BatchingAsyncEnumerable(IAsyncEnumerable source, BatchBuilderFactory factory) - { - _reader = source; - _factory = factory; - - } - -// public async IAsyncEnumerator GetAsyncEnumerator_old(CancellationToken ct = default) -// { -// // FIXME To static builder... -// using var batchBuilder = _factory(ct); -// -// var source = _reader.GetAsyncEnumerator(ct); -// var completed = false; -// var waitTrigger = source.MoveNextAsync(); -// Task? waitTask = null; -// while (!completed && !ct.IsCancellationRequested) -// { -// var shift = false; -// try -// { -// if (waitTask is null && waitTrigger.IsCompleted) -// completed = !await waitTrigger; -// else -// { -// waitTask ??= waitTrigger.AsTask(); -// // To save some allocations?.. -//// completed = !await (await Task.WhenAny(waitTask, batchBuilder.TimeWindowTrigger)); -// completed = !await waitTask.WaitAsync(batchBuilder.TimeWindow); -// waitTask = null; -// } -// -// if (completed) -// continue; -// } -// catch (OperationCanceledException e) when (e.CancellationToken == batchBuilder.TimeWindow) -// { -// shift = true; -// } -// catch (OperationCanceledException) // User (global) cancellation -// { -// continue; -// } -// -// if (shift) -// { // C# doesn't allow "yield return" in a try/catch block... 
-// if (!batchBuilder.IsEmpty) -// yield return batchBuilder.Flush(); -// -// continue; -// } -// -// if (!batchBuilder.TryAdd(source.Current)) -// if (!batchBuilder.IsEmpty) -// { -// // Flush the current buffer and start a fresh one -// yield return batchBuilder.Flush(); -// if (!batchBuilder.TryAdd(source.Current)) -// HandleSkipped(source.Current); // Even an empty batch cannot fit it... -// } -// else -// HandleSkipped(source.Current); // Even an empty buffer cannot fit it... -// -// waitTrigger = source.MoveNextAsync(); -// } -// -// // Flush on completion or error... -// if (!batchBuilder.IsEmpty) -// yield return batchBuilder.Flush(); -// -// ct.ThrowIfCancellationRequested(); -// } - public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) { - await using var source = _reader.GetAsyncEnumerator(ct); - using var batchBuilder = _factory(ct); + await using var source = reader.GetAsyncEnumerator(ct); + using var batchBuilder = factory(ct); while (!ct.IsCancellationRequested) { TOut completedBatch; diff --git a/src/LocalPost/AsyncEnumerable/ConcurrentAsyncEnumerable.cs b/src/LocalPost/AsyncEnumerable/ConcurrentAsyncEnumerable.cs index ce3912a..7699639 100644 --- a/src/LocalPost/AsyncEnumerable/ConcurrentAsyncEnumerable.cs +++ b/src/LocalPost/AsyncEnumerable/ConcurrentAsyncEnumerable.cs @@ -2,28 +2,22 @@ namespace LocalPost.AsyncEnumerable; -internal sealed class ConcurrentAsyncEnumerable : IAsyncEnumerable +internal sealed class ConcurrentAsyncEnumerable(IAsyncEnumerable source, MaxSize bufferMaxSize) + : IAsyncEnumerable { - private readonly IAsyncEnumerable _reader; - private readonly Channel _buffer; - - public ConcurrentAsyncEnumerable(IAsyncEnumerable source, int bufferMaxSize = 1) + private readonly Channel _buffer = Channel.CreateBounded(new BoundedChannelOptions(bufferMaxSize) { - _reader = source; - _buffer = Channel.CreateBounded(new BoundedChannelOptions(bufferMaxSize) - { - SingleReader = false, - SingleWriter = true, - 
FullMode = BoundedChannelFullMode.Wait, - }); - } + SingleReader = false, + SingleWriter = true, + FullMode = BoundedChannelFullMode.Wait, + }); public async Task Run(CancellationToken ct) { var buffer = _buffer.Writer; try { - await foreach (var item in _reader.WithCancellation(ct)) + await foreach (var item in source.WithCancellation(ct)) await buffer.WriteAsync(item, ct); } finally @@ -35,7 +29,7 @@ public async Task Run(CancellationToken ct) public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) { var buffer = _buffer.Reader; - // Like ReadAllAsync() from netstandard2.1 + // Like ReadAllAsync() from netstandard2.1/.NET Core 3.0+ while (await buffer.WaitToReadAsync(ct).ConfigureAwait(false)) while (buffer.TryRead(out var item)) yield return item; diff --git a/src/LocalPost/BackgroundActivitySource.cs b/src/LocalPost/BackgroundActivitySource.cs new file mode 100644 index 0000000..dd81915 --- /dev/null +++ b/src/LocalPost/BackgroundActivitySource.cs @@ -0,0 +1,26 @@ +using System.Diagnostics; +using System.Reflection; + +namespace LocalPost; + +internal static class BackgroundActivitySource +{ + public static readonly ActivitySource Source; + + public static bool IsEnabled => Source.HasListeners(); + + static BackgroundActivitySource() + { + // See https://stackoverflow.com/a/909583/322079 + var assembly = Assembly.GetExecutingAssembly(); + var version = AssemblyName.GetAssemblyName(assembly.Location).Version; + Source = new System.Diagnostics.ActivitySource(assembly.FullName, version.ToString()); + } + + // public static Activity? 
StartProcessing(ConsumeContext context) + // { + // var activity = Source.CreateActivity($"{context.Client.Topic} process", ActivityKind., ac); + // + // return activity; + // } +} diff --git a/src/LocalPost/BackgroundJobQueue.cs b/src/LocalPost/BackgroundJobQueue.cs deleted file mode 100644 index 30f0073..0000000 --- a/src/LocalPost/BackgroundJobQueue.cs +++ /dev/null @@ -1,25 +0,0 @@ -using JetBrains.Annotations; - -namespace LocalPost; - -public delegate Task Job(CancellationToken ct); - -/// -/// Just a convenient alias for . -/// -public interface IBackgroundJobQueue : IBackgroundQueue -{ -} - -[UsedImplicitly] -internal sealed class BackgroundJobQueue : IBackgroundJobQueue -{ - private readonly BackgroundQueue _queue; - - public BackgroundJobQueue(BackgroundQueue queue) - { - _queue = queue; - } - - public ValueTask Enqueue(Job item, CancellationToken ct = default) => _queue.Enqueue(item, ct); -} diff --git a/src/LocalPost/BackgroundQueue.cs b/src/LocalPost/BackgroundQueue.cs index d78375c..80ae0b7 100644 --- a/src/LocalPost/BackgroundQueue.cs +++ b/src/LocalPost/BackgroundQueue.cs @@ -1,116 +1,20 @@ -using System.Threading.Channels; -using JetBrains.Annotations; -using LocalPost.AsyncEnumerable; - namespace LocalPost; -[PublicAPI] +/// +/// Entrypoint for the background queue, inject it where you need to enqueue items. +/// +/// public interface IBackgroundQueue { // TODO Custom exception when closed?.. Or just return true/false?.. 
- ValueTask Enqueue(T item, CancellationToken ct = default); + ValueTask Enqueue(T payload, CancellationToken ct = default); } -internal static partial class BackgroundQueue -{ - public static BackgroundQueue Create(BackgroundQueueOptions options) => - Create(options, reader => reader.ReadAllAsync()); - - public static BackgroundQueue> CreateBatched(BatchedBackgroundQueueOptions options) => - Create>(options, - reader => reader - .ReadAllAsync() - .Batch(ct => new BoundedBatchBuilder(options.BatchMaxSize, options.BatchTimeWindow, ct)), - true); - - // To make the pipeline linear (single consumer), just add .ToConcurrent() to the end - public static BackgroundQueue Create(BackgroundQueueOptions options, - Func, IAsyncEnumerable> configure, - bool proxy = false) // TODO Rename this parameter somehow... - { - var channel = options.MaxSize switch - { - not null => Channel.CreateBounded(new BoundedChannelOptions(options.MaxSize.Value) - { - SingleReader = proxy || options.MaxConcurrency == 1, - SingleWriter = false, // We do not know how it will be used - FullMode = options.FullMode, - }), - _ => Channel.CreateUnbounded(new UnboundedChannelOptions - { - SingleReader = proxy || options.MaxConcurrency == 1, - SingleWriter = false, // We do not know how it will be used - }) - }; - - var pipeline = configure(channel.Reader); - if (proxy) - pipeline = pipeline.ToConcurrent(); - - return new BackgroundQueue(channel, pipeline, - TimeSpan.FromMilliseconds(options.CompletionTimeout ?? 
0)); - } -} - -internal static partial class BackgroundQueue -{ - public static readonly string Name = "BackgroundQueue/" + Reflection.FriendlyNameOf(); -} - -internal sealed class BackgroundQueue : IBackgroundQueue, IAsyncEnumerable, - IBackgroundService -{ - private readonly TimeSpan _completionTimeout; - private readonly ChannelWriter _messages; - private readonly IAsyncEnumerable _pipeline; - - public BackgroundQueue(ChannelWriter input, IAsyncEnumerable pipeline, TimeSpan completionTimeout) - { - _completionTimeout = completionTimeout; - _messages = input; - _pipeline = pipeline; - } - - public IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct) => _pipeline.GetAsyncEnumerator(ct); - - // Track full or not later - public ValueTask Enqueue(T item, CancellationToken ct = default) => _messages.WriteAsync(item, ct); - - public bool IsClosed { get; private set; } // TODO Use - - private async ValueTask CompleteAsync(CancellationToken ct = default) - { - if (IsClosed) - return; - - await Task.Delay(_completionTimeout, ct); - - _messages.Complete(); - IsClosed = true; - } - - public Task StartAsync(CancellationToken ct) => Task.CompletedTask; - - public Task ExecuteAsync(CancellationToken ct) => _pipeline switch - { - ConcurrentAsyncEnumerable concurrent => concurrent.Run(ct), - _ => Task.CompletedTask - }; - - public async Task StopAsync(CancellationToken forceExitToken) => await CompleteAsync(forceExitToken); -} - - +public delegate Task BackgroundJob(CancellationToken ct); -//// Open to public later?.. -//internal interface IBackgroundQueueManager -//{ -// // Implement later for a better health check?.. -//// bool IsFull { get; } -// -// bool IsClosed { get; } -// -// ValueTask CompleteAsync(CancellationToken ct = default); -//} +/// +/// Just a convenient alias for . 
+/// +public interface IBackgroundJobQueue : IBackgroundQueue; diff --git a/src/LocalPost/BackgroundQueueConsumer.cs b/src/LocalPost/BackgroundQueueConsumer.cs index 207c1d9..2ea95b2 100644 --- a/src/LocalPost/BackgroundQueueConsumer.cs +++ b/src/LocalPost/BackgroundQueueConsumer.cs @@ -1,116 +1,130 @@ using LocalPost.DependencyInjection; +using Microsoft.Extensions.Logging; namespace LocalPost; -// public async Task StopAsync(CancellationToken forceExitToken) -// { -// // Do not cancel the execution immediately, as it will finish gracefully itself (when the channel is closed) -// -// // TODO .NET 6 async... -// using var linked = forceExitToken.Register(() => _executionCts?.Cancel()); -// -// if (_execution is not null) -// await _execution; -// } +internal sealed record ConsumerOptions(ushort MaxConcurrency, bool BreakOnException); - - -internal static partial class BackgroundQueue +internal static class BackgroundQueue { -// public static BackgroundQueue.Consumer ConsumerFor(IAsyncEnumerable reader, Handler handler) => -// new(reader, handler); - -// public static IBackgroundServiceSupervisor ConsumerSupervisorFor(BackgroundQueue.Consumer consumer) => -// new BackgroundServiceSupervisor(consumer); - - public static ConsumerGroup ConsumerGroupFor(TQ queue, Handler handler, int maxConcurrency) - where TQ : IAsyncEnumerable => new(Consumer.Loop(queue, handler), maxConcurrency); - - public static ConsumerGroup ConsumerGroupOverDisposablesFor(TQ queue, Handler handler, int maxConcurrency) - where TQ : IAsyncEnumerable - where T : IDisposable => new(Consumer.LoopOverDisposables(queue, handler), maxConcurrency); - - public static NamedConsumerGroup ConsumerGroupForNamed(TQ queue, Handler handler, int maxConcurrency) - where TQ : IAsyncEnumerable, INamedService => new(queue, Consumer.Loop(queue, handler), maxConcurrency); - - public static NamedConsumerGroup ConsumerGroupOverDisposablesForNamed(TQ queue, Handler handler, int maxConcurrency) - where TQ : 
IAsyncEnumerable, INamedService - where T : IDisposable => new(queue, Consumer.Loop(queue, handler), maxConcurrency); + // public static ConsumerGroup ConsumerGroupFor(TQ queue, Handler handler, ushort maxConcurrency) + // where TQ : IAsyncEnumerable => new(Consumer.LoopOver(queue, handler), maxConcurrency); + // + // public static NamedConsumerGroup ConsumerGroupForNamed( + // TQ queue, Handler handler, ushort maxConcurrency) + // where TQ : IAsyncEnumerable, INamedService => + // new(queue, Consumer.LoopOver(queue, handler), maxConcurrency); - internal class NamedConsumerGroup : ConsumerGroupBase, INamedService + // Parametrized class, to be used with the Dependency Injection container + internal class NamedConsumer( + ILogger> logger, + TQ queue, + Handler handler, + ushort maxConcurrency) + : ConsumerBase(logger, queue, handler, maxConcurrency), INamedService where TQ : IAsyncEnumerable, INamedService { - public NamedConsumerGroup(TQ queue, Func loop, int maxConcurrency) : - base(loop, maxConcurrency) - { - Name = queue.Name; - } - - public string Name { get; } + public string Name { get; } = queue.Name; } // Parametrized class, to be used with the Dependency Injection container - internal class ConsumerGroup : ConsumerGroupBase where TQ : IAsyncEnumerable + internal class Consumer( + ILogger> logger, + TQ queue, + Handler handler, + ushort maxConcurrency) + : ConsumerBase(logger, queue, handler, maxConcurrency) + where TQ : IAsyncEnumerable; + + internal abstract class ConsumerBase( + ILogger> logger, + IAsyncEnumerable queue, + Handler handler, + ushort maxConcurrency) + : IBackgroundService //, IDisposable { - public ConsumerGroup(Func loop, int maxConcurrency) : base(loop, maxConcurrency) - { - } - } + public bool BreakOnException { get; init; } = false; + // private bool _broken = false; - // Parametrized class, to be used with the Dependency Injection container - internal class ConsumerGroupBase : IBackgroundService - { - private readonly List _consumers; 
+ private Task? _exec; + private CancellationTokenSource? _execCts; - protected ConsumerGroupBase(Func loop, int maxConcurrency) + private async Task Execute(CancellationToken execCt) { - _consumers = Enumerable.Range(1, maxConcurrency) - .Select(_ => new Consumer(loop)) - .ToList(); - } - - public Task StartAsync(CancellationToken ct) => Task.CompletedTask; + // using var loopCts = new CancellationTokenSource(); + using var loopCts = CancellationTokenSource.CreateLinkedTokenSource(execCt); + // using var cts = CancellationTokenSource.CreateLinkedTokenSource(execCt, loopCts.Token); + var loopCt = loopCts.Token; - public Task ExecuteAsync(CancellationToken ct) => - Task.WhenAll(_consumers.Select(c => c.ExecuteAsync(ct))); + await Task.WhenAll(Enumerable.Range(1, maxConcurrency) + .Select(_ => Loop())); - public Task StopAsync(CancellationToken ct) => - Task.WhenAll(_consumers.Select(c => c.StopAsync(ct))); - } + return; - internal sealed class Consumer - { - public static Func Loop(IAsyncEnumerable queue, Handler handler) => - async (CancellationToken ct) => + async Task Loop() { - await foreach (var message in queue.WithCancellation(ct)) - await handler(message, ct); - }; - - public static Func LoopOverDisposables - (IAsyncEnumerable queue, Handler handler) where T : IDisposable => - async (CancellationToken ct) => + try + { + await foreach (var message in queue.WithCancellation(loopCt)) + await Handle(message); + } + catch (OperationCanceledException) when (loopCt.IsCancellationRequested) + { + // Fine, breaking the loop because of an exception in the handler + } + } + + async Task Handle(T message) { - await foreach (var message in queue.WithCancellation(ct)) - try - { - await handler(message, ct); - } - finally + try + { + await handler(message, execCt); + } + catch (OperationCanceledException) when (execCt.IsCancellationRequested) + { + throw; // App shutdown timeout (force shutdown) + } + catch (Exception e) + { + if (BreakOnException) { - message.Dispose(); + 
// Break the loop (all the concurrent executions of it) + // ReSharper disable once AccessToDisposedClosure + loopCts.Cancel(); + // Push it up, so the service is marked as unhealthy + throw; } - }; - private readonly Func _loop; + logger.LogError(e, "Failed to handle a message"); + } + } + } + + public Task StartAsync(CancellationToken ct) => Task.CompletedTask; - public Consumer(Func loop) + public Task ExecuteAsync(CancellationToken ct) { - _loop = loop; + if (_exec is not null) + return _exec; + + var execCts = _execCts = new CancellationTokenSource(); + return _exec = Execute(execCts.Token); } - public Task ExecuteAsync(CancellationToken ct) => _loop(ct); + // Process the rest (leftovers). Common cases: + // - SQS: message source (fetcher) has been stopped, so we just need to process leftovers from the channel + // - Kafka: message source (consumer) has been stopped, so we just need to process leftovers from the channel + // - Background (job) queue: hope that the producers are stopped, so no new messages should appear, so we + // just need to process leftovers from the queue + public Task StopAsync(CancellationToken ct) + { + if (_exec is null) + return Task.CompletedTask; + + ct.Register(() => _execCts?.Cancel()); + return _exec; - public Task StopAsync(CancellationToken ct) => _loop(ct); // Process the rest (leftovers) + // Cleanup the state?.. 
+ } } } diff --git a/src/LocalPost/BackgroundQueueService.cs b/src/LocalPost/BackgroundQueueService.cs deleted file mode 100644 index 90b74bc..0000000 --- a/src/LocalPost/BackgroundQueueService.cs +++ /dev/null @@ -1,80 +0,0 @@ -using LocalPost.AsyncEnumerable; -using LocalPost.DependencyInjection; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Diagnostics.HealthChecks; - -namespace LocalPost; - -//internal sealed class BackgroundQueueService -//{ -// public static readonly string Name = Reflection.FriendlyNameOf(); -// -// public static BackgroundQueueService Create(IServiceProvider provider, HandlerStack handlerStack) -// { -// var options = provider.GetOptions>(); -// -// var queue = new BackgroundQueue(options); -// -// HandlerFactory handlerFactory = handlerStack.Resolve; -// Handler handler = ActivatorUtilities.CreateInstance(provider, -// Name, handlerFactory).InvokeAsync; -// -// var consumer = new BackgroundQueue.Consumer(queue, handler); -// var consumerGroup = BackgroundQueue.ConsumerGroupSupervisorFor(consumer, options.MaxConcurrency); -// -// return new BackgroundQueueService(queue, consumerGroup); -// } -// -// // TODO Use -// public static BackgroundQueueService CreateBatched(IServiceProvider provider, -// HandlerStack handlerStack, BatchBuilderFactory batchFactory) -// { -// var options = provider.GetOptions>(); -// -// var queue = new BackgroundQueue(options); -// var batchQueue = new BackgroundQueue(options); -// -// // Just a single consumer, to do the batching properly -// var consumer = new BackgroundQueue.BatchBuilder(queue, batchQueue, batchFactory); -// var consumerSupervisor = new ConsumerSupervisor(consumer.Run); -// -// HandlerFactory handlerFactory = handlerStack.Resolve; -// Handler handler = ActivatorUtilities.CreateInstance(provider, -// Name, handlerFactory).InvokeAsync; -// var batchConsumer = new BackgroundQueue.Consumer(batchQueue, handler); -// var batchConsumerGroup = 
BackgroundQueue.ConsumerGroupSupervisorFor(batchConsumer, options.MaxConcurrency); -// -// return new BackgroundQueueService(queue, -// new IBackgroundServiceSupervisor.Combined(consumerSupervisor, batchConsumerGroup)); -// } -// -// private BackgroundQueueService(BackgroundQueue queue, IBackgroundServiceSupervisor consumerGroup) -// { -// Queue = queue; -// QueueSupervisor = new BackgroundQueue.Supervisor(queue); -// -// ConsumerGroup = consumerGroup; -// _consumerGroupReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(consumerGroup); -// _consumerGroupLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(consumerGroup); -// } -// -// public IBackgroundQueue Queue { get; } -// -// public IConcurrentHostedService QueueSupervisor { get; } -// -// public IConcurrentHostedService ConsumerGroup { get; } -// private readonly IHealthCheck _consumerGroupReadinessCheck; -// private readonly IHealthCheck _consumerGroupLivenessCheck; -// -// public static HealthCheckRegistration ConsumerGroupReadinessCheck(HealthStatus? failureStatus = default, -// IEnumerable? tags = default) => new(Name, -// provider => provider.GetRequiredService>()._consumerGroupReadinessCheck, -// failureStatus, -// tags); -// -// public static HealthCheckRegistration ConsumerGroupLivenessCheck(HealthStatus? failureStatus = default, -// IEnumerable? tags = default) => new(Name, -// provider => provider.GetRequiredService>()._consumerGroupLivenessCheck, -// failureStatus, -// tags); -//} diff --git a/src/LocalPost/BackgroundQueueSupervisor.cs b/src/LocalPost/BackgroundQueueSupervisor.cs deleted file mode 100644 index 5ad9231..0000000 --- a/src/LocalPost/BackgroundQueueSupervisor.cs +++ /dev/null @@ -1,21 +0,0 @@ -namespace LocalPost; - -//internal sealed partial class BackgroundQueue -//{ -// internal sealed class Supervisor : IConcurrentHostedService -// { -// // Health checks later?.. Like full or not. 
-// -// private readonly IBackgroundQueueManager _queue; -// -// public Supervisor(IBackgroundQueueManager queue) -// { -// _queue = queue; -// } -// -// // TODO Run the enumarable... Like Batched. -// public Task StartAsync(CancellationToken ct) => Task.CompletedTask; -// -// public async Task StopAsync(CancellationToken forceExitToken) => await _queue.CompleteAsync(forceExitToken); -// } -//} diff --git a/src/LocalPost/BackgroundQueues/BackgroundJobQueue.cs b/src/LocalPost/BackgroundQueues/BackgroundJobQueue.cs new file mode 100644 index 0000000..1d71991 --- /dev/null +++ b/src/LocalPost/BackgroundQueues/BackgroundJobQueue.cs @@ -0,0 +1,15 @@ +using JetBrains.Annotations; + +namespace LocalPost.BackgroundQueues; + +// Just a proxy to the actual queue, needed to expose IBackgroundJobQueue +[UsedImplicitly] +internal sealed class BackgroundJobQueue(BackgroundQueue> queue) + : IBackgroundJobQueue +{ + public ValueTask Enqueue(ConsumeContext item, CancellationToken ct = default) => + queue.Enqueue(item, ct); + + public ValueTask Enqueue(BackgroundJob payload, CancellationToken ct = default) => + Enqueue(new ConsumeContext(payload), ct); +} diff --git a/src/LocalPost/BackgroundQueues/BackgroundQueue.cs b/src/LocalPost/BackgroundQueues/BackgroundQueue.cs new file mode 100644 index 0000000..6ea88ed --- /dev/null +++ b/src/LocalPost/BackgroundQueues/BackgroundQueue.cs @@ -0,0 +1,90 @@ +using System.Threading.Channels; +using LocalPost.AsyncEnumerable; + +namespace LocalPost.BackgroundQueues; + +internal static class BackgroundQueue +{ + public static BackgroundQueue> Create(BackgroundQueueOptions options) => + Create>(options, reader => reader.ReadAllAsync()); + + public static BackgroundQueue>> CreateBatched( + BatchedBackgroundQueueOptions options) => + Create>>(options, + reader => reader + .ReadAllAsync() + .Batch(ct => + new BoundedBatchBuilder>(options.BatchMaxSize, options.BatchTimeWindow, ct)), + true); + + // To make the pipeline linear (single consumer), just 
add .ToConcurrent() to the end + public static BackgroundQueue Create(BackgroundQueueOptions options, + Func>, IAsyncEnumerable> configure, + bool proxy = false) // TODO Rename this parameter somehow... + { + var channel = options.MaxSize switch + { + not null => Channel.CreateBounded>(new BoundedChannelOptions(options.MaxSize.Value) + { + SingleReader = proxy || options.MaxConcurrency == 1, + SingleWriter = false, // We do not know how it will be used + FullMode = options.FullMode, + }), + _ => Channel.CreateUnbounded>(new UnboundedChannelOptions + { + SingleReader = proxy || options.MaxConcurrency == 1, + SingleWriter = false, // We do not know how it will be used + }) + }; + + var pipeline = configure(channel.Reader); + if (proxy) + pipeline = pipeline.ToConcurrent(); + + return new BackgroundQueue(channel, pipeline, + TimeSpan.FromMilliseconds(options.CompletionTimeout)); + } +} + +internal static partial class BackgroundQueue +{ + public static readonly string Name = "BackgroundQueue/" + Reflection.FriendlyNameOf(); +} + +internal sealed class BackgroundQueue( + ChannelWriter> input, + IAsyncEnumerable pipeline, + TimeSpan completionDelay) + : IAsyncEnumerable, IBackgroundService, IBackgroundQueue, IQueuePublisher> +{ + public IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct) => pipeline.GetAsyncEnumerator(ct); + + // Track full or not later + public ValueTask Enqueue(ConsumeContext item, CancellationToken ct = default) => input.WriteAsync(item, ct); + + public ValueTask Enqueue(T item, CancellationToken ct = default) => Enqueue(new ConsumeContext(item), ct); + + public bool IsClosed { get; private set; } // TODO Use + + private async ValueTask CompleteAsync(CancellationToken ct = default) + { + if (IsClosed) + return; + + if (completionDelay.TotalMilliseconds > 0) + await Task.Delay(completionDelay, ct); + + input.Complete(); + IsClosed = true; + } + + public Task StartAsync(CancellationToken ct) => Task.CompletedTask; + + public Task 
ExecuteAsync(CancellationToken ct) => pipeline switch + { + ConcurrentAsyncEnumerable concurrent => concurrent.Run(ct), + _ => Task.CompletedTask + }; + + public async Task StopAsync(CancellationToken forceExitToken) => await CompleteAsync(forceExitToken); +} diff --git a/src/LocalPost/BackgroundQueueOptions.cs b/src/LocalPost/BackgroundQueues/BackgroundQueueOptions.cs similarity index 78% rename from src/LocalPost/BackgroundQueueOptions.cs rename to src/LocalPost/BackgroundQueues/BackgroundQueueOptions.cs index aab061b..31d8540 100644 --- a/src/LocalPost/BackgroundQueueOptions.cs +++ b/src/LocalPost/BackgroundQueues/BackgroundQueueOptions.cs @@ -1,12 +1,12 @@ using System.ComponentModel.DataAnnotations; using System.Threading.Channels; -namespace LocalPost; +namespace LocalPost.BackgroundQueues; -// For the DI container and, to distinguish between different queues +// For the DI container, to distinguish between different queues public sealed record BackgroundQueueOptions : BackgroundQueueOptions; -// For the DI container and, to distinguish between different queues +// For the DI container, to distinguish between different queues public sealed record BatchedBackgroundQueueOptions : BatchedBackgroundQueueOptions; public record BatchedBackgroundQueueOptions : BackgroundQueueOptions @@ -32,17 +32,18 @@ public record BackgroundQueueOptions public BoundedChannelFullMode FullMode { get; set; } = BoundedChannelFullMode.DropOldest; /// - /// Maximum queue (channel) length, after which writes are blocked. Default is unlimited. + /// Maximum queue (channel) length, after which writes are blocked (see ). + /// Default is unlimited. /// public ushort? MaxSize { get; set; } = null; /// /// How long to wait before closing the queue (channel) on app shutdown. Default is 1 second. /// - public ushort? 
CompletionTimeout { get; set; } = 1_000; // Milliseconds + public ushort CompletionTimeout { get; set; } = 1_000; // Milliseconds /// - /// How many messages to process in parallel. Default is 10. + /// How many messages to process concurrently. Default is 10. /// [Required] public ushort MaxConcurrency { get; set; } = 10; } diff --git a/src/LocalPost/BackgroundQueues/ConsumeContext.cs b/src/LocalPost/BackgroundQueues/ConsumeContext.cs new file mode 100644 index 0000000..fee0e15 --- /dev/null +++ b/src/LocalPost/BackgroundQueues/ConsumeContext.cs @@ -0,0 +1,32 @@ +using System.Diagnostics; +using JetBrains.Annotations; + +namespace LocalPost.BackgroundQueues; + +[PublicAPI] +public readonly record struct ConsumeContext +{ + public readonly ActivityContext? ActivityContext; + public readonly T Payload; + + internal ConsumeContext(T payload) : this(payload, Activity.Current?.Context) + { + } + + internal ConsumeContext(T payload, ActivityContext? activityContext) + { + Payload = payload; + ActivityContext = activityContext; + } + + public ConsumeContext Transform(TOut payload) => new(payload, ActivityContext); + + public static implicit operator ConsumeContext(T payload) => new(payload); + + public static implicit operator T(ConsumeContext context) => context.Payload; + + public void Deconstruct(out T payload) + { + payload = Payload; + } +} diff --git a/src/LocalPost/BackgroundQueues/DependencyInjection/BackgroundQueuesBuilder.cs b/src/LocalPost/BackgroundQueues/DependencyInjection/BackgroundQueuesBuilder.cs new file mode 100644 index 0000000..52bd89b --- /dev/null +++ b/src/LocalPost/BackgroundQueues/DependencyInjection/BackgroundQueuesBuilder.cs @@ -0,0 +1,55 @@ +using JetBrains.Annotations; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Options; + +namespace LocalPost.BackgroundQueues.DependencyInjection; + +[PublicAPI] +public class BackgroundQueuesBuilder(IServiceCollection services) +{ + 
public OptionsBuilder> AddJobQueue() + { + services.TryAddSingleton(); + services.TryAddSingletonAlias(); + + // TODO Allow to configure the handler somehow + return AddQueue( + HandlerStack.For(async (job, ct) => await job(ct)) + .Scoped() + .UsePayload() + .Trace() + ); + } + + // THandler has to be registered by the user + public OptionsBuilder> AddQueue() + where THandler : IHandler => + AddQueue( + HandlerStack.From() + .Scoped() + .UsePayload() + .Trace() + ); + + public OptionsBuilder> AddQueue(HandlerFactory> hf) + { + if (!services.TryAddSingletonAlias, BackgroundQueue>>()) + // return ob; // Already added, don't register twice + throw new InvalidOperationException($"BackgroundQueue<{Reflection.FriendlyNameOf()}> is already registered."); + + services.TryAddSingleton(provider => + BackgroundQueue.Create(provider.GetOptions>())); + services.AddBackgroundServiceFor>>(); + + services.TryAddBackgroundConsumer, BackgroundQueue>>(hf, provider => + { + var options = provider.GetOptions>(); + return new ConsumerOptions(options.MaxConcurrency, false); + }); + + return services.AddOptions>(); + } + + // TODO Batched +} diff --git a/src/LocalPost/DependencyInjection/ServiceHealthCheckRegistration.cs b/src/LocalPost/BackgroundQueues/DependencyInjection/HealthChecks.cs similarity index 72% rename from src/LocalPost/DependencyInjection/ServiceHealthCheckRegistration.cs rename to src/LocalPost/BackgroundQueues/DependencyInjection/HealthChecks.cs index 530d1b8..98a8164 100644 --- a/src/LocalPost/DependencyInjection/ServiceHealthCheckRegistration.cs +++ b/src/LocalPost/BackgroundQueues/DependencyInjection/HealthChecks.cs @@ -1,16 +1,18 @@ using JetBrains.Annotations; +using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Diagnostics.HealthChecks; -namespace LocalPost.DependencyInjection; +namespace LocalPost.BackgroundQueues.DependencyInjection; + [PublicAPI] -public static class ServiceHealthCheckRegistration 
+public static class HealthChecksBuilderEx { // Not needed, as there is no complex logic inside. It's either working, or dead. // public static IHealthChecksBuilder AddBackgroundQueueReadinessCheck(... public static IHealthChecksBuilder AddBackgroundQueueLivenessCheck(this IHealthChecksBuilder builder, HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder - .AddConsumerGroupLivenessCheck, T>(); + .AddConsumerLivenessCheck, T>(); } diff --git a/src/LocalPost/BackgroundQueues/DependencyInjection/ServiceCollectionEx.cs b/src/LocalPost/BackgroundQueues/DependencyInjection/ServiceCollectionEx.cs new file mode 100644 index 0000000..77dd680 --- /dev/null +++ b/src/LocalPost/BackgroundQueues/DependencyInjection/ServiceCollectionEx.cs @@ -0,0 +1,16 @@ +using JetBrains.Annotations; +using Microsoft.Extensions.DependencyInjection; + +namespace LocalPost.BackgroundQueues.DependencyInjection; + +[PublicAPI] +public static class ServiceCollectionEx +{ + public static IServiceCollection AddBackgroundQueues(this IServiceCollection services, + Action configure) + { + configure(new BackgroundQueuesBuilder(services)); + + return services; + } +} diff --git a/src/LocalPost/BackgroundQueues/HandlerStackEx.cs b/src/LocalPost/BackgroundQueues/HandlerStackEx.cs new file mode 100644 index 0000000..0dfd14d --- /dev/null +++ b/src/LocalPost/BackgroundQueues/HandlerStackEx.cs @@ -0,0 +1,34 @@ +using System.Diagnostics; +using JetBrains.Annotations; + +namespace LocalPost.BackgroundQueues; + +[PublicAPI] +public static partial class HandlerStackEx +{ + public static HandlerFactory> UsePayload(this HandlerFactory hf) => + hf.Map, T>(next => async (context, ct) => await next(context.Payload, ct)); + + public static HandlerFactory> Trace(this HandlerFactory> hf) + { + var typeName = Reflection.FriendlyNameOf(); + var transactionName = $"{typeName} process"; + return hf.Map, ConsumeContext>(next => async (context, ct) => + { + using var activity = 
context.ActivityContext.HasValue + ? BackgroundActivitySource.Source.StartActivity(transactionName, ActivityKind.Consumer, + context.ActivityContext.Value) + : BackgroundActivitySource.Source.StartActivity(transactionName, ActivityKind.Consumer); + try + { + await next(context, ct); + activity?.Success(); + } + catch (Exception e) + { + activity?.Error(e); + throw; + } + }); + } +} diff --git a/src/LocalPost/ConcurrentHostedServices.cs b/src/LocalPost/ConcurrentHostedServices.cs index 366899b..0600576 100644 --- a/src/LocalPost/ConcurrentHostedServices.cs +++ b/src/LocalPost/ConcurrentHostedServices.cs @@ -60,33 +60,21 @@ public Task CheckHealthAsync(HealthCheckContext context, public Exception? Exception { get; } } -internal class NamedBackgroundServiceRunner : BackgroundServiceRunner, INamedService +internal class NamedBackgroundServiceRunner(T service, IHostApplicationLifetime appLifetime) + : BackgroundServiceRunner(service, appLifetime), INamedService where T : class, IBackgroundService, INamedService { - public NamedBackgroundServiceRunner(T service, IHostApplicationLifetime appLifetime) : base(service, appLifetime) - { - Name = service.Name; - } - - public string Name { get; } + public string Name { get; } = service.Name; } -internal class BackgroundServiceRunner : IConcurrentHostedService, IBackgroundServiceMonitor, IDisposable +internal class BackgroundServiceRunner(T service, IHostApplicationLifetime appLifetime) + : IConcurrentHostedService, IBackgroundServiceMonitor, IDisposable where T : class, IBackgroundService { private Task? _start; private CancellationTokenSource? _executionCts; private Task? _execution; - private readonly T _service; - private readonly IHostApplicationLifetime _appLifetime; - - public BackgroundServiceRunner(T service, IHostApplicationLifetime appLifetime) - { - _service = service; - _appLifetime = appLifetime; - } - public bool Starting => _start is not null && !_start.IsCompleted; // StartedSuccessfully?.. 
@@ -105,10 +93,10 @@ private async Task WaitAppStartAsync(CancellationToken ct) { try { - // Wait until all other services are started - await Task.Delay(Timeout.Infinite, _appLifetime.ApplicationStarted).WaitAsync(ct); + // Wait until all other services have started + await Task.Delay(Timeout.Infinite, appLifetime.ApplicationStarted).WaitAsync(ct); } - catch (OperationCanceledException e) when (e.CancellationToken == _appLifetime.ApplicationStarted) + catch (OperationCanceledException e) when (e.CancellationToken == appLifetime.ApplicationStarted) { // Startup completed, continue } @@ -120,7 +108,7 @@ public async Task StartAsync(CancellationToken ct) if (_start is not null) throw new InvalidOperationException("Service is already started"); - await (_start = _service.StartAsync(ct)); + await (_start = service.StartAsync(ct)); // Start execution in the background... #pragma warning disable CS4014 @@ -136,9 +124,9 @@ private async Task ExecuteAsync() try { await WaitAppStartAsync(ct); - await (_execution = _service.ExecuteAsync(ct)); + await (_execution = service.ExecuteAsync(ct)); } - catch (OperationCanceledException e) when (e.CancellationToken == ct) + catch (OperationCanceledException) when (ct.IsCancellationRequested) { // Normal case, we trigger this token ourselves when stopping the service } @@ -164,7 +152,7 @@ public async Task StopAsync(CancellationToken forceExitToken) // Wait until the execution completes or the app is forced to exit await _execution.WaitAsync(forceExitToken); - await _service.StopAsync(forceExitToken); + await service.StopAsync(forceExitToken); } public void Dispose() @@ -177,14 +165,9 @@ internal interface IConcurrentHostedService : IHostedService { } -internal sealed class ConcurrentHostedServices : IHostedService +internal sealed class ConcurrentHostedServices(IEnumerable services) : IHostedService { - private readonly ImmutableArray _services; - - public ConcurrentHostedServices(IEnumerable services) - { - _services = 
services.ToImmutableArray(); - } + private readonly ImmutableArray _services = services.ToImmutableArray(); public Task StartAsync(CancellationToken cancellationToken) => Task.WhenAll(_services.Select(c => c.StartAsync(cancellationToken))); diff --git a/src/LocalPost/DependencyInjection/HealthChecks.cs b/src/LocalPost/DependencyInjection/HealthChecks.cs index dfb1e1b..faa690f 100644 --- a/src/LocalPost/DependencyInjection/HealthChecks.cs +++ b/src/LocalPost/DependencyInjection/HealthChecks.cs @@ -1,31 +1,81 @@ +using System.Collections.Immutable; +using JetBrains.Annotations; using Microsoft.Extensions.Diagnostics.HealthChecks; using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using Microsoft.Extensions.Logging; namespace LocalPost.DependencyInjection; +[PublicAPI] +public static partial class ServiceCollectionEx +{ + public static IServiceCollection AddAppHealthSupervisor(this IServiceCollection services, + IEnumerable? tags = null) + { + services.AddSingleton(provider => new AppHealthSupervisor( + provider.GetRequiredService>(), + provider.GetRequiredService(), + provider.GetRequiredService()) + { + Tags = tags?.ToImmutableHashSet() ?? ImmutableHashSet.Empty + }); + + services.AddBackgroundServiceFor(); + + return services; + } +} + +internal static class HealthChecksBuilderEx +{ + internal static IHealthChecksBuilder AddConsumerLivenessCheck(this IHealthChecksBuilder builder, + string? name = default, HealthStatus? failureStatus = default, IEnumerable? tags = default) + where TQ : IAsyncEnumerable + { + var check = HealthChecks.LivenessCheckFor>(failureStatus, tags); + if (name is not null) + check.Name = name; + + return builder.Add(check); + } + + internal static IHealthChecksBuilder AddNamedConsumerLivenessCheck(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = default, IEnumerable? 
tags = default) + where TQ : IAsyncEnumerable, INamedService + { + var check = HealthChecks.LivenessCheckForNamed>(name, failureStatus, tags); + + return builder.Add(check); + } +} + internal static class HealthChecks { - public static HealthCheckRegistration LivenessCheckFor(HealthStatus? failureStatus = null, - IEnumerable? tags = default) where T : class, IBackgroundService => new( - Reflection.FriendlyNameOf(), // Can be overwritten later - provider => new IBackgroundServiceMonitor.LivenessCheck - { Service = provider.GetRequiredService>() }, - failureStatus, // Can be overwritten later - tags); - - public static HealthCheckRegistration LivenessCheckForNamed(string name, HealthStatus? failureStatus = null, - IEnumerable? tags = default) where T : class, IBackgroundService, INamedService => new( - name, // Can be overwritten later - provider => new IBackgroundServiceMonitor.LivenessCheck - { Service = provider.GetRequiredService>(name) }, - failureStatus, // Can be overwritten later - tags); - - public static HealthCheckRegistration ReadinessCheckForNamed(string name, HealthStatus? failureStatus = null, - IEnumerable? tags = default) where T : class, IBackgroundService, INamedService => new( - name, // Can be overwritten later - provider => new IBackgroundServiceMonitor.ReadinessCheck - { Service = provider.GetRequiredService>(name) }, - failureStatus, // Can be overwritten later - tags); + public static HealthCheckRegistration LivenessCheckFor( + HealthStatus? failureStatus = null, IEnumerable? tags = null) + where T : class, IBackgroundService => + new(Reflection.FriendlyNameOf(), // Can be overwritten later + provider => new IBackgroundServiceMonitor.LivenessCheck + { Service = provider.GetRequiredService>() }, + failureStatus, // Can be overwritten later + tags); + + public static HealthCheckRegistration LivenessCheckForNamed(string name, + HealthStatus? failureStatus = null, IEnumerable? 
tags = null) + where T : class, IBackgroundService, INamedService => + new(name, // Can be overwritten later + provider => new IBackgroundServiceMonitor.LivenessCheck + { Service = provider.GetRequiredService>(name) }, + failureStatus, // Can be overwritten later + tags); + + public static HealthCheckRegistration ReadinessCheckForNamed( + string name, HealthStatus? failureStatus = null, IEnumerable? tags = null) + where T : class, IBackgroundService, INamedService => + new(name, // Can be overwritten later + provider => new IBackgroundServiceMonitor.ReadinessCheck + { Service = provider.GetRequiredService>(name) }, + failureStatus, // Can be overwritten later + tags); } diff --git a/src/LocalPost/DependencyInjection/ServiceCollectionEx.cs b/src/LocalPost/DependencyInjection/ServiceCollectionEx.cs new file mode 100644 index 0000000..f04a061 --- /dev/null +++ b/src/LocalPost/DependencyInjection/ServiceCollectionEx.cs @@ -0,0 +1,57 @@ +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Logging; + +namespace LocalPost.DependencyInjection; + +public static partial class ServiceCollectionEx +{ + internal static bool TryAddBackgroundConsumer(this IServiceCollection services, string name, + HandlerFactory hf, Func of) + where TQ : IAsyncEnumerable, INamedService + { + if (!services.TryAddNamedSingleton(name, CreateConsumer)) + return false; + + services.AddBackgroundServiceForNamed>(name); + + return true; + + BackgroundQueue.NamedConsumer CreateConsumer(IServiceProvider provider) + { + var options = of(provider); + var handler = hf(provider); + + return new BackgroundQueue.NamedConsumer( + provider.GetRequiredService>>(), + provider.GetRequiredService(name), handler, options.MaxConcurrency) + { + BreakOnException = options.BreakOnException + }; + } + } + + internal static bool TryAddBackgroundConsumer(this IServiceCollection services, + HandlerFactory hf, Func of) + where TQ : IAsyncEnumerable + { + if (!services.TryAddSingleton(CreateConsumer)) + 
return false; + + services.AddBackgroundServiceFor>(); + + return true; + + BackgroundQueue.Consumer CreateConsumer(IServiceProvider provider) + { + var options = of(provider); + var handler = hf(provider); + + return new BackgroundQueue.Consumer( + provider.GetRequiredService>>(), + provider.GetRequiredService(), handler, options.MaxConcurrency) + { + BreakOnException = options.BreakOnException + }; + } + } +} diff --git a/src/LocalPost/DependencyInjection/Registration.cs b/src/LocalPost/DependencyInjection/ServiceCollectionTools.cs similarity index 65% rename from src/LocalPost/DependencyInjection/Registration.cs rename to src/LocalPost/DependencyInjection/ServiceCollectionTools.cs index a244e76..1856240 100644 --- a/src/LocalPost/DependencyInjection/Registration.cs +++ b/src/LocalPost/DependencyInjection/ServiceCollectionTools.cs @@ -1,62 +1,9 @@ using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Diagnostics.HealthChecks; using Microsoft.Extensions.Hosting; namespace LocalPost.DependencyInjection; -internal static class ConsumerGroupRegistration -{ - internal static bool TryAddConsumerGroup(this IServiceCollection services, string name, - HandlerFactory configure) where TQ : IAsyncEnumerable, INamedService - { -// services.TryAddConsumerGroup(name, provider => BackgroundQueue.ConsumerGroupFor( -// provider.GetRequiredService(name), configure(provider), 1)); - if (!services.TryAddNamedSingleton(name, provider => BackgroundQueue.ConsumerGroupForNamed( - provider.GetRequiredService(name), configure(provider), 1))) // FIXME Config - return false; - - services.AddBackgroundServiceForNamed>(name); - - return true; - } - - internal static bool TryAddConsumerGroup(this IServiceCollection services, - HandlerFactory configure) where TQ : IAsyncEnumerable - { - if (!services.TryAddSingleton(provider => BackgroundQueue.ConsumerGroupFor( - provider.GetRequiredService(), configure(provider), 1))) // FIXME Config - return false; - - 
services.AddBackgroundServiceFor>(); - - return true; - } -} - -internal static class HealthChecksRegistration -{ - public static IHealthChecksBuilder AddConsumerGroupLivenessCheck(this IHealthChecksBuilder builder, - string? name = default, HealthStatus? failureStatus = default, IEnumerable? tags = default) - where TQ : IAsyncEnumerable - { - var check = HealthChecks.LivenessCheckFor>(failureStatus, tags); - if (name is not null) - check.Name = name; - - return builder.Add(check); - } - - public static IHealthChecksBuilder AddNamedConsumerGroupLivenessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) - where TQ : IAsyncEnumerable, INamedService - { - var check = HealthChecks.LivenessCheckForNamed>(name, failureStatus, tags); - - return builder.Add(check); - } -} - -internal static class Registration +internal static class ServiceCollectionTools { public static void AddConcurrentHostedServices(this IServiceCollection services) => services .AddHostedService(); @@ -145,4 +92,15 @@ public static IServiceCollection AddSingletonAlias(th where TService : class where TImplementation : class, TService, INamedService => services.AddSingleton(provider => provider.GetRequiredService(name)); + + public static bool TryAddSingletonAlias(this IServiceCollection services) + where TService : class + where TImplementation : class, TService => + services.TryAddSingleton(provider => provider.GetRequiredService()); + + public static bool TryAddSingletonAlias(this IServiceCollection services, + string name) + where TService : class + where TImplementation : class, TService, INamedService => + services.TryAddSingleton(provider => provider.GetRequiredService(name)); } diff --git a/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs b/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs index ef4f7ed..ed54bc4 100644 --- a/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs +++ 
b/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs @@ -10,9 +10,10 @@ public static T GetRequiredService(this IServiceProvider provider, string nam where T : INamedService => provider.GetRequiredService>().First(x => x.Name == name); - public static T GetOptions(this IServiceProvider provider) => provider.GetOptions(Options.DefaultName); + public static T GetOptions(this IServiceProvider provider) where T : class => + provider.GetRequiredService>().Value; - public static T GetOptions(this IServiceProvider provider, string name) => + public static T GetOptions(this IServiceProvider provider, string name) where T : class => provider.GetRequiredService>().Get(name); public static ILogger GetLoggerFor(this IServiceProvider provider) => diff --git a/src/LocalPost/DependencyInjection/ServiceRegistration.cs b/src/LocalPost/DependencyInjection/ServiceRegistration.cs deleted file mode 100644 index 55c33ec..0000000 --- a/src/LocalPost/DependencyInjection/ServiceRegistration.cs +++ /dev/null @@ -1,39 +0,0 @@ -using JetBrains.Annotations; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Options; - -namespace LocalPost.DependencyInjection; - -[PublicAPI] -public static class ServiceRegistration -{ - public static OptionsBuilder> AddBackgroundJobQueue(this IServiceCollection services) - { - services.TryAddSingleton(); - services.TryAddSingleton(provider => provider.GetRequiredService()); - - return services.AddBackgroundQueue(_ => async (job, ct) => await job(ct)); - } - - // THandler has to be registered by the user - public static OptionsBuilder> AddBackgroundQueue( - this IServiceCollection services, - HandlerFactory? 
configure = null) where THandler : IHandler => - services.AddBackgroundQueue(HandlerStack.From().Scoped()); - - public static OptionsBuilder> AddBackgroundQueue(this IServiceCollection services, - HandlerFactory configure) - { - services.TryAddSingleton>(provider => provider.GetRequiredService>()); - services.TryAddSingleton(provider => - BackgroundQueue.Create(provider.GetOptions>())); - services.AddBackgroundServiceFor>(); - - // FIXME Prevent adding two services with different handlers... Do not allow calling this method twice for the same queue? - services.TryAddConsumerGroup>(configure); - - return services.AddOptions>(); - } - - // TODO Batched -} diff --git a/src/LocalPost/Handler.cs b/src/LocalPost/Handler.cs new file mode 100644 index 0000000..381d8e5 --- /dev/null +++ b/src/LocalPost/Handler.cs @@ -0,0 +1,24 @@ +namespace LocalPost; + +public delegate ValueTask Handler(T context, CancellationToken ct); + +public delegate Handler HandlerFactory(IServiceProvider provider); + +public delegate Handler HandlerMiddleware(Handler next); + +// Too narrow use case +// public delegate HandlerMiddleware HandlerMiddlewareFactory(IServiceProvider provider); + +// Even more narrow use case, confuses more than helps +// public delegate HandlerFactory HandlerFactoryMiddleware(HandlerFactory hf); + +public interface IHandler +{ + ValueTask InvokeAsync(TOut payload, CancellationToken ct); +} + +// Too narrow use case +// public interface IHandlerMiddleware +// { +// Handler Invoke(Handler next); +// } diff --git a/src/LocalPost/HandlerStack.cs b/src/LocalPost/HandlerStack.cs index 4e6dfef..3799868 100644 --- a/src/LocalPost/HandlerStack.cs +++ b/src/LocalPost/HandlerStack.cs @@ -3,242 +3,17 @@ namespace LocalPost; - - -public interface IHandler -{ - ValueTask InvokeAsync(TOut payload, CancellationToken ct); -} - -//public interface IMiddleware -//{ -// Handler Invoke(Handler next); -//} - -public delegate ValueTask Handler(T context, CancellationToken ct); - -public 
delegate Handler HandlerFactory(IServiceProvider provider); - -//public delegate Handler Middleware(Handler next); -////public delegate Task Middleware(T context, Handler next, CancellationToken ct); -// -//public delegate Middleware MiddlewareFactory(IServiceProvider provider); - - - -public delegate Handler HandlerMiddleware(Handler next); -//public delegate Handler HandlerMiddleware(Handler next); - -public delegate HandlerMiddleware HandlerMiddlewareFactory(IServiceProvider provider); -//public delegate HandlerMiddleware HandlerMiddlewareFactory(IServiceProvider provider); - - -public delegate HandlerFactory HandlerFactoryMiddleware(HandlerFactory hf); -//public delegate HandlerFactory HandlerFactoryMiddleware(HandlerFactory hf); - -public interface IHandlerMiddleware +[PublicAPI] +public static class HandlerStack { - Handler Invoke(Handler next); + public static readonly HandlerFactory Empty = _ => (_, _) => default; } -//public interface IHandlerMiddleware : IMiddleware2 -//{ -//} [PublicAPI] -public static partial class HandlerStack +public static class HandlerStack { public static HandlerFactory For(Handler handler) => _ => handler; public static HandlerFactory From() where THandler : IHandler => provider => provider.GetRequiredService().InvokeAsync; - - - - public static HandlerFactory Map(this HandlerFactory handlerFactory, - HandlerFactoryMiddleware middleware) => middleware(handlerFactory); - - public static HandlerFactory Map(this HandlerFactory handlerFactory, - HandlerMiddlewareFactory middlewareFactory) => provider => - { - var h = handlerFactory(provider); - var m = middlewareFactory(provider); - - return m(h); - }; - -// public static HandlerFactory Map(this HandlerFactory handlerFactory, -// HandlerMiddlewareFactory middlewareFactory) => provider => -// { -// var h = handlerFactory(provider); -// var m = middlewareFactory(provider); -// -// return m(h); -// }; - - public static HandlerFactory Map(this HandlerFactory handlerFactory, - 
HandlerMiddleware middleware) => handlerFactory.Map(_ => middleware); - -// public static HandlerFactory Map(this HandlerFactory handlerFactory, -// HandlerMiddleware middleware) => handlerFactory.Map(_ => middleware); - - // Really no need... -// public static HandlerFactory Map(this HandlerFactory handlerFactory, -// IMiddleware2 middleware) => handlerFactory.Map(middleware.Invoke); - - public static HandlerFactory Scoped(this HandlerFactory hf) => hf.Map(ScopedHandler.Wrap); - - public static HandlerFactory SkipWhen(this HandlerFactory handlerStack, Func pred) => - handlerStack.Map(next => async (context, ct) => - { - if (pred(context)) - return; - - await next(context, ct); - }); - - -// public static HandlerFactory Append(this HandlerFactory handlerFactory, -// HandlerMiddlewareFactory handlerMiddlewareFactory) => -// provider => handlerMiddlewareFactory(provider)(handlerFactory(provider)); -// -// public static HandlerFactory Append(this HandlerFactory handlerFactory, -// HandlerMiddlewareFactory handlerMiddlewareFactory) => -// provider => handlerMiddlewareFactory(provider)(handlerFactory(provider)); -// -// // Because... -//// public static HandlerFactory Append(this HandlerFactory handlerFactory, -//// Func> middlewareFactory) => -//// handlerFactory.Append(provider => middlewareFactory(provider)); -// -// public static HandlerFactory AppendMiddleware(this HandlerFactory handlerFactory, -// HandlerMiddleware middleware) => -// handlerFactory.Append(_ => middleware); -// -// public static HandlerFactory Append(this HandlerFactory handlerFactory, -// IMiddleware2 middleware) => -// handlerFactory.Append(_ => middleware.Invoke); -// -// // C# can only infer ALL generics of a method... Or nothing, so you have to specify each one manually. Not very -// // convenient, but creating a wrapper class is even worse. 
-// public static HandlerFactory Append(this HandlerFactory handlerFactory) -// where TMiddleware : class, IMiddleware2 => -// handlerFactory.Append(provider => provider.GetRequiredService().Invoke); -// -// // C# can only infer ALL generics of a method... Or nothing, so you have to specify each one manually. Not very -// // convenient, but creating a wrapper class is even worse. -// public static HandlerFactory Append(this HandlerFactory handlerFactory) -// where TMiddleware : class, IMiddleware2 => handlerFactory.Append(); -} - -[PublicAPI] -public static class HandlerStack -{ - public static readonly HandlerFactory Empty = _ => (_, _) => default; - -// public static HandlerStack2 From(Handler handler) => new() -// { -// HandlerFactory = _ => handler -// }; -// -// public static HandlerStack2 From() where THandler : IHandler => new() -// { -// HandlerFactory = provider => provider.GetRequiredService().InvokeAsync -// }; -// -// public required HandlerFactory HandlerFactory { get; init; } -// -// public static implicit operator HandlerFactory(HandlerStack2 stack) => stack.HandlerFactory; -// -// public HandlerStack2 Append(MiddlewareFactory2 middlewareFactory) => new() -// { -// HandlerFactory = provider => middlewareFactory(provider)(HandlerFactory(provider)) -// }; -// -// public HandlerStack2 Append(Middleware2 middleware) => Append(_ => middleware); -// -// public HandlerStack2 Append() where TMiddleware : class, IMiddleware2 => -// Append(); -// -// public HandlerStack2 Append() where TMiddleware : class, IMiddleware2 => -// Append(provider => provider.GetRequiredService().Invoke); -// -// public HandlerStack2 Scoped() => new() -// { -// HandlerFactory = ScopedHandlerFactory.Wrap(HandlerFactory) -// }; } - - - - - - -// TODO Remove -//[PublicAPI] -//public sealed class HandlerStackBuilder : HandlerStackBuilder> -//{ -//} -// -//[PublicAPI] -//public abstract class HandlerStackBuilder -// where TBuilder : HandlerStackBuilder -//{ -// protected readonly List> 
Middlewares = new(); -// protected HandlerFactory HandlerFactory = _ => (_, _) => Task.CompletedTask; -// -// public TBuilder SetHandler(Handler handler) => SetHandler(_ => handler); -// -// public TBuilder SetHandler() where THandler : IHandler => -// SetHandler(provider => provider.GetRequiredService().InvokeAsync); -// -// public TBuilder SetHandler(HandlerFactory factory) -// { -// HandlerFactory = factory; -// -// return (TBuilder) this; -// } -// -//// public TBuilder Append() where TMiddleware : IHandler -//// { -//// Middlewares.Add(provider => next => ActivatorUtilities.CreateInstance(provider, next).InvokeAsync); -//// -//// return (TBuilder) this; -//// } -// -// public TBuilder Append(Middleware middleware) => -// Append(_ => middleware); -// -// public TBuilder Append() where TMiddleware : class, IMiddleware -// { -// Middlewares.Add(provider => provider.GetRequiredService().Invoke); -// -// return (TBuilder) this; -// } -// -// public TBuilder Append(MiddlewareFactory factory) -// { -// Middlewares.Add(factory); -// -// return (TBuilder) this; -// } -// -// internal HandlerStack Build() => new(HandlerFactory, Middlewares); -//} -// -//[PublicAPI] -//public sealed class HandlerStack -//{ -// private readonly HandlerFactory _handler; -// private readonly ImmutableArray> _middlewares; -// -// public HandlerStack(HandlerFactory handler, IEnumerable>? middlewares = null) -// { -// _handler = handler; -// _middlewares = middlewares?.ToImmutableArray() ?? 
ImmutableArray>.Empty; -// } -// -// public Handler Resolve(IServiceProvider provider) => _middlewares -// .Select(factory => factory(provider)) -// .Reverse() -// .Aggregate(_handler(provider), (next, middleware) => middleware(next)); -//} diff --git a/src/LocalPost/HandlerStackEx.cs b/src/LocalPost/HandlerStackEx.cs new file mode 100644 index 0000000..1e79e2b --- /dev/null +++ b/src/LocalPost/HandlerStackEx.cs @@ -0,0 +1,131 @@ +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; + +namespace LocalPost; + +public static partial class HandlerStackEx +{ + // Better use a lambda in place, see Scoped() middleware + // public static HandlerFactory Map(this HandlerFactory hf, + // HandlerFactoryMiddleware middleware) => middleware(hf); + + // Just resolve it manually, it's one line longer, same cognitive load or even less, + // and one additional type less + // public static HandlerFactory Map(this HandlerFactory hf, + // HandlerMiddlewareFactory middlewareFactory) => provider => + // { + // var h = hf(provider); + // var m = middlewareFactory(provider); + // + // return m(h); + // }; + + // Too narrow use case, but makes the Map() method inconvenient to use + // public static HandlerFactory Map(this HandlerFactory hf, + // Func> middlewareFactory) => hf.Map(provider => + // middlewareFactory(provider).Invoke); + // public static HandlerFactory Map(this HandlerFactory hf, + // Func> middlewareFactory) => provider => + // { + // var handler = hf(provider); + // return middlewareFactory(provider).Invoke(handler); + // }; + + // public static HandlerFactory Map(this HandlerFactory hf, + // HandlerMiddleware middleware) => hf.Map(_ => middleware); + public static HandlerFactory Map(this HandlerFactory hf, + HandlerMiddleware middleware) => provider => + { + var handler = hf(provider); + return middleware(handler); + }; + + public static HandlerFactory Touch(this HandlerFactory hf, + HandlerMiddleware middleware) => hf.Map(middleware); + + 
// No need, just use a lambda in place + // public static HandlerFactory Map(this HandlerFactory hf, + // where T : IHandlerMiddleware => hf.Map(provider => + // ActivatorUtilities.CreateInstance(provider).Invoke); + // + // public static HandlerFactory Scoped(this HandlerFactory hf) => hf.Map(ScopedHandler.Wrap); + + public static HandlerFactory Dispose(this HandlerFactory hf) where T : IDisposable => + hf.Map(next => async (context, ct) => + { + try + { + await next(context, ct); + } + finally + { + context.Dispose(); + } + }); + + public static HandlerFactory DisposeAsync(this HandlerFactory hf) where T : IAsyncDisposable => + hf.Map(next => async (context, ct) => + { + try + { + await next(context, ct); + } + finally + { + await context.DisposeAsync(); + } + }); + + public static HandlerFactory SkipWhen(this HandlerFactory hf, Func pred) => + hf.Map(next => async (context, ct) => + { + if (pred(context)) + return; + + await next(context, ct); + }); + + // public static HandlerFactory ShutdownOnError(this HandlerFactory hf, int exitCode = 1) => + // hf.Map(provider => + // { + // var appLifetime = provider.GetRequiredService(); + // return next => async (context, ct) => + // { + // try + // { + // await next(context, ct); + // } + // catch (OperationCanceledException e) when (e.CancellationToken == ct) + // { + // throw; + // } + // catch + // { + // appLifetime.StopApplication(); + // Environment.ExitCode = exitCode; + // } + // }; + // }); + public static HandlerFactory ShutdownOnError(this HandlerFactory hf, int exitCode = 1) => provider => + { + var appLifetime = provider.GetRequiredService(); + var next = hf(provider); + + return async (context, ct) => + { + try + { + await next(context, ct); + } + catch (OperationCanceledException e) when (e.CancellationToken == ct) + { + throw; + } + catch + { + appLifetime.StopApplication(); + Environment.ExitCode = exitCode; + } + }; + }; +} diff --git a/src/LocalPost/LocalPost.csproj b/src/LocalPost/LocalPost.csproj 
index 25ddbc7..cdb99c7 100644 --- a/src/LocalPost/LocalPost.csproj +++ b/src/LocalPost/LocalPost.csproj @@ -1,7 +1,7 @@ - netstandard2.0 + netstandard2.0;net6.0;net8.0 true LocalPost @@ -42,7 +42,6 @@ - @@ -50,10 +49,19 @@ + + + + - + + + + + + diff --git a/src/LocalPost/Middlewares.cs b/src/LocalPost/Middlewares.cs index 351ef2b..1e25980 100644 --- a/src/LocalPost/Middlewares.cs +++ b/src/LocalPost/Middlewares.cs @@ -1,41 +1,53 @@ using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Logging; namespace LocalPost; -public static partial class HandlerStack +public static partial class HandlerStackEx { - public static HandlerFactory LogErrors(this HandlerFactory handlerStack) => - handlerStack.Map(provider => - ActivatorUtilities.CreateInstance>(provider).Invoke); + // public static HandlerFactory LogErrors(this HandlerFactory hf) => hf.Map(provider => + // ActivatorUtilities.CreateInstance>(provider)); + + public static HandlerFactory Scoped(this HandlerFactory hf) => provider => + { + var scopeFactory = provider.GetRequiredService(); + return new ScopedHandler(scopeFactory, hf).InvokeAsync; + }; } -internal class LoggingErrorHandler : IHandlerMiddleware +internal sealed class ScopedHandler(IServiceScopeFactory sf, HandlerFactory hf) : IHandler { - private readonly ILogger> _logger; - - public LoggingErrorHandler(ILogger> logger) + public async ValueTask InvokeAsync(T payload, CancellationToken ct) { - _logger = logger; - } + // See https://andrewlock.net/exploring-dotnet-6-part-10-new-dependency-injection-features-in-dotnet-6/#handling-iasyncdisposable-services-with-iservicescope + // And also https://devblogs.microsoft.com/dotnet/announcing-net-6/#microsoft-extensions-dependencyinjection-createasyncscope-apis + await using var scope = sf.CreateAsyncScope(); - public Handler Invoke(Handler next) => async (context, ct) => - { - try - { - await next(context, ct); - } - catch (OperationCanceledException e) when (e.CancellationToken == ct) - { - 
throw; - } - catch (Exception e) - { - _logger.LogError(e, "Unhandled exception while processing a message"); - } - }; + var handler = hf(scope.ServiceProvider); + + await handler(payload, ct); + } } +// Too narrow use case in the first place, also easier to implement using a lambda +// internal class ErrorLoggingHandler(ILogger logger) : IHandlerMiddleware +// { +// public Handler Invoke(Handler next) => async (context, ct) => +// { +// try +// { +// await next(context, ct); +// } +// catch (OperationCanceledException e) when (e.CancellationToken == ct) +// { +// throw; +// } +// catch (Exception e) +// { +// logger.LogError(e, "Unhandled exception while processing a message"); +// } +// }; +// } + // TODO Just add it as an example, also using Polly //[PublicAPI] //public static class Middlewares diff --git a/src/LocalPost/Primitives.cs b/src/LocalPost/Primitives.cs new file mode 100644 index 0000000..46e8da3 --- /dev/null +++ b/src/LocalPost/Primitives.cs @@ -0,0 +1,21 @@ +namespace LocalPost; + +// [PublicAPI] +internal readonly record struct MaxSize +{ + public static implicit operator int(MaxSize batchSize) => batchSize.Value; + + public static implicit operator MaxSize(int batchSize) => new(batchSize); + public static implicit operator MaxSize(short batchSize) => new(batchSize); + public static implicit operator MaxSize(ushort batchSize) => new(batchSize); + + public readonly int Value = 1; + + public MaxSize(int value) + { + if (value <= 1) + throw new ArgumentOutOfRangeException(nameof(value), value, "Batch size must be positive."); + + Value = value; + } +} diff --git a/src/LocalPost/QueuePublisher.cs b/src/LocalPost/QueuePublisher.cs new file mode 100644 index 0000000..1431d2f --- /dev/null +++ b/src/LocalPost/QueuePublisher.cs @@ -0,0 +1,10 @@ +using JetBrains.Annotations; + +namespace LocalPost; + +[PublicAPI] +public interface IQueuePublisher +{ + // TODO Custom exception when closed?.. Or just return true/false?.. 
+ ValueTask Enqueue(T item, CancellationToken ct = default); +} diff --git a/src/LocalPost/ScopedHandler.cs b/src/LocalPost/ScopedHandler.cs deleted file mode 100644 index d8f7787..0000000 --- a/src/LocalPost/ScopedHandler.cs +++ /dev/null @@ -1,36 +0,0 @@ -using Microsoft.Extensions.DependencyInjection; - -namespace LocalPost; - -internal static class ScopedHandler -{ - public static HandlerFactory Wrap(HandlerFactory handlerFactory) => provider => - { - var scopeFactory = provider.GetRequiredService(); - return new ScopedHandler(scopeFactory, handlerFactory).InvokeAsync; - }; - -} - -internal sealed class ScopedHandler : IHandler -{ - private readonly IServiceScopeFactory _scopeFactory; - private readonly HandlerFactory _handlerFactory; - - public ScopedHandler(IServiceScopeFactory scopeFactory, HandlerFactory handlerFactory) - { - _scopeFactory = scopeFactory; - _handlerFactory = handlerFactory; - } - - public async ValueTask InvokeAsync(T payload, CancellationToken ct) - { - // See https://andrewlock.net/exploring-dotnet-6-part-10-new-dependency-injection-features-in-dotnet-6/#handling-iasyncdisposable-services-with-iservicescope - // And also https://devblogs.microsoft.com/dotnet/announcing-net-6/#microsoft-extensions-dependencyinjection-createasyncscope-apis - await using var scope = _scopeFactory.CreateAsyncScope(); - - var handler = _handlerFactory(scope.ServiceProvider); - - await handler(payload, ct); - } -} From a5e1387eeecf7cc67c5412bc448aa5869483e106 Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Sat, 25 May 2024 11:28:50 +0000 Subject: [PATCH 07/33] chore: BQ Options, CompletionDelay --- src/LocalPost/BackgroundQueues/BackgroundQueue.cs | 8 ++++---- .../DependencyInjection/BackgroundQueuesBuilder.cs | 12 ++++++------ .../{BackgroundQueueOptions.cs => Options.cs} | 10 +++++----- 3 files changed, 15 insertions(+), 15 deletions(-) rename src/LocalPost/BackgroundQueues/{BackgroundQueueOptions.cs => Options.cs} (80%) diff --git 
a/src/LocalPost/BackgroundQueues/BackgroundQueue.cs b/src/LocalPost/BackgroundQueues/BackgroundQueue.cs index 6ea88ed..e5f168d 100644 --- a/src/LocalPost/BackgroundQueues/BackgroundQueue.cs +++ b/src/LocalPost/BackgroundQueues/BackgroundQueue.cs @@ -5,11 +5,11 @@ namespace LocalPost.BackgroundQueues; internal static class BackgroundQueue { - public static BackgroundQueue> Create(BackgroundQueueOptions options) => + public static BackgroundQueue> Create(Options options) => Create>(options, reader => reader.ReadAllAsync()); public static BackgroundQueue>> CreateBatched( - BatchedBackgroundQueueOptions options) => + BatchedOptions options) => Create>>(options, reader => reader .ReadAllAsync() @@ -18,7 +18,7 @@ public static BackgroundQueue>> CreateBatched true); // To make the pipeline linear (single consumer), just add .ToConcurrent() to the end - public static BackgroundQueue Create(BackgroundQueueOptions options, + public static BackgroundQueue Create(Options options, Func>, IAsyncEnumerable> configure, bool proxy = false) // TODO Rename this parameter somehow... 
{ @@ -42,7 +42,7 @@ public static BackgroundQueue Create(BackgroundQueueOptions op pipeline = pipeline.ToConcurrent(); return new BackgroundQueue(channel, pipeline, - TimeSpan.FromMilliseconds(options.CompletionTimeout)); + TimeSpan.FromMilliseconds(options.CompletionDelay)); } } diff --git a/src/LocalPost/BackgroundQueues/DependencyInjection/BackgroundQueuesBuilder.cs b/src/LocalPost/BackgroundQueues/DependencyInjection/BackgroundQueuesBuilder.cs index 52bd89b..9e917a2 100644 --- a/src/LocalPost/BackgroundQueues/DependencyInjection/BackgroundQueuesBuilder.cs +++ b/src/LocalPost/BackgroundQueues/DependencyInjection/BackgroundQueuesBuilder.cs @@ -8,7 +8,7 @@ namespace LocalPost.BackgroundQueues.DependencyInjection; [PublicAPI] public class BackgroundQueuesBuilder(IServiceCollection services) { - public OptionsBuilder> AddJobQueue() + public OptionsBuilder> AddJobQueue() { services.TryAddSingleton(); services.TryAddSingletonAlias(); @@ -23,7 +23,7 @@ public OptionsBuilder> AddJobQueue() } // THandler has to be registered by the user - public OptionsBuilder> AddQueue() + public OptionsBuilder> AddQueue() where THandler : IHandler => AddQueue( HandlerStack.From() @@ -32,23 +32,23 @@ public OptionsBuilder> AddQueue() .Trace() ); - public OptionsBuilder> AddQueue(HandlerFactory> hf) + public OptionsBuilder> AddQueue(HandlerFactory> hf) { if (!services.TryAddSingletonAlias, BackgroundQueue>>()) // return ob; // Already added, don't register twice throw new InvalidOperationException($"BackgroundQueue<{Reflection.FriendlyNameOf()}> is already registered."); services.TryAddSingleton(provider => - BackgroundQueue.Create(provider.GetOptions>())); + BackgroundQueue.Create(provider.GetOptions>())); services.AddBackgroundServiceFor>>(); services.TryAddBackgroundConsumer, BackgroundQueue>>(hf, provider => { - var options = provider.GetOptions>(); + var options = provider.GetOptions>(); return new ConsumerOptions(options.MaxConcurrency, false); }); - return services.AddOptions>(); 
+ return services.AddOptions>(); } // TODO Batched diff --git a/src/LocalPost/BackgroundQueues/BackgroundQueueOptions.cs b/src/LocalPost/BackgroundQueues/Options.cs similarity index 80% rename from src/LocalPost/BackgroundQueues/BackgroundQueueOptions.cs rename to src/LocalPost/BackgroundQueues/Options.cs index 31d8540..b7ec0fe 100644 --- a/src/LocalPost/BackgroundQueues/BackgroundQueueOptions.cs +++ b/src/LocalPost/BackgroundQueues/Options.cs @@ -4,12 +4,12 @@ namespace LocalPost.BackgroundQueues; // For the DI container, to distinguish between different queues -public sealed record BackgroundQueueOptions : BackgroundQueueOptions; +public sealed record Options : Options; // For the DI container, to distinguish between different queues -public sealed record BatchedBackgroundQueueOptions : BatchedBackgroundQueueOptions; +public sealed record BatchedOptions : BatchedOptions; -public record BatchedBackgroundQueueOptions : BackgroundQueueOptions +public record BatchedOptions : Options { [Range(1, ushort.MaxValue)] public ushort BatchMaxSize { get; set; } = 10; @@ -23,7 +23,7 @@ public record BatchedBackgroundQueueOptions : BackgroundQueueOptions /// /// Background queue configuration. /// -public record BackgroundQueueOptions +public record Options { /// /// How to handle new messages when the queue (channel) is full. Default is to drop the oldest message (to not @@ -40,7 +40,7 @@ public record BackgroundQueueOptions /// /// How long to wait before closing the queue (channel) on app shutdown. Default is 1 second. /// - public ushort CompletionTimeout { get; set; } = 1_000; // Milliseconds + public ushort CompletionDelay { get; set; } = 1_000; // Milliseconds /// /// How many messages to process concurrently. Default is 10. 
From 1ecb183b45b7c87cf4f44cbbaa3804288970f468 Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Sat, 25 May 2024 14:47:54 +0000 Subject: [PATCH 08/33] chore: some renaming refactoring --- src/LocalPost.KafkaConsumer/HandlerStackEx.cs | 4 +- .../{KafkaActivitySource.cs => Tracing.cs} | 10 +-- src/LocalPost.SqsConsumer/HandlerStackEx.cs | 4 +- src/LocalPost.SqsConsumer/QueueClient.cs | 6 +- .../SqsConsumerService.cs | 83 ------------------- .../{SqsActivitySource.cs => Tracing.cs} | 10 +-- src/LocalPost/BackgroundActivitySource.cs | 26 ------ src/LocalPost/BackgroundQueueConsumer.cs | 2 +- .../BackgroundQueues/BackgroundJobQueue.cs | 6 +- .../BackgroundQueues/BackgroundQueue.cs | 7 +- .../BackgroundQueues/HandlerStackEx.cs | 6 +- src/LocalPost/BackgroundQueues/Tracing.cs | 19 +++++ .../DependencyInjection/HealthChecks.cs | 4 +- .../ServiceCollectionEx.cs | 16 ++-- src/LocalPost/OptionsEx.cs | 9 -- 15 files changed, 54 insertions(+), 158 deletions(-) rename src/LocalPost.KafkaConsumer/{KafkaActivitySource.cs => Tracing.cs} (93%) delete mode 100644 src/LocalPost.SqsConsumer/SqsConsumerService.cs rename src/LocalPost.SqsConsumer/{SqsActivitySource.cs => Tracing.cs} (94%) delete mode 100644 src/LocalPost/BackgroundActivitySource.cs create mode 100644 src/LocalPost/BackgroundQueues/Tracing.cs delete mode 100644 src/LocalPost/OptionsEx.cs diff --git a/src/LocalPost.KafkaConsumer/HandlerStackEx.cs b/src/LocalPost.KafkaConsumer/HandlerStackEx.cs index 5857d4f..c9aab8e 100644 --- a/src/LocalPost.KafkaConsumer/HandlerStackEx.cs +++ b/src/LocalPost.KafkaConsumer/HandlerStackEx.cs @@ -19,7 +19,7 @@ public static HandlerFactory> Trace(this HandlerFactory, ConsumeContext>(next => async (context, ct) => { - using var activity = KafkaActivitySource.StartProcessing(context); + using var activity = Tracing.StartProcessing(context); try { await next(context, ct); @@ -36,7 +36,7 @@ public static HandlerFactory> Trace(this HandlerFactor hf.Map, BatchConsumeContext>(next => async 
(context, ct) => { - using var activity = KafkaActivitySource.StartProcessing(context); + using var activity = Tracing.StartProcessing(context); try { await next(context, ct); diff --git a/src/LocalPost.KafkaConsumer/KafkaActivitySource.cs b/src/LocalPost.KafkaConsumer/Tracing.cs similarity index 93% rename from src/LocalPost.KafkaConsumer/KafkaActivitySource.cs rename to src/LocalPost.KafkaConsumer/Tracing.cs index ff10ffa..1f90053 100644 --- a/src/LocalPost.KafkaConsumer/KafkaActivitySource.cs +++ b/src/LocalPost.KafkaConsumer/Tracing.cs @@ -76,18 +76,18 @@ public static void AcceptDistributedTracingFrom(this Activity acti // - https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlActivitySource.cs#LL61C31-L61C49 // - https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlCommand.cs#L1639-L1644 // Also OTEL semantic convention: https://opentelemetry.io/docs/specs/semconv/messaging/messaging-spans/ -internal static class KafkaActivitySource +internal static class Tracing { - private static readonly System.Diagnostics.ActivitySource Source; + private static readonly ActivitySource Source; public static bool IsEnabled => Source.HasListeners(); - static KafkaActivitySource() + static Tracing() { // See https://stackoverflow.com/a/909583/322079 var assembly = Assembly.GetExecutingAssembly(); - var version = AssemblyName.GetAssemblyName(assembly.Location).Version; - Source = new System.Diagnostics.ActivitySource(assembly.FullName, version.ToString()); + var version = assembly.GetName().Version; + Source = new ActivitySource(assembly.FullName, version.ToString()); } public static Activity? 
StartProcessing(ConsumeContext context) diff --git a/src/LocalPost.SqsConsumer/HandlerStackEx.cs b/src/LocalPost.SqsConsumer/HandlerStackEx.cs index 2293812..571ddb9 100644 --- a/src/LocalPost.SqsConsumer/HandlerStackEx.cs +++ b/src/LocalPost.SqsConsumer/HandlerStackEx.cs @@ -18,7 +18,7 @@ public static HandlerFactory> Trace( this HandlerFactory> handlerStack) => handlerStack.Map, ConsumeContext>(next => async (context, ct) => { - using var activity = SqsActivitySource.StartProcessing(context); + using var activity = Tracing.StartProcessing(context); try { await next(context, ct); @@ -36,7 +36,7 @@ public static HandlerFactory> Trace( handlerStack.Map, BatchConsumeContext>(next => async (context, ct) => { // TODO Link distributed transactions from each message - using var activity = SqsActivitySource.StartProcessing(context); + using var activity = Tracing.StartProcessing(context); try { await next(context, ct); diff --git a/src/LocalPost.SqsConsumer/QueueClient.cs b/src/LocalPost.SqsConsumer/QueueClient.cs index 3027c59..f6e86ea 100644 --- a/src/LocalPost.SqsConsumer/QueueClient.cs +++ b/src/LocalPost.SqsConsumer/QueueClient.cs @@ -70,7 +70,7 @@ private async Task FetchQueueAttributesAsync(CancellationToken ct) public async Task> PullMessagesAsync(CancellationToken ct) { - using var activity = SqsActivitySource.StartReceiving(this); + using var activity = Tracing.StartReceiving(this); var attributeNames = EndpointOptions.AllAttributes; // Make configurable, later var messageAttributeNames = EndpointOptions.AllMessageAttributes; // Make configurable, later @@ -100,7 +100,7 @@ public async Task> PullMessagesAsync(CancellationToken ct) public async Task DeleteMessageAsync(ConsumeContext context) { - using var activity = SqsActivitySource.StartSettling(context); + using var activity = Tracing.StartSettling(context); await _sqs.DeleteMessageAsync(QueueUrl, context.ReceiptHandle); // TODO Log failures?.. 
@@ -108,7 +108,7 @@ public async Task DeleteMessageAsync(ConsumeContext context) public async Task DeleteMessagesAsync(BatchConsumeContext context) { - using var activity = SqsActivitySource.StartSettling(context); + using var activity = Tracing.StartSettling(context); var requests = context.Messages .Select((message, i) => new DeleteMessageBatchRequestEntry(i.ToString(), message.ReceiptHandle)) diff --git a/src/LocalPost.SqsConsumer/SqsConsumerService.cs b/src/LocalPost.SqsConsumer/SqsConsumerService.cs deleted file mode 100644 index 504c505..0000000 --- a/src/LocalPost.SqsConsumer/SqsConsumerService.cs +++ /dev/null @@ -1,83 +0,0 @@ -using LocalPost.DependencyInjection; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Diagnostics.HealthChecks; - -namespace LocalPost.SqsConsumer; - -// TODO Remove -// internal sealed class SqsConsumerService : INamedService -// { -// public static SqsConsumerService Create(IServiceProvider provider, string name, -// Action> configure) -// { -// var options = provider.GetOptions(name); -// -// var client = ActivatorUtilities.CreateInstance(provider, options); -// var messageSource = new MessageSource(client, options.Prefetch); -// var reader = new BackgroundServiceSupervisor(messageSource); -// -// var middlewares = new HandlerStackBuilder(); -// middlewares.Append(); -// configure(middlewares); -// -// var handler = ScopedHandlerFactory.Wrap(middlewares.Build())(provider); -// -// var consumer = BackgroundQueue.ConsumerFor(messageSource, handler); -// var consumerGroup = BackgroundQueue.ConsumerGroupSupervisorFor(consumer, options.MaxConcurrency); -// -// return new SqsConsumerService(name, reader, consumerGroup); -// } -// -// private SqsConsumerService(string name, IBackgroundServiceSupervisor reader, -// IBackgroundServiceSupervisor consumerGroup) -// { -// Name = name; -// -// Reader = reader; -// _readerReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(reader); -// 
_readerLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(reader); -// -// ConsumerGroup = consumerGroup; -// _consumerGroupReadinessCheck = new IBackgroundServiceSupervisor.ReadinessCheck(consumerGroup); -// _consumerGroupLivenessCheck = new IBackgroundServiceSupervisor.LivenessCheck(consumerGroup); -// } -// -// public string Name { get; } -// -// // Expose only the root supervisor to the host, to avoid deadlocks (.NET runtime handles background services -// // synchronously by default, so if consumers are stopped first, they will block the reader from completing the -// // channel). -// // public IHostedService Supervisor { get; } -// -// public IConcurrentHostedService Reader { get; } -// private readonly IHealthCheck _readerReadinessCheck; -// private readonly IHealthCheck _readerLivenessCheck; -// -// public IConcurrentHostedService ConsumerGroup { get; } -// private readonly IHealthCheck _consumerGroupReadinessCheck; -// private readonly IHealthCheck _consumerGroupLivenessCheck; -// -// public static HealthCheckRegistration QueueReadinessCheck(string name, HealthStatus? failureStatus = default, -// IEnumerable? tags = default) => new(name, -// provider => provider.GetRequiredService(name)._readerReadinessCheck, -// failureStatus, -// tags); -// -// public static HealthCheckRegistration QueueLivenessCheck(string name, HealthStatus? failureStatus = default, -// IEnumerable? tags = default) => new(name, -// provider => provider.GetRequiredService(name)._readerLivenessCheck, -// failureStatus, -// tags); -// -// public static HealthCheckRegistration ConsumerGroupReadinessCheck(string name, HealthStatus? failureStatus = default, -// IEnumerable? tags = default) => new(name, -// provider => provider.GetRequiredService(name)._consumerGroupReadinessCheck, -// failureStatus, -// tags); -// -// public static HealthCheckRegistration ConsumerGroupLivenessCheck(string name, HealthStatus? failureStatus = default, -// IEnumerable? 
tags = default) => new(name, -// provider => provider.GetRequiredService(name)._consumerGroupLivenessCheck, -// failureStatus, -// tags); -// } diff --git a/src/LocalPost.SqsConsumer/SqsActivitySource.cs b/src/LocalPost.SqsConsumer/Tracing.cs similarity index 94% rename from src/LocalPost.SqsConsumer/SqsActivitySource.cs rename to src/LocalPost.SqsConsumer/Tracing.cs index 76ea0ce..bde27cd 100644 --- a/src/LocalPost.SqsConsumer/SqsActivitySource.cs +++ b/src/LocalPost.SqsConsumer/Tracing.cs @@ -67,18 +67,18 @@ public static void SetDefaultTags(this Activity? activity, QueueClient client) // - https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlActivitySource.cs#LL61C31-L61C49 // - https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlCommand.cs#L1639-L1644 // Also OTEL semantic convention: https://opentelemetry.io/docs/specs/semconv/messaging/messaging-spans/ -internal static class SqsActivitySource +internal static class Tracing { - private static readonly System.Diagnostics.ActivitySource Source; + private static readonly ActivitySource Source; public static bool IsEnabled => Source.HasListeners(); - static SqsActivitySource() + static Tracing() { // See https://stackoverflow.com/a/909583/322079 var assembly = Assembly.GetExecutingAssembly(); - var version = AssemblyName.GetAssemblyName(assembly.Location).Version; - Source = new System.Diagnostics.ActivitySource(assembly.FullName, version.ToString()); + var version = assembly.GetName().Version; + Source = new ActivitySource(assembly.FullName, version.ToString()); } public static Activity? 
StartProcessing(ConsumeContext context) diff --git a/src/LocalPost/BackgroundActivitySource.cs b/src/LocalPost/BackgroundActivitySource.cs deleted file mode 100644 index dd81915..0000000 --- a/src/LocalPost/BackgroundActivitySource.cs +++ /dev/null @@ -1,26 +0,0 @@ -using System.Diagnostics; -using System.Reflection; - -namespace LocalPost; - -internal static class BackgroundActivitySource -{ - public static readonly ActivitySource Source; - - public static bool IsEnabled => Source.HasListeners(); - - static BackgroundActivitySource() - { - // See https://stackoverflow.com/a/909583/322079 - var assembly = Assembly.GetExecutingAssembly(); - var version = AssemblyName.GetAssemblyName(assembly.Location).Version; - Source = new System.Diagnostics.ActivitySource(assembly.FullName, version.ToString()); - } - - // public static Activity? StartProcessing(ConsumeContext context) - // { - // var activity = Source.CreateActivity($"{context.Client.Topic} process", ActivityKind., ac); - // - // return activity; - // } -} diff --git a/src/LocalPost/BackgroundQueueConsumer.cs b/src/LocalPost/BackgroundQueueConsumer.cs index 2ea95b2..16d9dd1 100644 --- a/src/LocalPost/BackgroundQueueConsumer.cs +++ b/src/LocalPost/BackgroundQueueConsumer.cs @@ -5,7 +5,7 @@ namespace LocalPost; internal sealed record ConsumerOptions(ushort MaxConcurrency, bool BreakOnException); -internal static class BackgroundQueue +internal static class Queue { // public static ConsumerGroup ConsumerGroupFor(TQ queue, Handler handler, ushort maxConcurrency) // where TQ : IAsyncEnumerable => new(Consumer.LoopOver(queue, handler), maxConcurrency); diff --git a/src/LocalPost/BackgroundQueues/BackgroundJobQueue.cs b/src/LocalPost/BackgroundQueues/BackgroundJobQueue.cs index 1d71991..d72f723 100644 --- a/src/LocalPost/BackgroundQueues/BackgroundJobQueue.cs +++ b/src/LocalPost/BackgroundQueues/BackgroundJobQueue.cs @@ -7,9 +7,5 @@ namespace LocalPost.BackgroundQueues; internal sealed class 
BackgroundJobQueue(BackgroundQueue> queue) : IBackgroundJobQueue { - public ValueTask Enqueue(ConsumeContext item, CancellationToken ct = default) => - queue.Enqueue(item, ct); - - public ValueTask Enqueue(BackgroundJob payload, CancellationToken ct = default) => - Enqueue(new ConsumeContext(payload), ct); + public ValueTask Enqueue(BackgroundJob payload, CancellationToken ct = default) => queue.Enqueue(payload, ct); } diff --git a/src/LocalPost/BackgroundQueues/BackgroundQueue.cs b/src/LocalPost/BackgroundQueues/BackgroundQueue.cs index e5f168d..db3002d 100644 --- a/src/LocalPost/BackgroundQueues/BackgroundQueue.cs +++ b/src/LocalPost/BackgroundQueues/BackgroundQueue.cs @@ -55,14 +55,13 @@ internal sealed class BackgroundQueue( ChannelWriter> input, IAsyncEnumerable pipeline, TimeSpan completionDelay) - : IAsyncEnumerable, IBackgroundService, IBackgroundQueue, IQueuePublisher> + : IAsyncEnumerable, IBackgroundService, IBackgroundQueue { public IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct) => pipeline.GetAsyncEnumerator(ct); // Track full or not later - public ValueTask Enqueue(ConsumeContext item, CancellationToken ct = default) => input.WriteAsync(item, ct); - - public ValueTask Enqueue(T item, CancellationToken ct = default) => Enqueue(new ConsumeContext(item), ct); + public ValueTask Enqueue(T item, CancellationToken ct = default) => + input.WriteAsync(new ConsumeContext(item), ct); public bool IsClosed { get; private set; } // TODO Use diff --git a/src/LocalPost/BackgroundQueues/HandlerStackEx.cs b/src/LocalPost/BackgroundQueues/HandlerStackEx.cs index 0dfd14d..a77ea24 100644 --- a/src/LocalPost/BackgroundQueues/HandlerStackEx.cs +++ b/src/LocalPost/BackgroundQueues/HandlerStackEx.cs @@ -4,7 +4,7 @@ namespace LocalPost.BackgroundQueues; [PublicAPI] -public static partial class HandlerStackEx +public static class HandlerStackEx { public static HandlerFactory> UsePayload(this HandlerFactory hf) => hf.Map, T>(next => async (context, ct) => await 
next(context.Payload, ct)); @@ -16,9 +16,9 @@ public static HandlerFactory> Trace(this HandlerFactory, ConsumeContext>(next => async (context, ct) => { using var activity = context.ActivityContext.HasValue - ? BackgroundActivitySource.Source.StartActivity(transactionName, ActivityKind.Consumer, + ? Tracing.Source.StartActivity(transactionName, ActivityKind.Consumer, context.ActivityContext.Value) - : BackgroundActivitySource.Source.StartActivity(transactionName, ActivityKind.Consumer); + : Tracing.Source.StartActivity(transactionName, ActivityKind.Consumer); try { await next(context, ct); diff --git a/src/LocalPost/BackgroundQueues/Tracing.cs b/src/LocalPost/BackgroundQueues/Tracing.cs new file mode 100644 index 0000000..c522c13 --- /dev/null +++ b/src/LocalPost/BackgroundQueues/Tracing.cs @@ -0,0 +1,19 @@ +using System.Diagnostics; +using System.Reflection; + +namespace LocalPost.BackgroundQueues; + +internal static class Tracing +{ + public static readonly ActivitySource Source; + + public static bool IsEnabled => Source.HasListeners(); + + static Tracing() + { + // See https://stackoverflow.com/a/909583/322079 + var assembly = Assembly.GetExecutingAssembly(); + var version = assembly.GetName().Version; + Source = new ActivitySource(assembly.FullName, version.ToString()); + } +} diff --git a/src/LocalPost/DependencyInjection/HealthChecks.cs b/src/LocalPost/DependencyInjection/HealthChecks.cs index faa690f..7afdae7 100644 --- a/src/LocalPost/DependencyInjection/HealthChecks.cs +++ b/src/LocalPost/DependencyInjection/HealthChecks.cs @@ -33,7 +33,7 @@ internal static IHealthChecksBuilder AddConsumerLivenessCheck(this IHealt string? name = default, HealthStatus? failureStatus = default, IEnumerable? 
tags = default) where TQ : IAsyncEnumerable { - var check = HealthChecks.LivenessCheckFor>(failureStatus, tags); + var check = HealthChecks.LivenessCheckFor>(failureStatus, tags); if (name is not null) check.Name = name; @@ -44,7 +44,7 @@ internal static IHealthChecksBuilder AddNamedConsumerLivenessCheck(this I string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) where TQ : IAsyncEnumerable, INamedService { - var check = HealthChecks.LivenessCheckForNamed>(name, failureStatus, tags); + var check = HealthChecks.LivenessCheckForNamed>(name, failureStatus, tags); return builder.Add(check); } diff --git a/src/LocalPost/DependencyInjection/ServiceCollectionEx.cs b/src/LocalPost/DependencyInjection/ServiceCollectionEx.cs index f04a061..bdfbbdb 100644 --- a/src/LocalPost/DependencyInjection/ServiceCollectionEx.cs +++ b/src/LocalPost/DependencyInjection/ServiceCollectionEx.cs @@ -12,17 +12,17 @@ internal static bool TryAddBackgroundConsumer(this IServiceCollection ser if (!services.TryAddNamedSingleton(name, CreateConsumer)) return false; - services.AddBackgroundServiceForNamed>(name); + services.AddBackgroundServiceForNamed>(name); return true; - BackgroundQueue.NamedConsumer CreateConsumer(IServiceProvider provider) + Queue.NamedConsumer CreateConsumer(IServiceProvider provider) { var options = of(provider); var handler = hf(provider); - return new BackgroundQueue.NamedConsumer( - provider.GetRequiredService>>(), + return new Queue.NamedConsumer( + provider.GetRequiredService>>(), provider.GetRequiredService(name), handler, options.MaxConcurrency) { BreakOnException = options.BreakOnException @@ -37,17 +37,17 @@ internal static bool TryAddBackgroundConsumer(this IServiceCollection ser if (!services.TryAddSingleton(CreateConsumer)) return false; - services.AddBackgroundServiceFor>(); + services.AddBackgroundServiceFor>(); return true; - BackgroundQueue.Consumer CreateConsumer(IServiceProvider provider) + Queue.Consumer 
CreateConsumer(IServiceProvider provider) { var options = of(provider); var handler = hf(provider); - return new BackgroundQueue.Consumer( - provider.GetRequiredService>>(), + return new Queue.Consumer( + provider.GetRequiredService>>(), provider.GetRequiredService(), handler, options.MaxConcurrency) { BreakOnException = options.BreakOnException diff --git a/src/LocalPost/OptionsEx.cs b/src/LocalPost/OptionsEx.cs deleted file mode 100644 index fc0977f..0000000 --- a/src/LocalPost/OptionsEx.cs +++ /dev/null @@ -1,9 +0,0 @@ -using Microsoft.Extensions.Options; - -namespace LocalPost; - -public static class OptionsEx -{ - public static TOptions Get(this IOptionsMonitor optionsMonitor) => - optionsMonitor.Get(Reflection.FriendlyNameOf()); -} From 7ec881560d3ec7c8640c41f19214d3e352fbd67a Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Sat, 25 May 2024 14:50:20 +0000 Subject: [PATCH 09/33] chore: BackgroundQueues to BackgroundQueue --- samples/BackgroundQueueApp/Program.cs | 4 ++-- .../BackgroundJobQueue.cs | 2 +- .../{BackgroundQueues => BackgroundQueue}/BackgroundQueue.cs | 2 +- .../{BackgroundQueues => BackgroundQueue}/ConsumeContext.cs | 2 +- .../DependencyInjection/BackgroundQueuesBuilder.cs | 2 +- .../DependencyInjection/HealthChecks.cs | 2 +- .../DependencyInjection/ServiceCollectionEx.cs | 2 +- .../{BackgroundQueues => BackgroundQueue}/HandlerStackEx.cs | 2 +- .../{BackgroundQueues => BackgroundQueue}/Options.cs | 2 +- .../{BackgroundQueues => BackgroundQueue}/Tracing.cs | 2 +- 10 files changed, 11 insertions(+), 11 deletions(-) rename src/LocalPost/{BackgroundQueues => BackgroundQueue}/BackgroundJobQueue.cs (90%) rename src/LocalPost/{BackgroundQueues => BackgroundQueue}/BackgroundQueue.cs (98%) rename src/LocalPost/{BackgroundQueues => BackgroundQueue}/ConsumeContext.cs (95%) rename src/LocalPost/{BackgroundQueues => BackgroundQueue}/DependencyInjection/BackgroundQueuesBuilder.cs (97%) rename src/LocalPost/{BackgroundQueues => 
BackgroundQueue}/DependencyInjection/HealthChecks.cs (91%) rename src/LocalPost/{BackgroundQueues => BackgroundQueue}/DependencyInjection/ServiceCollectionEx.cs (86%) rename src/LocalPost/{BackgroundQueues => BackgroundQueue}/HandlerStackEx.cs (96%) rename src/LocalPost/{BackgroundQueues => BackgroundQueue}/Options.cs (97%) rename src/LocalPost/{BackgroundQueues => BackgroundQueue}/Tracing.cs (92%) diff --git a/samples/BackgroundQueueApp/Program.cs b/samples/BackgroundQueueApp/Program.cs index c711050..f65c3a6 100644 --- a/samples/BackgroundQueueApp/Program.cs +++ b/samples/BackgroundQueueApp/Program.cs @@ -1,7 +1,7 @@ using BackgroundQueueApp; using LocalPost; -using LocalPost.BackgroundQueues; -using LocalPost.BackgroundQueues.DependencyInjection; +using LocalPost.BackgroundQueue; +using LocalPost.BackgroundQueue.DependencyInjection; using LocalPost.Polly; using Polly; using Polly.Retry; diff --git a/src/LocalPost/BackgroundQueues/BackgroundJobQueue.cs b/src/LocalPost/BackgroundQueue/BackgroundJobQueue.cs similarity index 90% rename from src/LocalPost/BackgroundQueues/BackgroundJobQueue.cs rename to src/LocalPost/BackgroundQueue/BackgroundJobQueue.cs index d72f723..d238b8e 100644 --- a/src/LocalPost/BackgroundQueues/BackgroundJobQueue.cs +++ b/src/LocalPost/BackgroundQueue/BackgroundJobQueue.cs @@ -1,6 +1,6 @@ using JetBrains.Annotations; -namespace LocalPost.BackgroundQueues; +namespace LocalPost.BackgroundQueue; // Just a proxy to the actual queue, needed to expose IBackgroundJobQueue [UsedImplicitly] diff --git a/src/LocalPost/BackgroundQueues/BackgroundQueue.cs b/src/LocalPost/BackgroundQueue/BackgroundQueue.cs similarity index 98% rename from src/LocalPost/BackgroundQueues/BackgroundQueue.cs rename to src/LocalPost/BackgroundQueue/BackgroundQueue.cs index db3002d..eabca0d 100644 --- a/src/LocalPost/BackgroundQueues/BackgroundQueue.cs +++ b/src/LocalPost/BackgroundQueue/BackgroundQueue.cs @@ -1,7 +1,7 @@ using System.Threading.Channels; using 
LocalPost.AsyncEnumerable; -namespace LocalPost.BackgroundQueues; +namespace LocalPost.BackgroundQueue; internal static class BackgroundQueue { diff --git a/src/LocalPost/BackgroundQueues/ConsumeContext.cs b/src/LocalPost/BackgroundQueue/ConsumeContext.cs similarity index 95% rename from src/LocalPost/BackgroundQueues/ConsumeContext.cs rename to src/LocalPost/BackgroundQueue/ConsumeContext.cs index fee0e15..f7cac0d 100644 --- a/src/LocalPost/BackgroundQueues/ConsumeContext.cs +++ b/src/LocalPost/BackgroundQueue/ConsumeContext.cs @@ -1,7 +1,7 @@ using System.Diagnostics; using JetBrains.Annotations; -namespace LocalPost.BackgroundQueues; +namespace LocalPost.BackgroundQueue; [PublicAPI] public readonly record struct ConsumeContext diff --git a/src/LocalPost/BackgroundQueues/DependencyInjection/BackgroundQueuesBuilder.cs b/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs similarity index 97% rename from src/LocalPost/BackgroundQueues/DependencyInjection/BackgroundQueuesBuilder.cs rename to src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs index 9e917a2..bf0212d 100644 --- a/src/LocalPost/BackgroundQueues/DependencyInjection/BackgroundQueuesBuilder.cs +++ b/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs @@ -3,7 +3,7 @@ using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Options; -namespace LocalPost.BackgroundQueues.DependencyInjection; +namespace LocalPost.BackgroundQueue.DependencyInjection; [PublicAPI] public class BackgroundQueuesBuilder(IServiceCollection services) diff --git a/src/LocalPost/BackgroundQueues/DependencyInjection/HealthChecks.cs b/src/LocalPost/BackgroundQueue/DependencyInjection/HealthChecks.cs similarity index 91% rename from src/LocalPost/BackgroundQueues/DependencyInjection/HealthChecks.cs rename to src/LocalPost/BackgroundQueue/DependencyInjection/HealthChecks.cs index 98a8164..d17f57e 100644 --- 
a/src/LocalPost/BackgroundQueues/DependencyInjection/HealthChecks.cs +++ b/src/LocalPost/BackgroundQueue/DependencyInjection/HealthChecks.cs @@ -3,7 +3,7 @@ using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Diagnostics.HealthChecks; -namespace LocalPost.BackgroundQueues.DependencyInjection; +namespace LocalPost.BackgroundQueue.DependencyInjection; [PublicAPI] diff --git a/src/LocalPost/BackgroundQueues/DependencyInjection/ServiceCollectionEx.cs b/src/LocalPost/BackgroundQueue/DependencyInjection/ServiceCollectionEx.cs similarity index 86% rename from src/LocalPost/BackgroundQueues/DependencyInjection/ServiceCollectionEx.cs rename to src/LocalPost/BackgroundQueue/DependencyInjection/ServiceCollectionEx.cs index 77dd680..15c0280 100644 --- a/src/LocalPost/BackgroundQueues/DependencyInjection/ServiceCollectionEx.cs +++ b/src/LocalPost/BackgroundQueue/DependencyInjection/ServiceCollectionEx.cs @@ -1,7 +1,7 @@ using JetBrains.Annotations; using Microsoft.Extensions.DependencyInjection; -namespace LocalPost.BackgroundQueues.DependencyInjection; +namespace LocalPost.BackgroundQueue.DependencyInjection; [PublicAPI] public static class ServiceCollectionEx diff --git a/src/LocalPost/BackgroundQueues/HandlerStackEx.cs b/src/LocalPost/BackgroundQueue/HandlerStackEx.cs similarity index 96% rename from src/LocalPost/BackgroundQueues/HandlerStackEx.cs rename to src/LocalPost/BackgroundQueue/HandlerStackEx.cs index a77ea24..904ec29 100644 --- a/src/LocalPost/BackgroundQueues/HandlerStackEx.cs +++ b/src/LocalPost/BackgroundQueue/HandlerStackEx.cs @@ -1,7 +1,7 @@ using System.Diagnostics; using JetBrains.Annotations; -namespace LocalPost.BackgroundQueues; +namespace LocalPost.BackgroundQueue; [PublicAPI] public static class HandlerStackEx diff --git a/src/LocalPost/BackgroundQueues/Options.cs b/src/LocalPost/BackgroundQueue/Options.cs similarity index 97% rename from src/LocalPost/BackgroundQueues/Options.cs rename to src/LocalPost/BackgroundQueue/Options.cs 
index b7ec0fe..f41d297 100644 --- a/src/LocalPost/BackgroundQueues/Options.cs +++ b/src/LocalPost/BackgroundQueue/Options.cs @@ -1,7 +1,7 @@ using System.ComponentModel.DataAnnotations; using System.Threading.Channels; -namespace LocalPost.BackgroundQueues; +namespace LocalPost.BackgroundQueue; // For the DI container, to distinguish between different queues public sealed record Options : Options; diff --git a/src/LocalPost/BackgroundQueues/Tracing.cs b/src/LocalPost/BackgroundQueue/Tracing.cs similarity index 92% rename from src/LocalPost/BackgroundQueues/Tracing.cs rename to src/LocalPost/BackgroundQueue/Tracing.cs index c522c13..45f6ba7 100644 --- a/src/LocalPost/BackgroundQueues/Tracing.cs +++ b/src/LocalPost/BackgroundQueue/Tracing.cs @@ -1,7 +1,7 @@ using System.Diagnostics; using System.Reflection; -namespace LocalPost.BackgroundQueues; +namespace LocalPost.BackgroundQueue; internal static class Tracing { From f23eb81cefb74bb2c075c8243f67bdc3538e995f Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Mon, 27 May 2024 07:58:53 +0000 Subject: [PATCH 10/33] chore: modern host builder --- samples/KafkaConsumerApp/Program.cs | 95 +++++++------------- samples/SqsConsumerApp/Program.cs | 75 ++++++++-------- samples/SqsConsumerApp/SqsConsumerApp.csproj | 2 +- 3 files changed, 74 insertions(+), 98 deletions(-) diff --git a/samples/KafkaConsumerApp/Program.cs b/samples/KafkaConsumerApp/Program.cs index beef3e6..671ca34 100644 --- a/samples/KafkaConsumerApp/Program.cs +++ b/samples/KafkaConsumerApp/Program.cs @@ -1,72 +1,45 @@ -using System.ComponentModel.DataAnnotations; using Confluent.Kafka; using LocalPost; using LocalPost.KafkaConsumer; using LocalPost.KafkaConsumer.DependencyInjection; -await Host.CreateDefaultBuilder(args) - .ConfigureServices((context, services) => - { - services.AddScoped(); - - services.AddKafkaConsumers(kafka => - { - // kafka.Defaults.Configure(options => - // { - // options.BootstrapServers = "localhost:9092"; - // options.SecurityProtocol 
= SecurityProtocol.SaslSsl; - // options.SaslMechanism = SaslMechanism.Plain; - // options.SaslUsername = "admin"; - // options.SaslPassword = ""; - // }); - kafka.Defaults - .Bind(context.Configuration.GetSection("Kafka")) - .ValidateDataAnnotations(); - kafka.AddConsumer("weather-forecasts", HandlerStack.From() - .UseKafkaPayload() - .DeserializeJson() - .Acknowledge() - .Scoped() - .Trace() - ) - .Bind(context.Configuration.GetSection("Kafka:Consumer")) - .Configure(options => - { - options.AutoOffsetReset = AutoOffsetReset.Earliest; - options.EnableAutoCommit = false; - }) - .ValidateDataAnnotations(); - }); +var builder = Host.CreateApplicationBuilder(args); - - // services.AddKafkaConsumer("orders", - // builder => { builder.SetHandler(); }, - // builder => { builder.SetValueDeserializer(new StringDeserializer()); }).Configure( - // (options, kafkaOptions) => - // { - // options.Kafka.GroupId = ""; - // options.Kafka.AutoOffsetReset = AutoOffsetReset.Earliest; - // options.Kafka.EnableAutoCommit = false; // TODO DryRun - // - // options.Kafka.BootstrapServers = "localhost:9092"; - // options.Kafka.SecurityProtocol = SecurityProtocol.SaslSsl; - // options.Kafka.SaslMechanism = SaslMechanism.Plain; - // options.Kafka.SaslUsername = "admin"; - // options.Kafka.SaslPassword = ""; - // }); +builder.Services + .AddScoped() + .AddKafkaConsumers(kafka => + { + // kafka.Defaults.Configure(options => + // { + // options.BootstrapServers = "localhost:9092"; + // options.SecurityProtocol = SecurityProtocol.SaslSsl; + // options.SaslMechanism = SaslMechanism.Plain; + // options.SaslUsername = "admin"; + // options.SaslPassword = ""; + // }); + kafka.Defaults + .Bind(builder.Configuration.GetSection("Kafka")) + .ValidateDataAnnotations(); + kafka.AddConsumer("weather-forecasts", HandlerStack.From() + .UseKafkaPayload() + .DeserializeJson() + .Acknowledge() + .Scoped() + .Trace() + ) + .Bind(builder.Configuration.GetSection("Kafka:Consumer")) + .Configure(options => + { + 
// options.Kafka.GroupId = ""; + options.AutoOffsetReset = AutoOffsetReset.Earliest; + options.EnableAutoCommit = false; // TODO DryRun + }) + .ValidateDataAnnotations(); + }); + +await builder.Build().RunAsync(); - // Only one consumer per name (topic) is allowed?.. - // services.AddBatchKafkaConsumer("orders", - // builder => - // { - // }, - // builder => - // { - // }); - }) - .Build() - .RunAsync(); public record WeatherForecast(int TemperatureC, int TemperatureF, string Summary); diff --git a/samples/SqsConsumerApp/Program.cs b/samples/SqsConsumerApp/Program.cs index c7aa95b..efcb7dd 100644 --- a/samples/SqsConsumerApp/Program.cs +++ b/samples/SqsConsumerApp/Program.cs @@ -5,43 +5,46 @@ using Serilog; using Serilog.Sinks.FingersCrossed; -await Host.CreateDefaultBuilder(args) - .UseSerilog() - .ConfigureServices((context, services) => services - .AddDefaultAWSOptions(context.Configuration.GetAWSOptions()) - .AddAWSService()) - .ConfigureServices(services => services - .AddScoped() - .AddSqsConsumers(sqs => - { - sqs.Defaults.Configure(options => options.MaxConcurrency = 100); - sqs.AddConsumer("weather-forecasts", - HandlerStack.From() - .UseSqsPayload() - .DeserializeJson() - .Acknowledge() - .Scoped() - .Touch(next => async (context, ct) => +var builder = Host.CreateApplicationBuilder(args); + +builder.Services + .AddSerilog() + .AddDefaultAWSOptions(builder.Configuration.GetAWSOptions()) + .AddAWSService(); +builder.Services + .AddScoped() + .AddSqsConsumers(sqs => + { + sqs.Defaults.Configure(options => options.MaxConcurrency = 100); + sqs.AddConsumer("weather-forecasts", + HandlerStack.From() + .UseSqsPayload() + .DeserializeJson() + .Acknowledge() + .Scoped() + .Touch(next => async (context, ct) => + { + using var logBuffer = LogBuffer.BeginScope(); + try + { + await next(context, ct); + } + catch (OperationCanceledException e) when (e.CancellationToken == ct) + { + throw; // Not a real error + } + catch (Exception) { - using var logBuffer = 
LogBuffer.BeginScope(); - try - { - await next(context, ct); - } - catch (OperationCanceledException e) when (e.CancellationToken == ct) - { - throw; // Not a real error - } - catch (Exception) - { - logBuffer.Flush(); - throw; - } - }) - .Trace()); - })) - .Build() - .RunAsync(); + logBuffer.Flush(); + throw; + } + }) + .Trace()); + }); + +await builder.Build().RunAsync(); + + public record WeatherForecast(int TemperatureC, int TemperatureF, string Summary); diff --git a/samples/SqsConsumerApp/SqsConsumerApp.csproj b/samples/SqsConsumerApp/SqsConsumerApp.csproj index a076bff..b8e1fc5 100644 --- a/samples/SqsConsumerApp/SqsConsumerApp.csproj +++ b/samples/SqsConsumerApp/SqsConsumerApp.csproj @@ -13,7 +13,7 @@ - + From 6e0826aecfc0b48912d27b99d1eee81f1eee8df5 Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Tue, 28 May 2024 07:13:22 +0000 Subject: [PATCH 11/33] WIP (Testcontainers, some fixes & refactoring) --- src/LocalPost.KafkaConsumer/MessageSource.cs | 25 ++---- .../DependencyInjection/SqsBuilder.cs | 6 +- src/LocalPost.SqsConsumer/MessageSource.cs | 11 +-- src/LocalPost.SqsConsumer/QueueClient.cs | 38 +++----- .../AsyncEnumerable/AsyncEnumerableEx.cs | 5 +- src/LocalPost/AsyncEnumerable/BatchBuilder.cs | 8 +- ...AsyncEnumerable.cs => ConcurrentBuffer.cs} | 2 +- .../BackgroundQueue/BackgroundQueue.cs | 4 +- src/LocalPost/BackgroundQueue/Options.cs | 12 +-- src/LocalPost/Primitives.cs | 11 +-- .../ConsumerTests.cs | 90 +++++++++++++++++++ .../LocalPost.KafkaConsumer.Tests.csproj | 5 +- .../LocalPost.SqsConsumer.Tests.csproj | 7 +- tests/LocalPost.Tests/PrimitivesTests.cs | 28 ++++++ tests/LocalPost.Tests/Usings.cs | 1 + 15 files changed, 178 insertions(+), 75 deletions(-) rename src/LocalPost/AsyncEnumerable/{ConcurrentAsyncEnumerable.cs => ConcurrentBuffer.cs} (90%) create mode 100644 tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs create mode 100644 tests/LocalPost.Tests/PrimitivesTests.cs diff --git a/src/LocalPost.KafkaConsumer/MessageSource.cs 
b/src/LocalPost.KafkaConsumer/MessageSource.cs index a309fc3..b5dcfdd 100644 --- a/src/LocalPost.KafkaConsumer/MessageSource.cs +++ b/src/LocalPost.KafkaConsumer/MessageSource.cs @@ -6,11 +6,11 @@ namespace LocalPost.KafkaConsumer; internal sealed class MessageSource : MessageSourceBase, IAsyncEnumerable> { - private readonly ConcurrentAsyncEnumerable> _source; + private readonly ConcurrentBuffer> _source; public MessageSource(KafkaTopicClient client) : base(client) { - _source = ConsumeAsync().ToConcurrent(); + _source = ConsumeAsync().ToConcurrentBuffer(); } public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); @@ -21,12 +21,12 @@ public IAsyncEnumerator> GetAsyncEnumerator(CancellationT internal sealed class BatchMessageSource : MessageSourceBase, IAsyncEnumerable> { - private readonly ConcurrentAsyncEnumerable> _source; + private readonly ConcurrentBuffer> _source; public BatchMessageSource(KafkaTopicClient client, BatchBuilderFactory, BatchConsumeContext> factory) : base(client) { - _source = ConsumeAsync().Batch(factory).ToConcurrent(); + _source = ConsumeAsync().Batch(factory).ToConcurrentBuffer(); } public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); @@ -35,24 +35,17 @@ public IAsyncEnumerator> GetAsyncEnumerator(Cancella _source.GetAsyncEnumerator(ct); } -internal abstract class MessageSourceBase : IBackgroundService, INamedService +internal abstract class MessageSourceBase(KafkaTopicClient client) : IBackgroundService, INamedService { - private readonly KafkaTopicClient _client; - private bool _stopped; // Some additional reading: https://devblogs.microsoft.com/premier-developer/the-danger-of-taskcompletionsourcet-class/ // private readonly TaskCompletionSource _executionTcs = new(TaskCreationOptions.RunContinuationsAsynchronously); - protected MessageSourceBase(KafkaTopicClient client) - { - _client = client; - } - - public string Name => _client.Name; + public string Name => 
client.Name; // Run on a separate thread, as Confluent Kafka API is blocking - public Task StartAsync(CancellationToken ct) => Task.Run(() => _client.Subscribe(), ct); + public Task StartAsync(CancellationToken ct) => Task.Run(client.Subscribe, ct); public abstract Task ExecuteAsync(CancellationToken ct); @@ -72,7 +65,7 @@ private IEnumerable> Consume(CancellationToken ct) // TODO Transaction activity... while (!ct.IsCancellationRequested && !_stopped) - yield return _client.Read(ct); + yield return client.Read(ct); ct.ThrowIfCancellationRequested(); } @@ -81,6 +74,6 @@ private IEnumerable> Consume(CancellationToken ct) public Task StopAsync(CancellationToken ct) => Task.Run(() => { _stopped = true; - _client.Close(); + client.Close(); }, ct); } diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs index a77f764..6908cff 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs @@ -21,8 +21,10 @@ public OptionsBuilder AddConsumer(string name, HandlerFactory - new MessageSource(provider.GetRequiredService(name))); + services.TryAddNamedSingleton(name, provider => new MessageSource( + provider.GetRequiredService(name), + provider.GetOptions(name).Prefetch + )); services.AddBackgroundServiceForNamed(name); services.TryAddBackgroundConsumer, MessageSource>(name, hf, provider => diff --git a/src/LocalPost.SqsConsumer/MessageSource.cs b/src/LocalPost.SqsConsumer/MessageSource.cs index bf9be7e..c5ee670 100644 --- a/src/LocalPost.SqsConsumer/MessageSource.cs +++ b/src/LocalPost.SqsConsumer/MessageSource.cs @@ -6,11 +6,11 @@ namespace LocalPost.SqsConsumer; internal sealed class MessageSource : MessageSourceBase, IAsyncEnumerable> { - private readonly ConcurrentAsyncEnumerable> _source; + private readonly ConcurrentBuffer> _source; - public MessageSource(QueueClient client) : base(client) + public 
MessageSource(QueueClient client, int prefetch) : base(client) { - _source = ConsumeAsync().ToConcurrent(); + _source = ConsumeAsync().ToConcurrentBuffer(prefetch); } public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); @@ -21,12 +21,13 @@ public IAsyncEnumerator> GetAsyncEnumerator(CancellationT internal sealed class BatchMessageSource : MessageSourceBase, IAsyncEnumerable> { - private readonly ConcurrentAsyncEnumerable> _source; + private readonly ConcurrentBuffer> _source; + // TODO Make a note that Prefetch does not play a role here, with batch processing... public BatchMessageSource(QueueClient client, BatchBuilderFactory, BatchConsumeContext> factory) : base(client) { - _source = ConsumeAsync().Batch(factory).ToConcurrent(); + _source = ConsumeAsync().Batch(factory).ToConcurrentBuffer(); } public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); diff --git a/src/LocalPost.SqsConsumer/QueueClient.cs b/src/LocalPost.SqsConsumer/QueueClient.cs index f6e86ea..c392db9 100644 --- a/src/LocalPost.SqsConsumer/QueueClient.cs +++ b/src/LocalPost.SqsConsumer/QueueClient.cs @@ -6,29 +6,17 @@ namespace LocalPost.SqsConsumer; -internal sealed class QueueClient : INamedService +internal sealed class QueueClient(ILogger logger, IAmazonSQS sqs, Options options, string name) + : INamedService { - private readonly ILogger _logger; - - private readonly IAmazonSQS _sqs; - private readonly Options _options; - public QueueClient(ILogger logger, IAmazonSQS sqs, IOptionsMonitor options, string name) : this(logger, sqs, options.Get(name), name) { } - public QueueClient(ILogger logger, IAmazonSQS sqs, Options options, string name) - { - _logger = logger; - _sqs = sqs; - _options = options; - Name = name; - } - - public string Name { get; } + public string Name { get; } = name; - public string QueueName => _options.QueueName; + public string QueueName => options.QueueName; private GetQueueAttributesResponse? 
_queueAttributes; @@ -44,9 +32,9 @@ public QueueClient(ILogger logger, IAmazonSQS sqs, Options options, public async Task ConnectAsync(CancellationToken ct) { - if (string.IsNullOrEmpty(_options.QueueUrl)) + if (string.IsNullOrEmpty(options.QueueUrl)) // Checking for a possible error in the response would be also good... - _queueUrl = (await _sqs.GetQueueUrlAsync(_options.QueueName, ct)).QueueUrl; + _queueUrl = (await sqs.GetQueueUrlAsync(options.QueueName, ct)).QueueUrl; await FetchQueueAttributesAsync(ct); } @@ -56,7 +44,7 @@ private async Task FetchQueueAttributesAsync(CancellationToken ct) try { // Checking for a possible error in the response would be also good... - _queueAttributes = await _sqs.GetQueueAttributesAsync(QueueUrl, EndpointOptions.AllAttributes, ct); + _queueAttributes = await sqs.GetQueueAttributesAsync(QueueUrl, EndpointOptions.AllAttributes, ct); } catch (OperationCanceledException e) when (e.CancellationToken == ct) { @@ -64,7 +52,7 @@ private async Task FetchQueueAttributesAsync(CancellationToken ct) } catch (Exception e) { - _logger.LogWarning(e, "Cannot fetch attributes for SQS {Queue}", _options.QueueName); + logger.LogWarning(e, "Cannot fetch attributes for SQS {Queue}", options.QueueName); } } @@ -77,11 +65,11 @@ public async Task> PullMessagesAsync(CancellationToken ct) // AWS SDK handles network failures, see // https://docs.aws.amazon.com/sdkref/latest/guide/feature-retry-behavior.html - var response = await _sqs.ReceiveMessageAsync(new ReceiveMessageRequest + var response = await sqs.ReceiveMessageAsync(new ReceiveMessageRequest { QueueUrl = QueueUrl, - WaitTimeSeconds = _options.WaitTimeSeconds, - MaxNumberOfMessages = _options.MaxNumberOfMessages, + WaitTimeSeconds = options.WaitTimeSeconds, + MaxNumberOfMessages = options.MaxNumberOfMessages, AttributeNames = attributeNames, MessageAttributeNames = messageAttributeNames, }, ct); @@ -101,7 +89,7 @@ public async Task> PullMessagesAsync(CancellationToken ct) public async Task 
DeleteMessageAsync(ConsumeContext context) { using var activity = Tracing.StartSettling(context); - await _sqs.DeleteMessageAsync(QueueUrl, context.ReceiptHandle); + await sqs.DeleteMessageAsync(QueueUrl, context.ReceiptHandle); // TODO Log failures?.. } @@ -116,7 +104,7 @@ public async Task DeleteMessagesAsync(BatchConsumeContext context) .Select(entries => entries.ToList()); await Task.WhenAll(requests.Select(entries => - _sqs.DeleteMessageBatchAsync(QueueUrl, entries))); + sqs.DeleteMessageBatchAsync(QueueUrl, entries))); // TODO Log failures?.. } diff --git a/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs b/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs index 153a020..cffd297 100644 --- a/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs +++ b/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs @@ -2,9 +2,8 @@ namespace LocalPost.AsyncEnumerable; internal static class AsyncEnumerableEx { - // TODO Better name... - public static ConcurrentAsyncEnumerable ToConcurrent(this IAsyncEnumerable source, - MaxSize bufferMaxSize = default) => new(source, bufferMaxSize); + public static ConcurrentBuffer ToConcurrentBuffer(this IAsyncEnumerable source, int maxSize = 1) => + new(source, maxSize); public static IAsyncEnumerable Batch(this IAsyncEnumerable source, BatchBuilderFactory factory) => new BatchingAsyncEnumerable(source, factory); diff --git a/src/LocalPost/AsyncEnumerable/BatchBuilder.cs b/src/LocalPost/AsyncEnumerable/BatchBuilder.cs index 7a3c9ce..d867b89 100644 --- a/src/LocalPost/AsyncEnumerable/BatchBuilder.cs +++ b/src/LocalPost/AsyncEnumerable/BatchBuilder.cs @@ -117,12 +117,8 @@ public override void Reset() } } -internal sealed class BoundedBatchBuilder : BoundedBatchBuilderBase> +internal sealed class BoundedBatchBuilder(MaxSize batchMaxSize, TimeSpan timeWindow, CancellationToken ct = default) + : BoundedBatchBuilderBase>(batchMaxSize, timeWindow, ct) { - public BoundedBatchBuilder(MaxSize batchMaxSize, TimeSpan timeWindow, CancellationToken ct = 
default) : - base(batchMaxSize, timeWindow, ct) - { - } - public override IReadOnlyList Build() => Batch; } diff --git a/src/LocalPost/AsyncEnumerable/ConcurrentAsyncEnumerable.cs b/src/LocalPost/AsyncEnumerable/ConcurrentBuffer.cs similarity index 90% rename from src/LocalPost/AsyncEnumerable/ConcurrentAsyncEnumerable.cs rename to src/LocalPost/AsyncEnumerable/ConcurrentBuffer.cs index 7699639..e29564f 100644 --- a/src/LocalPost/AsyncEnumerable/ConcurrentAsyncEnumerable.cs +++ b/src/LocalPost/AsyncEnumerable/ConcurrentBuffer.cs @@ -2,7 +2,7 @@ namespace LocalPost.AsyncEnumerable; -internal sealed class ConcurrentAsyncEnumerable(IAsyncEnumerable source, MaxSize bufferMaxSize) +internal sealed class ConcurrentBuffer(IAsyncEnumerable source, MaxSize bufferMaxSize) : IAsyncEnumerable { private readonly Channel _buffer = Channel.CreateBounded(new BoundedChannelOptions(bufferMaxSize) diff --git a/src/LocalPost/BackgroundQueue/BackgroundQueue.cs b/src/LocalPost/BackgroundQueue/BackgroundQueue.cs index eabca0d..facc07d 100644 --- a/src/LocalPost/BackgroundQueue/BackgroundQueue.cs +++ b/src/LocalPost/BackgroundQueue/BackgroundQueue.cs @@ -39,7 +39,7 @@ public static BackgroundQueue Create(Options options, var pipeline = configure(channel.Reader); if (proxy) - pipeline = pipeline.ToConcurrent(); + pipeline = pipeline.ToConcurrentBuffer(); return new BackgroundQueue(channel, pipeline, TimeSpan.FromMilliseconds(options.CompletionDelay)); @@ -81,7 +81,7 @@ private async ValueTask CompleteAsync(CancellationToken ct = default) public Task ExecuteAsync(CancellationToken ct) => pipeline switch { - ConcurrentAsyncEnumerable concurrent => concurrent.Run(ct), + ConcurrentBuffer concurrent => concurrent.Run(ct), _ => Task.CompletedTask }; diff --git a/src/LocalPost/BackgroundQueue/Options.cs b/src/LocalPost/BackgroundQueue/Options.cs index f41d297..2290586 100644 --- a/src/LocalPost/BackgroundQueue/Options.cs +++ b/src/LocalPost/BackgroundQueue/Options.cs @@ -11,11 +11,10 @@ public 
sealed record BatchedOptions : BatchedOptions; public record BatchedOptions : Options { - [Range(1, ushort.MaxValue)] - public ushort BatchMaxSize { get; set; } = 10; + [Range(1, ushort.MaxValue)] public ushort BatchMaxSize { get; set; } = 10; - [Range(1, ushort.MaxValue)] - public int BatchTimeWindowMilliseconds { get; set; } = 1_000; + // TODO Rename to BatchTimeWindowMs + [Range(1, ushort.MaxValue)] public int BatchTimeWindowMilliseconds { get; set; } = 1_000; internal TimeSpan BatchTimeWindow => TimeSpan.FromMilliseconds(BatchTimeWindowMilliseconds); } @@ -35,6 +34,7 @@ public record Options /// Maximum queue (channel) length, after which writes are blocked (see ). /// Default is unlimited. /// + [Range(1, ushort.MaxValue)] public ushort? MaxSize { get; set; } = null; /// @@ -45,5 +45,7 @@ public record Options /// /// How many messages to process concurrently. Default is 10. /// - [Required] public ushort MaxConcurrency { get; set; } = 10; + [Required] + [Range(1, ushort.MaxValue)] + public ushort MaxConcurrency { get; set; } = 10; } diff --git a/src/LocalPost/Primitives.cs b/src/LocalPost/Primitives.cs index 46e8da3..08dec0e 100644 --- a/src/LocalPost/Primitives.cs +++ b/src/LocalPost/Primitives.cs @@ -1,6 +1,6 @@ namespace LocalPost; -// [PublicAPI] +// int, 1 <= value <= int.MaxValue internal readonly record struct MaxSize { public static implicit operator int(MaxSize batchSize) => batchSize.Value; @@ -9,13 +9,14 @@ internal readonly record struct MaxSize public static implicit operator MaxSize(short batchSize) => new(batchSize); public static implicit operator MaxSize(ushort batchSize) => new(batchSize); - public readonly int Value = 1; + private readonly int _value; + public int Value => _value == 0 ? 1 : _value; // Default value... 
public MaxSize(int value) { - if (value <= 1) - throw new ArgumentOutOfRangeException(nameof(value), value, "Batch size must be positive."); + if (value < 1) + throw new ArgumentOutOfRangeException(nameof(value), value, "Value must be greater than or equal to 1"); - Value = value; + _value = value; } } diff --git a/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs b/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs new file mode 100644 index 0000000..6f9c3e2 --- /dev/null +++ b/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs @@ -0,0 +1,90 @@ +using System.Text; +using Confluent.Kafka; +using LocalPost.KafkaConsumer.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using Testcontainers.Redpanda; +using Xunit.Abstractions; + +namespace LocalPost.KafkaConsumer.Tests; + +public class ConsumerTests(ITestOutputHelper output) : IAsyncLifetime +{ + // Called for each test, since each test instantiates a new class instance + private readonly RedpandaContainer _container = new RedpandaBuilder().Build(); + + public async Task InitializeAsync() + { + await _container.StartAsync(); + output.WriteLine(_container.Id); + } + + public Task DisposeAsync() => _container.StopAsync(); + + [Fact] + public async Task handles_messages() + { + var hostBuilder = Host.CreateApplicationBuilder(); + hostBuilder.Services + .AddKafkaConsumers(kafka => + { + kafka.Defaults.Configure(options => + { + options.BootstrapServers = _container.GetBootstrapAddress(); + // options.SecurityProtocol = SecurityProtocol.SaslSsl; + // options.SaslMechanism = SaslMechanism.Plain; + // options.SaslUsername = "admin"; + // options.SaslPassword = ""; + }); + // kafka.Defaults + // .Bind(builder.Configuration.GetSection("Kafka")) + // .ValidateDataAnnotations(); + kafka.AddConsumer("weather-forecasts", HandlerStack.For(async (payload, _) => + { + output.WriteLine(payload); + }) + .Map(next => async (payload, ct) => + { + // TODO Support string 
payload out of the box?.. + await next(Encoding.UTF8.GetString(payload), ct); + }) + .UseKafkaPayload() + .Acknowledge() + .Scoped() + .Trace() + ) + .Configure(options => + { + options.Topic = "weather-forecasts"; + options.GroupId = "test-consumer"; + // options.AutoOffsetReset = AutoOffsetReset.Earliest; + // options.EnableAutoCommit = false; // TODO DryRun + }) + .ValidateDataAnnotations(); + }); + + var host = hostBuilder.Build(); + + await host.StartAsync(); + + using var producer = new ProducerBuilder(new ProducerConfig + { + BootstrapServers = _container.GetBootstrapAddress() + }).Build(); + + await producer.ProduceAsync("weather-forecasts", new Message + { + Key = "London", + Value = "It will rainy in London tomorrow" + }); + + await Task.Delay(1_000); + + Assert.True(true); + + await host.StopAsync(); + } +} + + +// public record WeatherForecast(int TemperatureC, int TemperatureF, string Summary); diff --git a/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj b/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj index f662938..1282380 100644 --- a/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj +++ b/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj @@ -14,7 +14,10 @@ - + + + + diff --git a/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj b/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj index cc357eb..42aee8d 100644 --- a/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj +++ b/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj @@ -8,12 +8,11 @@ - + - - - + + diff --git a/tests/LocalPost.Tests/PrimitivesTests.cs b/tests/LocalPost.Tests/PrimitivesTests.cs new file mode 100644 index 0000000..d3967ac --- /dev/null +++ b/tests/LocalPost.Tests/PrimitivesTests.cs @@ -0,0 +1,28 @@ +namespace LocalPost.Tests; + +public class PrimitivesTests +{ + [Fact] + public void MaxSize_implicit_conversion() 
+ { + MaxSize batchSize = default; + int value = batchSize; + value.Should().Be(1); + + batchSize = 1; + value = batchSize; + value.Should().Be(1); + + batchSize = 2; + value = batchSize; + value.Should().Be(2); + + batchSize = (short)3; + value = batchSize; + value.Should().Be(3); + + batchSize = (ushort)4; + value = batchSize; + value.Should().Be(4); + } +} diff --git a/tests/LocalPost.Tests/Usings.cs b/tests/LocalPost.Tests/Usings.cs index c802f44..91743bb 100644 --- a/tests/LocalPost.Tests/Usings.cs +++ b/tests/LocalPost.Tests/Usings.cs @@ -1 +1,2 @@ global using Xunit; +global using FluentAssertions; From 8b605717d1041322f5b96a4197996c1de5cdd274 Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Thu, 30 May 2024 15:47:37 +0000 Subject: [PATCH 12/33] fix: finally working tests --- docker-compose.yml | 24 ++-- samples/KafkaConsumerApp/Program.cs | 50 ++++++-- .../appsettings.Development.json | 9 +- samples/KafkaConsumerApp/appsettings.json | 4 +- samples/SqsConsumerApp/Program.cs | 3 +- src/LocalPost.KafkaConsumer/ConsumeContext.cs | 15 ++- .../ServiceCollectionEx.cs | 13 +- src/LocalPost.KafkaConsumer/HandlerStackEx.cs | 7 +- .../KafkaTopicClient.cs | 31 ++++- .../LocalPost.KafkaConsumer.csproj | 2 +- src/LocalPost.KafkaConsumer/MessageSource.cs | 1 + src/LocalPost.KafkaConsumer/Tracing.cs | 2 +- src/LocalPost.SqsConsumer/ConsumeContext.cs | 17 ++- src/LocalPost.SqsConsumer/Options.cs | 2 +- src/LocalPost.SqsConsumer/Tracing.cs | 4 +- src/LocalPost/AsyncEnumerable/BatchBuilder.cs | 11 +- .../BackgroundQueuesBuilder.cs | 15 +-- src/LocalPost/ConcurrentHostedServices.cs | 15 +-- .../ConsumerTests.cs | 120 +++++++++--------- .../RpBuilder.cs | 89 +++++++++++++ .../AsyncEnumTests.cs | 10 -- .../ConsumerTests.cs | 76 +++++++++++ .../LocalPost.SqsConsumer.Tests.csproj | 3 + 23 files changed, 364 insertions(+), 159 deletions(-) create mode 100644 tests/LocalPost.KafkaConsumer.Tests/RpBuilder.cs delete mode 100644 tests/LocalPost.SqsConsumer.Tests/AsyncEnumTests.cs 
create mode 100644 tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs diff --git a/docker-compose.yml b/docker-compose.yml index 8528f68..7088fa0 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -17,6 +17,7 @@ services: environment: # LocalStack configuration: https://docs.localstack.cloud/references/configuration/ - DEBUG=${DEBUG:-0} + - SERVICES=sqs volumes: - localstack:/var/lib/localstack # https://docs.localstack.cloud/references/init-hooks/ @@ -24,27 +25,28 @@ services: # Only needed for Lambdas # - /var/run/docker.sock:/var/run/docker.sock redpanda: - # https://docs.redpanda.com/current/deploy/deployment-option/self-hosted/docker-image/ + # Mainly from: https://docs.redpanda.com/redpanda-labs/docker-compose/single-broker/ + # See also: https://docs.redpanda.com/current/deploy/deployment-option/self-hosted/docker-image/ image: docker.redpanda.com/redpandadata/redpanda:v24.1.2 + container_name: redpanda command: - redpanda start + - --mode dev-container - --smp 1 - - --overprovisioned - --kafka-addr internal://0.0.0.0:9092,external://0.0.0.0:19092 # Address the broker advertises to clients that connect to the Kafka API. # Use the internal addresses to connect to the Redpanda brokers # from inside the same Docker network. # Use the external addresses to connect to the Redpanda brokers # from outside the Docker network. - - --advertise-kafka-addr internal://redpanda:9092,external://localhost:19092 + - --advertise-kafka-addr internal://redpanda:9092,external://127.0.0.1:19092 - --pandaproxy-addr internal://0.0.0.0:8082,external://0.0.0.0:18082 # Address the broker advertises to clients that connect to the HTTP Proxy. - - --advertise-pandaproxy-addr internal://redpanda:8082,external://localhost:18082 + - --advertise-pandaproxy-addr internal://redpanda:8082,external://127.0.0.1:18082 - --schema-registry-addr internal://0.0.0.0:8081,external://0.0.0.0:18081 # Redpanda brokers use the RPC API to communicate with each other internally. 
- --rpc-addr redpanda:33145 - --advertise-rpc-addr redpanda:33145 - - --mode dev-container ports: - 18081:18081 - 18082:18082 @@ -54,12 +56,12 @@ services: - redpanda:/var/lib/redpanda/data networks: - redpanda_network - healthcheck: - test: [ "CMD-SHELL", "rpk cluster health | grep -E 'Healthy:.+true' || exit 1" ] - interval: 15s - timeout: 3s - retries: 5 - start_period: 5s +# healthcheck: +# test: [ "CMD-SHELL", "rpk cluster health | grep -E 'Healthy:.+true' || exit 1" ] +# interval: 15s +# timeout: 3s +# retries: 5 +# start_period: 5s redpanda-console: image: docker.redpanda.com/redpandadata/console:v2.5.2 entrypoint: /bin/sh diff --git a/samples/KafkaConsumerApp/Program.cs b/samples/KafkaConsumerApp/Program.cs index 671ca34..d8cb038 100644 --- a/samples/KafkaConsumerApp/Program.cs +++ b/samples/KafkaConsumerApp/Program.cs @@ -9,18 +9,10 @@ .AddScoped() .AddKafkaConsumers(kafka => { - // kafka.Defaults.Configure(options => - // { - // options.BootstrapServers = "localhost:9092"; - // options.SecurityProtocol = SecurityProtocol.SaslSsl; - // options.SaslMechanism = SaslMechanism.Plain; - // options.SaslUsername = "admin"; - // options.SaslPassword = ""; - // }); kafka.Defaults .Bind(builder.Configuration.GetSection("Kafka")) .ValidateDataAnnotations(); - kafka.AddConsumer("weather-forecasts", HandlerStack.From() + kafka.AddConsumer("one-and-the-only", HandlerStack.From() .UseKafkaPayload() .DeserializeJson() .Acknowledge() @@ -30,14 +22,44 @@ .Bind(builder.Configuration.GetSection("Kafka:Consumer")) .Configure(options => { - // options.Kafka.GroupId = ""; options.AutoOffsetReset = AutoOffsetReset.Earliest; - options.EnableAutoCommit = false; // TODO DryRun + // options.EnableAutoCommit = false; // TODO DryRun }) .ValidateDataAnnotations(); }); -await builder.Build().RunAsync(); +// TODO Health + Supervisor +var host = builder.Build(); + +// using (var producer = new ProducerBuilder(new ProducerConfig +// { +// BootstrapServers = "127.0.0.1:19092" +// }).Build()) 
+// { +// // Redpanda: by default, topic is created automatically on the first message +// await producer.ProduceAsync("weather-forecasts", new Message +// { +// Key = "London", +// Value = JsonSerializer.Serialize(new WeatherForecast(25, 77, "Sunny")) +// }); +// await producer.ProduceAsync("weather-forecasts", new Message +// { +// Key = "Paris", +// Value = JsonSerializer.Serialize(new WeatherForecast(18, 64, "Rainy")) +// }); +// await producer.ProduceAsync("weather-forecasts", new Message +// { +// Key = "Toronto", +// Value = JsonSerializer.Serialize(new WeatherForecast(22, 72, "Cloudy")) +// }); +// await producer.ProduceAsync("weather-forecasts", new Message +// { +// Key = "Berlin", +// Value = JsonSerializer.Serialize(new WeatherForecast(20, 68, "Sunny")) +// }); +// } + +await host.RunAsync(); @@ -45,9 +67,9 @@ public record WeatherForecast(int TemperatureC, int TemperatureF, string Summary internal sealed class MessageHandler : IHandler { - public async ValueTask InvokeAsync(WeatherForecast payload, CancellationToken ct) + public ValueTask InvokeAsync(WeatherForecast payload, CancellationToken ct) { - await Task.Delay(1_000, ct); Console.WriteLine(payload); + return ValueTask.CompletedTask; } } diff --git a/samples/KafkaConsumerApp/appsettings.Development.json b/samples/KafkaConsumerApp/appsettings.Development.json index b2dcdb6..0967ef4 100644 --- a/samples/KafkaConsumerApp/appsettings.Development.json +++ b/samples/KafkaConsumerApp/appsettings.Development.json @@ -1,8 +1 @@ -{ - "Logging": { - "LogLevel": { - "Default": "Information", - "Microsoft.Hosting.Lifetime": "Information" - } - } -} +{} diff --git a/samples/KafkaConsumerApp/appsettings.json b/samples/KafkaConsumerApp/appsettings.json index dedf07b..b202834 100644 --- a/samples/KafkaConsumerApp/appsettings.json +++ b/samples/KafkaConsumerApp/appsettings.json @@ -6,10 +6,10 @@ } }, "Kafka": { - "BootstrapServers": "localhost:9092", + "BootstrapServers": "127.0.0.1:19092", "Consumer": { "Topic": 
"weather-forecasts", - "GroupId": "example-cs-group" + "GroupId": "example-consumer-group" } } } diff --git a/samples/SqsConsumerApp/Program.cs b/samples/SqsConsumerApp/Program.cs index efcb7dd..a418b5a 100644 --- a/samples/SqsConsumerApp/Program.cs +++ b/samples/SqsConsumerApp/Program.cs @@ -8,7 +8,7 @@ var builder = Host.CreateApplicationBuilder(args); builder.Services - .AddSerilog() + .AddSerilog() // See https://nblumhardt.com/2024/04/serilog-net8-0-minimal/#hooking-up-aspnet-core-and-iloggert .AddDefaultAWSOptions(builder.Configuration.GetAWSOptions()) .AddAWSService(); builder.Services @@ -42,6 +42,7 @@ .Trace()); }); +// TODO Health + Supervisor await builder.Build().RunAsync(); diff --git a/src/LocalPost.KafkaConsumer/ConsumeContext.cs b/src/LocalPost.KafkaConsumer/ConsumeContext.cs index 3ee9d59..d10a566 100644 --- a/src/LocalPost.KafkaConsumer/ConsumeContext.cs +++ b/src/LocalPost.KafkaConsumer/ConsumeContext.cs @@ -15,15 +15,19 @@ public static BatchBuilderFactory, BatchConsumeContext { internal readonly KafkaTopicClient Client; - internal readonly TopicPartitionOffset Offset; + // librdkafka docs: + // When consumer restarts this is where it will start consuming from. + // The committed offset should be last_message_offset+1. 
+ // See https://github.com/confluentinc/librdkafka/wiki/Consumer-offset-management#terminology + internal readonly TopicPartitionOffset NextOffset; internal readonly Message Message; public readonly T Payload; - internal ConsumeContext(KafkaTopicClient client, TopicPartitionOffset offset, Message message, + internal ConsumeContext(KafkaTopicClient client, TopicPartitionOffset nextOffset, Message message, T payload) { Client = client; - Offset = offset; + NextOffset = nextOffset; Message = message; Payload = payload; } @@ -38,7 +42,7 @@ public void Deconstruct(out T payload, out IReadOnlyList headers) public IReadOnlyList Headers => Message.Headers.BackingList; - public ConsumeContext Transform(TOut payload) => new(Client, Offset, Message, payload); + public ConsumeContext Transform(TOut payload) => new(Client, NextOffset, Message, payload); public ConsumeContext Transform(Func, TOut> transform) => Transform(transform(this)); @@ -100,7 +104,4 @@ public async Task> Transform(Func Messages[^1].Client; - - // Use .MaxBy() to not rely on the order?.. 
- internal TopicPartitionOffset LatestOffset => Messages[^1].Offset; } diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionEx.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionEx.cs index 2bdc981..ea51521 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionEx.cs +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionEx.cs @@ -16,11 +16,12 @@ public static IServiceCollection AddKafkaConsumers(this IServiceCollection servi } internal static bool TryAddKafkaClient(this IServiceCollection services, string name) - where TOptions : Options => services.TryAddNamedSingleton(name, provider => - { - var options = provider.GetOptions(name); + where TOptions : Options => + services.TryAddNamedSingleton(name, provider => + { + var options = provider.GetOptions(name); - return new KafkaTopicClient(provider.GetRequiredService>(), - options, options.Topic, name); - }); + return new KafkaTopicClient(provider.GetRequiredService>(), + options, options.Topic, name); + }); } diff --git a/src/LocalPost.KafkaConsumer/HandlerStackEx.cs b/src/LocalPost.KafkaConsumer/HandlerStackEx.cs index c9aab8e..35ae2f9 100644 --- a/src/LocalPost.KafkaConsumer/HandlerStackEx.cs +++ b/src/LocalPost.KafkaConsumer/HandlerStackEx.cs @@ -54,7 +54,7 @@ public static HandlerFactory> Acknowledge(this HandlerFacto async (context, ct) => { await next(context, ct); - context.Client.StoreOffset(context.Offset); + context.Client.StoreOffset(context.NextOffset); }); public static HandlerFactory> Acknowledge( @@ -63,7 +63,10 @@ public static HandlerFactory> Acknowledge( async (context, ct) => { await next(context, ct); - context.Client.StoreOffset(context.LatestOffset); + // Store all the offsets, as it can be a batch of messages from different partitions + // (even different topics, if subscribed using a regex) + foreach (var message in context.Messages) + message.Client.StoreOffset(message.NextOffset); }); #region Deserialize() 
diff --git a/src/LocalPost.KafkaConsumer/KafkaTopicClient.cs b/src/LocalPost.KafkaConsumer/KafkaTopicClient.cs index 1f0c4e5..d1649eb 100644 --- a/src/LocalPost.KafkaConsumer/KafkaTopicClient.cs +++ b/src/LocalPost.KafkaConsumer/KafkaTopicClient.cs @@ -4,6 +4,27 @@ namespace LocalPost.KafkaConsumer; +internal static class KafkaLogging +{ + public static void LogKafkaMessage(this ILogger logger, string topic, LogMessage log) + { + var level = log.Level switch + { + SyslogLevel.Emergency => LogLevel.Critical, + SyslogLevel.Alert => LogLevel.Critical, + SyslogLevel.Critical => LogLevel.Critical, + SyslogLevel.Error => LogLevel.Error, + SyslogLevel.Warning => LogLevel.Warning, + SyslogLevel.Notice => LogLevel.Information, + SyslogLevel.Info => LogLevel.Information, + SyslogLevel.Debug => LogLevel.Debug, + _ => LogLevel.Information + }; + + logger.Log(level, "{Topic} (via librdkafka): {Message}", topic, log.Message); + } +} + internal sealed class KafkaTopicClient : INamedService, IDisposable { private readonly ILogger _logger; @@ -13,9 +34,9 @@ public KafkaTopicClient(ILogger logger, ConsumerConfig config, { _logger = logger; - var clientBuilder = new ConsumerBuilder(config); - // TODO Error handler, logger - _client = clientBuilder.Build(); + _client = new ConsumerBuilder(config) + .SetLogHandler((_, log) => _logger.LogKafkaMessage(topic, log)) + .Build(); Topic = topic; GroupId = config.GroupId; @@ -52,7 +73,9 @@ public ConsumeContext Read(CancellationToken ct = default) if (result is null || result.IsPartitionEOF || result.Message is null) continue; // Continue waiting for a message - return new ConsumeContext(this, result.TopicPartitionOffset, result.Message, + return new ConsumeContext(this, + new TopicPartitionOffset(result.Topic, result.Partition, result.Offset + 1, result.LeaderEpoch), + result.Message, result.Message.Value); } catch (ConsumeException e) when (!e.Error.IsFatal) diff --git a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj 
b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj index ea01d12..c09b55e 100644 --- a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj +++ b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj @@ -41,7 +41,7 @@ - + diff --git a/src/LocalPost.KafkaConsumer/MessageSource.cs b/src/LocalPost.KafkaConsumer/MessageSource.cs index b5dcfdd..bcd0b18 100644 --- a/src/LocalPost.KafkaConsumer/MessageSource.cs +++ b/src/LocalPost.KafkaConsumer/MessageSource.cs @@ -13,6 +13,7 @@ public MessageSource(KafkaTopicClient client) : base(client) _source = ConsumeAsync().ToConcurrentBuffer(); } + // Run this (possibly) blocking & long-running task in a separate thread?.. public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => diff --git a/src/LocalPost.KafkaConsumer/Tracing.cs b/src/LocalPost.KafkaConsumer/Tracing.cs index 1f90053..a69476e 100644 --- a/src/LocalPost.KafkaConsumer/Tracing.cs +++ b/src/LocalPost.KafkaConsumer/Tracing.cs @@ -60,7 +60,7 @@ public static void AcceptDistributedTracingFrom(this Activity acti public static Activity? SetTagsFor(this Activity? 
activity, ConsumeContext context) { // activity?.SetTag("messaging.message.id", context.MessageId); - activity?.SetTag("messaging.kafka.message.offset", context.Offset.Offset.Value); + activity?.SetTag("messaging.kafka.message.offset", context.NextOffset.Offset.Value); // Skip, as we always ignore the key on consumption // activity.SetTag("messaging.kafka.message.key", context.Message.Key); diff --git a/src/LocalPost.SqsConsumer/ConsumeContext.cs b/src/LocalPost.SqsConsumer/ConsumeContext.cs index 1ab7534..713aaad 100644 --- a/src/LocalPost.SqsConsumer/ConsumeContext.cs +++ b/src/LocalPost.SqsConsumer/ConsumeContext.cs @@ -65,24 +65,23 @@ public readonly record struct BatchConsumeContext internal sealed class Builder(MaxSize batchMaxSize, TimeSpan timeWindow, CancellationToken ct = default) : BoundedBatchBuilderBase, BatchConsumeContext>(batchMaxSize, timeWindow, ct) { - public override BatchConsumeContext Build() => new(Batch); + // TODO Batch.DrainToImmutable() + public override BatchConsumeContext Build() => new(Batch.ToImmutable()); } - // TODO ImmutableArray - public readonly IReadOnlyList> Messages; + public readonly ImmutableArray> Messages; - internal BatchConsumeContext(IReadOnlyList> messages) + public int Count => Messages.Length; + + internal BatchConsumeContext(ImmutableArray> messages) { - if (messages.Count == 0) + if (messages.Length == 0) throw new ArgumentException("Batch must contain at least one message", nameof(messages)); Messages = messages; } - public BatchConsumeContext Transform(ConsumeContext[] payload) => new(payload); - - public BatchConsumeContext Transform(IEnumerable> payload) => - Transform(payload.ToArray()); + public BatchConsumeContext Transform(IEnumerable> payload) => new(payload.ToImmutableArray()); public BatchConsumeContext Transform(IEnumerable batchPayload) => Transform(Messages.Zip(batchPayload, (message, payload) => message.Transform(payload))); diff --git a/src/LocalPost.SqsConsumer/Options.cs 
b/src/LocalPost.SqsConsumer/Options.cs index c266364..dd487af 100644 --- a/src/LocalPost.SqsConsumer/Options.cs +++ b/src/LocalPost.SqsConsumer/Options.cs @@ -27,7 +27,7 @@ public record EndpointOptions /// /// How many messages to prefetch from SQS. Default is 10. /// - public byte Prefetch { get; set; } = 10; // FIXME Use + public byte Prefetch { get; set; } = 10; /// /// Time to wait for available messages in the queue. 0 is short pooling, where 1..20 activates long pooling. diff --git a/src/LocalPost.SqsConsumer/Tracing.cs b/src/LocalPost.SqsConsumer/Tracing.cs index bde27cd..94d0d92 100644 --- a/src/LocalPost.SqsConsumer/Tracing.cs +++ b/src/LocalPost.SqsConsumer/Tracing.cs @@ -57,7 +57,7 @@ public static void SetDefaultTags(this Activity? activity, QueueClient client) activity?.SetTag("messaging.message.id", context.MessageId); public static Activity? SetTagsFor(this Activity? activity, BatchConsumeContext context) => - activity?.SetTag("messaging.batch.message_count", context.Messages.Count); + activity?.SetTag("messaging.batch.message_count", context.Count); public static Activity? SetTagsFor(this Activity? 
activity, ReceiveMessageResponse response) => activity?.SetTag("messaging.batch.message_count", response.Messages.Count); @@ -130,7 +130,7 @@ static Tracing() return activity; activity.SetDefaultTags(context.Client); - activity.SetTag("messaging.batch.message_count", context.Messages.Count); + activity.SetTag("messaging.batch.message_count", context.Count); return activity; } diff --git a/src/LocalPost/AsyncEnumerable/BatchBuilder.cs b/src/LocalPost/AsyncEnumerable/BatchBuilder.cs index d867b89..6bd601e 100644 --- a/src/LocalPost/AsyncEnumerable/BatchBuilder.cs +++ b/src/LocalPost/AsyncEnumerable/BatchBuilder.cs @@ -1,3 +1,4 @@ +using System.Collections.Immutable; using Nito.AsyncEx; namespace LocalPost.AsyncEnumerable; @@ -86,14 +87,14 @@ public void Dispose() internal abstract class BoundedBatchBuilderBase : BatchBuilderBase { - private readonly MaxSize _batchMaxSize; - protected List Batch; // FIXME ImmutableArrayBuilder + private readonly int _batchMaxSize; + protected ImmutableArray.Builder Batch; protected BoundedBatchBuilderBase(MaxSize batchMaxSize, TimeSpan timeWindow, CancellationToken ct = default) : base(timeWindow, ct) { _batchMaxSize = batchMaxSize; - Batch = new List(_batchMaxSize); + Batch = ImmutableArray.CreateBuilder(_batchMaxSize); } public override bool IsEmpty => Batch.Count == 0; @@ -113,12 +114,12 @@ public override bool TryAdd(T entry) public override void Reset() { base.Reset(); - Batch = new List(_batchMaxSize); + Batch = ImmutableArray.CreateBuilder(_batchMaxSize); } } internal sealed class BoundedBatchBuilder(MaxSize batchMaxSize, TimeSpan timeWindow, CancellationToken ct = default) : BoundedBatchBuilderBase>(batchMaxSize, timeWindow, ct) { - public override IReadOnlyList Build() => Batch; + public override IReadOnlyList Build() => Batch; // TODO ImmutableArray } diff --git a/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs b/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs index 
bf0212d..e0c6eac 100644 --- a/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs +++ b/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs @@ -23,14 +23,13 @@ public OptionsBuilder> AddJobQueue() } // THandler has to be registered by the user - public OptionsBuilder> AddQueue() - where THandler : IHandler => - AddQueue( - HandlerStack.From() - .Scoped() - .UsePayload() - .Trace() - ); + public OptionsBuilder> AddQueue() where THandler : IHandler => AddQueue( + // A way to configure the pipeline?.. + HandlerStack.From() + .Scoped() + .UsePayload() + .Trace() + ); public OptionsBuilder> AddQueue(HandlerFactory> hf) { diff --git a/src/LocalPost/ConcurrentHostedServices.cs b/src/LocalPost/ConcurrentHostedServices.cs index 0600576..d73a100 100644 --- a/src/LocalPost/ConcurrentHostedServices.cs +++ b/src/LocalPost/ConcurrentHostedServices.cs @@ -74,6 +74,7 @@ internal class BackgroundServiceRunner(T service, IHostApplicationLifetime ap private Task? _start; private CancellationTokenSource? _executionCts; private Task? _execution; + private Task? _executionWrapper; public bool Starting => _start is not null && !_start.IsCompleted; @@ -111,16 +112,12 @@ public async Task StartAsync(CancellationToken ct) await (_start = service.StartAsync(ct)); // Start execution in the background... 
-#pragma warning disable CS4014 - ExecuteAsync(); -#pragma warning restore CS4014 + _executionCts = new CancellationTokenSource(); + _executionWrapper = ExecuteAsync(_executionCts.Token); } - private async Task ExecuteAsync() + private async Task ExecuteAsync(CancellationToken ct) { - _executionCts = new CancellationTokenSource(); - var ct = _executionCts.Token; - try { await WaitAppStartAsync(ct); @@ -148,9 +145,9 @@ public async Task StopAsync(CancellationToken forceExitToken) if (!_executionCts.IsCancellationRequested) _executionCts.Cancel(); // Signal cancellation to the service - if (_execution is not null) + if (_executionWrapper is not null) // Wait until the execution completes or the app is forced to exit - await _execution.WaitAsync(forceExitToken); + await _executionWrapper.WaitAsync(forceExitToken); await service.StopAsync(forceExitToken); } diff --git a/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs b/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs index 6f9c3e2..f1b1570 100644 --- a/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs +++ b/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs @@ -3,7 +3,6 @@ using LocalPost.KafkaConsumer.DependencyInjection; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Hosting; -using Testcontainers.Redpanda; using Xunit.Abstractions; namespace LocalPost.KafkaConsumer.Tests; @@ -11,80 +10,85 @@ namespace LocalPost.KafkaConsumer.Tests; public class ConsumerTests(ITestOutputHelper output) : IAsyncLifetime { // Called for each test, since each test instantiates a new class instance - private readonly RedpandaContainer _container = new RedpandaBuilder().Build(); + private readonly RpContainer _container = new RpBuilder() + .Build(); + + private const string Topic = "weather-forecasts"; public async Task InitializeAsync() { await _container.StartAsync(); - output.WriteLine(_container.Id); - } - - public Task DisposeAsync() => _container.StopAsync(); - [Fact] - public async Task 
handles_messages() - { - var hostBuilder = Host.CreateApplicationBuilder(); - hostBuilder.Services - .AddKafkaConsumers(kafka => - { - kafka.Defaults.Configure(options => - { - options.BootstrapServers = _container.GetBootstrapAddress(); - // options.SecurityProtocol = SecurityProtocol.SaslSsl; - // options.SaslMechanism = SaslMechanism.Plain; - // options.SaslUsername = "admin"; - // options.SaslPassword = ""; - }); - // kafka.Defaults - // .Bind(builder.Configuration.GetSection("Kafka")) - // .ValidateDataAnnotations(); - kafka.AddConsumer("weather-forecasts", HandlerStack.For(async (payload, _) => - { - output.WriteLine(payload); - }) - .Map(next => async (payload, ct) => - { - // TODO Support string payload out of the box?.. - await next(Encoding.UTF8.GetString(payload), ct); - }) - .UseKafkaPayload() - .Acknowledge() - .Scoped() - .Trace() - ) - .Configure(options => - { - options.Topic = "weather-forecasts"; - options.GroupId = "test-consumer"; - // options.AutoOffsetReset = AutoOffsetReset.Earliest; - // options.EnableAutoCommit = false; // TODO DryRun - }) - .ValidateDataAnnotations(); - }); - - var host = hostBuilder.Build(); - - await host.StartAsync(); + // Dirty fix, but otherwise the client fails + await Task.Delay(3_000); using var producer = new ProducerBuilder(new ProducerConfig { BootstrapServers = _container.GetBootstrapAddress() }).Build(); - await producer.ProduceAsync("weather-forecasts", new Message + // Redpanda: by default, topic is created automatically on the first message + await producer.ProduceAsync(Topic, new Message { Key = "London", Value = "It will rainy in London tomorrow" }); + await producer.ProduceAsync(Topic, new Message + { + Key = "Paris", + Value = "It will rainy in London tomorrow" + }); + } + + public async Task DisposeAsync() + { + await _container.StopAsync(); + } - await Task.Delay(1_000); + [Fact] + public async Task handles_messages() + { + var received = new List(); - Assert.True(true); + var hostBuilder = 
Host.CreateApplicationBuilder(); + hostBuilder.Services.AddKafkaConsumers(kafka => kafka + .AddConsumer("test-one", HandlerStack.For(async (payload, _) => + { + received.Add(payload); + }) + .Map(next => async (payload, ct) => + { + // TODO Support string payload out of the box?.. + await next(Encoding.UTF8.GetString(payload), ct); + }) + .UseKafkaPayload() + .Acknowledge() + .Scoped() + .Trace() + ) + .Configure(options => + { + options.BootstrapServers = _container.GetBootstrapAddress(); + options.Topic = Topic; + options.GroupId = "test-app"; + // Otherwise the client attaches to the end of the topic, skipping all the published messages + options.AutoOffsetReset = AutoOffsetReset.Earliest; + }) + .ValidateDataAnnotations()); - await host.StopAsync(); - } -} + var host = hostBuilder.Build(); + + try + { + await host.StartAsync(); + await Task.Delay(1_000); // "App is working" -// public record WeatherForecast(int TemperatureC, int TemperatureF, string Summary); + Assert.Equal(2, received.Count); + } + finally + { + await host.StopAsync(); + } + } +} diff --git a/tests/LocalPost.KafkaConsumer.Tests/RpBuilder.cs b/tests/LocalPost.KafkaConsumer.Tests/RpBuilder.cs new file mode 100644 index 0000000..0acb613 --- /dev/null +++ b/tests/LocalPost.KafkaConsumer.Tests/RpBuilder.cs @@ -0,0 +1,89 @@ +using System.Text; +using Docker.DotNet.Models; +using DotNet.Testcontainers.Builders; +using DotNet.Testcontainers.Configurations; +using DotNet.Testcontainers.Containers; + +namespace LocalPost.KafkaConsumer.Tests; + +// See also https://github.com/testcontainers/testcontainers-dotnet/blob/develop/src/Testcontainers.Kafka/KafkaBuilder.cs +public sealed class RpBuilder : ContainerBuilder +{ + public const string RedpandaImage = "docker.redpanda.com/redpandadata/redpanda:v24.1.5"; + + public const ushort KafkaPort = 9092; + public const ushort KafkaAdminPort = 9644; + public const ushort PandaProxyPort = 8082; + public const ushort SchemaRegistryPort = 8081; + + public const 
string StartupScriptFilePath = "/testcontainers.sh"; + + public RpBuilder() : this(new ContainerConfiguration()) + { + DockerResourceConfiguration = Init().DockerResourceConfiguration; + } + + private RpBuilder(ContainerConfiguration resourceConfiguration) : base(resourceConfiguration) + { + DockerResourceConfiguration = resourceConfiguration; + } + + protected override RpBuilder Clone(IResourceConfiguration resourceConfiguration) => + Merge(DockerResourceConfiguration, new ContainerConfiguration(resourceConfiguration)); + + protected override RpBuilder Clone(IContainerConfiguration resourceConfiguration) => + Merge(DockerResourceConfiguration, new ContainerConfiguration(resourceConfiguration)); + + protected override RpBuilder Merge(ContainerConfiguration oldValue, ContainerConfiguration newValue) => + new(new ContainerConfiguration(oldValue, newValue)); + + protected override ContainerConfiguration DockerResourceConfiguration { get; } + + public override RpContainer Build() + { + Validate(); + return new RpContainer(DockerResourceConfiguration); + } + + protected override RpBuilder Init() + { + return base.Init() + .WithImage(RedpandaImage) + .WithPortBinding(KafkaPort, true) + .WithPortBinding(KafkaAdminPort, true) + .WithPortBinding(PandaProxyPort, true) + .WithPortBinding(SchemaRegistryPort, true) + .WithEntrypoint("/bin/sh", "-c") + .WithCommand("while [ ! 
-f " + StartupScriptFilePath + " ]; do sleep 0.1; done; " + StartupScriptFilePath) + .WithWaitStrategy(Wait.ForUnixContainer().UntilMessageIsLogged("Started Kafka API server")) + .WithStartupCallback((container, ct) => + { + string[] cmd = + [ + "rpk", "redpanda", "start", + "--smp 1", + "--mode dev-container", + $"--kafka-addr internal://0.0.0.0:29092,external://0.0.0.0:{KafkaPort}", + $"--advertise-kafka-addr internal://{container.IpAddress}:29092,external://{container.Hostname}:{container.GetMappedPublicPort(KafkaPort)}", + $"--pandaproxy-addr internal://0.0.0.0:28082,external://0.0.0.0:{PandaProxyPort}", + $"--advertise-pandaproxy-addr internal://{container.IpAddress}:28082,external://{container.Hostname}:{container.GetMappedPublicPort(PandaProxyPort)}", + $"--schema-registry-addr internal://0.0.0.0:28081,external://0.0.0.0:{SchemaRegistryPort}", + ]; + var startupScript = "#!/bin/sh" + '\n' + '\n' + string.Join(' ', cmd) + '\n'; + + return container.CopyAsync(Encoding.Default.GetBytes(startupScript), StartupScriptFilePath, Unix.FileMode755, ct); + }); + } +} + + + +public sealed class RpContainer(IContainerConfiguration configuration) : DockerContainer(configuration) +{ + public string GetSchemaRegistryAddress() => + new UriBuilder(Uri.UriSchemeHttp, Hostname, GetMappedPublicPort(RpBuilder.SchemaRegistryPort)).ToString(); + + public string GetBootstrapAddress() => + // new UriBuilder("PLAINTEXT", Hostname, GetMappedPublicPort(RpBuilder.KafkaPort)).ToString(); + $"{Hostname}:{GetMappedPublicPort(RpBuilder.KafkaPort)}"; +} diff --git a/tests/LocalPost.SqsConsumer.Tests/AsyncEnumTests.cs b/tests/LocalPost.SqsConsumer.Tests/AsyncEnumTests.cs deleted file mode 100644 index c6a615f..0000000 --- a/tests/LocalPost.SqsConsumer.Tests/AsyncEnumTests.cs +++ /dev/null @@ -1,10 +0,0 @@ -namespace LocalPost.SqsConsumer.Tests; - -public class AsyncEnumTests -{ - [Fact] - public async Task multi_enumerators() - { - - } -} diff --git 
a/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs b/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs new file mode 100644 index 0000000..ff3d532 --- /dev/null +++ b/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs @@ -0,0 +1,76 @@ +using Amazon.Extensions.NETCore.Setup; +using Amazon.Runtime; +using Amazon.SQS; +using LocalPost.SqsConsumer.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; +using Testcontainers.LocalStack; +using Xunit.Abstractions; + +namespace LocalPost.SqsConsumer.Tests; + +public class ConsumerTests(ITestOutputHelper output) : IAsyncLifetime +{ + // Called for each test, since each test instantiates a new class instance + private readonly LocalStackContainer _container = new LocalStackBuilder() + .WithImage("localstack/localstack:3.4") + .WithEnvironment("SERVICES", "sqs") + .Build(); + + + private const string QueueName = "weather-forecasts"; + + private IAmazonSQS _sqs; + + private string? _queueUrl; + + public async Task InitializeAsync() + { + await _container.StartAsync(); + + _sqs = new AmazonSQSClient(new BasicAWSCredentials("LSIAQAAAAAAVNCBMPNSG", "any"), + new AmazonSQSConfig { ServiceURL = _container.GetConnectionString() }); + + var createResponse = await _sqs.CreateQueueAsync(QueueName); + _queueUrl = createResponse.QueueUrl; + } + + public Task DisposeAsync() => _container.StopAsync(); + + [Fact] + public async Task handles_messages() + { + var hostBuilder = Host.CreateApplicationBuilder(); + + var received = new List(); + + hostBuilder.Services + .AddDefaultAWSOptions(new AWSOptions() + { + DefaultClientConfig = { ServiceURL = _container.GetConnectionString() }, + Credentials = new BasicAWSCredentials("LSIAQAAAAAAVNCBMPNSG", "any") + }) + .AddAWSService() + .AddSqsConsumers(sqs => sqs.AddConsumer(QueueName, HandlerStack.For(async (payload, _) => + { + received.Add(payload); + }) + .UseSqsPayload() + .Acknowledge() + .Scoped() + .Trace())); + + var host = 
hostBuilder.Build(); + + await host.StartAsync(); + + await _sqs.SendMessageAsync(_queueUrl, "It will rainy in London tomorrow"); + + await Task.Delay(1_000); + + Assert.Single(received); + Assert.Equal("It will rainy in London tomorrow", received[0]); + + await host.StopAsync(); + } +} diff --git a/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj b/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj index 42aee8d..051792e 100644 --- a/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj +++ b/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj @@ -16,6 +16,9 @@ + + + From f48be0f1a20a4197bc19d041c019b70b6e62daed Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Fri, 31 May 2024 20:31:53 +0000 Subject: [PATCH 13/33] chore: deps upgrade --- .config/dotnet-tools.json | 4 ++-- docker-compose.yml | 2 +- src/LocalPost.KafkaConsumer/ConsumeContext.cs | 4 ++-- .../LocalPost.KafkaConsumer.csproj | 2 +- src/LocalPost.Polly/LocalPost.Polly.csproj | 2 +- .../LocalPost.SnsPublisher.csproj | 2 +- src/LocalPost.SqsConsumer/ConsumeContext.cs | 4 ++-- .../LocalPost.SqsConsumer.csproj | 2 +- .../AsyncEnumerable/AsyncEnumeratorEx.cs | 9 ++------- src/LocalPost/AsyncEnumerable/BatchBuilder.cs | 20 +++++++++---------- .../BatchingAsyncEnumerable.cs | 2 +- src/LocalPost/LocalPost.csproj | 2 +- .../ConsumerTests.cs | 2 +- .../LocalPost.KafkaConsumer.Tests.csproj | 9 ++++----- tests/LocalPost.KafkaConsumer.Tests/Usings.cs | 1 + .../LocalPost.SnsPublisher.Tests.csproj | 9 ++++----- .../ConsumerTests.cs | 4 ++-- .../LocalPost.SqsConsumer.Tests.csproj | 9 ++++----- tests/LocalPost.SqsConsumer.Tests/Usings.cs | 1 + tests/LocalPost.Tests/LocalPost.Tests.csproj | 9 ++++----- 20 files changed, 46 insertions(+), 53 deletions(-) diff --git a/.config/dotnet-tools.json b/.config/dotnet-tools.json index c37c1b5..9045ea0 100644 --- a/.config/dotnet-tools.json +++ b/.config/dotnet-tools.json @@ -3,13 +3,13 @@ "isRoot": true, 
"tools": { "dotnet-sonarscanner": { - "version": "5.9.2", + "version": "6.2.0", "commands": [ "dotnet-sonarscanner" ] }, "gitversion.tool": { - "version": "5.11.1", + "version": "5.12.0", "commands": [ "dotnet-gitversion" ] diff --git a/docker-compose.yml b/docker-compose.yml index 7088fa0..04c0910 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -27,7 +27,7 @@ services: redpanda: # Mainly from: https://docs.redpanda.com/redpanda-labs/docker-compose/single-broker/ # See also: https://docs.redpanda.com/current/deploy/deployment-option/self-hosted/docker-image/ - image: docker.redpanda.com/redpandadata/redpanda:v24.1.2 + image: docker.redpanda.com/redpandadata/redpanda:v24.1.5 container_name: redpanda command: - redpanda start diff --git a/src/LocalPost.KafkaConsumer/ConsumeContext.cs b/src/LocalPost.KafkaConsumer/ConsumeContext.cs index d10a566..936b4e6 100644 --- a/src/LocalPost.KafkaConsumer/ConsumeContext.cs +++ b/src/LocalPost.KafkaConsumer/ConsumeContext.cs @@ -55,8 +55,8 @@ public async Task> Transform(Func, [PublicAPI] public readonly record struct BatchConsumeContext { - internal sealed class Builder(MaxSize batchMaxSize, TimeSpan timeWindow, CancellationToken ct = default) - : BoundedBatchBuilderBase, BatchConsumeContext>(batchMaxSize, timeWindow, ct) + internal sealed class Builder(MaxSize batchMaxSize, TimeSpan timeWindowDuration, CancellationToken ct = default) + : BoundedBatchBuilderBase, BatchConsumeContext>(batchMaxSize, timeWindowDuration, ct) { public override BatchConsumeContext Build() { diff --git a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj index c09b55e..cac8a00 100644 --- a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj +++ b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj @@ -51,7 +51,7 @@ - + diff --git a/src/LocalPost.Polly/LocalPost.Polly.csproj b/src/LocalPost.Polly/LocalPost.Polly.csproj index 96e36f6..ffc50c5 100644 --- 
a/src/LocalPost.Polly/LocalPost.Polly.csproj +++ b/src/LocalPost.Polly/LocalPost.Polly.csproj @@ -47,7 +47,7 @@ - + diff --git a/src/LocalPost.SnsPublisher/LocalPost.SnsPublisher.csproj b/src/LocalPost.SnsPublisher/LocalPost.SnsPublisher.csproj index bf2064c..ac6334e 100644 --- a/src/LocalPost.SnsPublisher/LocalPost.SnsPublisher.csproj +++ b/src/LocalPost.SnsPublisher/LocalPost.SnsPublisher.csproj @@ -45,7 +45,7 @@ - + diff --git a/src/LocalPost.SqsConsumer/ConsumeContext.cs b/src/LocalPost.SqsConsumer/ConsumeContext.cs index 713aaad..129be29 100644 --- a/src/LocalPost.SqsConsumer/ConsumeContext.cs +++ b/src/LocalPost.SqsConsumer/ConsumeContext.cs @@ -62,8 +62,8 @@ public async Task> Transform(Func, [PublicAPI] public readonly record struct BatchConsumeContext { - internal sealed class Builder(MaxSize batchMaxSize, TimeSpan timeWindow, CancellationToken ct = default) - : BoundedBatchBuilderBase, BatchConsumeContext>(batchMaxSize, timeWindow, ct) + internal sealed class Builder(MaxSize batchMaxSize, TimeSpan timeWindowDuration, CancellationToken ct = default) + : BoundedBatchBuilderBase, BatchConsumeContext>(batchMaxSize, timeWindowDuration, ct) { // TODO Batch.DrainToImmutable() public override BatchConsumeContext Build() => new(Batch.ToImmutable()); diff --git a/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj b/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj index 37c4c6e..492fb31 100644 --- a/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj +++ b/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj @@ -51,7 +51,7 @@ - + diff --git a/src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs b/src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs index f2b945f..7bec94e 100644 --- a/src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs +++ b/src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs @@ -15,16 +15,11 @@ public static async ValueTask Consume(this IAsyncEnumerator source, Can if (completed) // Ideally there should be a better way to communicate the 
completion... + // But because it is usually used for long-running enumerators, fine throw new EndOfEnumeratorException("Source is empty"); return source.Current; } } -internal sealed class EndOfEnumeratorException : Exception -{ - public EndOfEnumeratorException(string message) : base(message) - { - } -} - +internal sealed class EndOfEnumeratorException(string message) : Exception(message); diff --git a/src/LocalPost/AsyncEnumerable/BatchBuilder.cs b/src/LocalPost/AsyncEnumerable/BatchBuilder.cs index 6bd601e..ac4b2ba 100644 --- a/src/LocalPost/AsyncEnumerable/BatchBuilder.cs +++ b/src/LocalPost/AsyncEnumerable/BatchBuilder.cs @@ -23,16 +23,16 @@ internal interface IBatchBuilder : IDisposable internal abstract class BatchBuilderBase : IBatchBuilder { - private readonly TimeSpan _timeWindowLength; + private readonly TimeSpan _timeWindowDuration; private readonly CancellationToken _ct; private CancellationTokenSource _timeWindow; private CancellationTokenTaskSource? _timeWindowTrigger; - protected BatchBuilderBase(TimeSpan timeWindow, CancellationToken ct = default) + protected BatchBuilderBase(TimeSpan timeWindowDuration, CancellationToken ct = default) { - _timeWindowLength = timeWindow; - _ct = ct; // TODO Rename to globalCancellation or something like that + _timeWindowDuration = timeWindowDuration; + _ct = ct; // Rename to globalCancellation or something like that... 
_timeWindow = StartTimeWindow(); } @@ -52,10 +52,10 @@ protected BatchBuilderBase(TimeSpan timeWindow, CancellationToken ct = default) private CancellationTokenSource StartTimeWindow() { if (_ct == CancellationToken.None) - return new CancellationTokenSource(_timeWindowLength); + return new CancellationTokenSource(_timeWindowDuration); var timeWindow = CancellationTokenSource.CreateLinkedTokenSource(_ct); - timeWindow.CancelAfter(_timeWindowLength); + timeWindow.CancelAfter(_timeWindowDuration); return timeWindow; } @@ -90,8 +90,8 @@ internal abstract class BoundedBatchBuilderBase : BatchBuilderBase.Builder Batch; - protected BoundedBatchBuilderBase(MaxSize batchMaxSize, TimeSpan timeWindow, CancellationToken ct = default) : - base(timeWindow, ct) + protected BoundedBatchBuilderBase(MaxSize batchMaxSize, TimeSpan timeWindowDuration, CancellationToken ct = default) : + base(timeWindowDuration, ct) { _batchMaxSize = batchMaxSize; Batch = ImmutableArray.CreateBuilder(_batchMaxSize); @@ -118,8 +118,8 @@ public override void Reset() } } -internal sealed class BoundedBatchBuilder(MaxSize batchMaxSize, TimeSpan timeWindow, CancellationToken ct = default) - : BoundedBatchBuilderBase>(batchMaxSize, timeWindow, ct) +internal sealed class BoundedBatchBuilder(MaxSize batchMaxSize, TimeSpan timeWindowDuration, CancellationToken ct = default) + : BoundedBatchBuilderBase>(batchMaxSize, timeWindowDuration, ct) { public override IReadOnlyList Build() => Batch; // TODO ImmutableArray } diff --git a/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs b/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs index d816288..398d9a6 100644 --- a/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs +++ b/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs @@ -12,7 +12,7 @@ public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = de TOut completedBatch; try { - var consumeResult = await source.Consume(ct); // FIXME batchBuilder.TimeWindow + var consumeResult 
= await source.Consume(batchBuilder.TimeWindow); var added = batchBuilder.TryAdd(consumeResult); if (!added) { diff --git a/src/LocalPost/LocalPost.csproj b/src/LocalPost/LocalPost.csproj index cdb99c7..d3e5622 100644 --- a/src/LocalPost/LocalPost.csproj +++ b/src/LocalPost/LocalPost.csproj @@ -64,7 +64,7 @@ - + diff --git a/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs b/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs index f1b1570..d3a0c5c 100644 --- a/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs +++ b/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs @@ -84,7 +84,7 @@ public async Task handles_messages() await Task.Delay(1_000); // "App is working" - Assert.Equal(2, received.Count); + received.Should().HaveCount(2); } finally { diff --git a/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj b/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj index 1282380..2c91812 100644 --- a/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj +++ b/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj @@ -2,7 +2,6 @@ net6;net8 - enable false @@ -19,10 +18,10 @@ - - - - + + + + diff --git a/tests/LocalPost.KafkaConsumer.Tests/Usings.cs b/tests/LocalPost.KafkaConsumer.Tests/Usings.cs index c802f44..91743bb 100644 --- a/tests/LocalPost.KafkaConsumer.Tests/Usings.cs +++ b/tests/LocalPost.KafkaConsumer.Tests/Usings.cs @@ -1 +1,2 @@ global using Xunit; +global using FluentAssertions; diff --git a/tests/LocalPost.SnsPublisher.Tests/LocalPost.SnsPublisher.Tests.csproj b/tests/LocalPost.SnsPublisher.Tests/LocalPost.SnsPublisher.Tests.csproj index 1decf69..3126640 100644 --- a/tests/LocalPost.SnsPublisher.Tests/LocalPost.SnsPublisher.Tests.csproj +++ b/tests/LocalPost.SnsPublisher.Tests/LocalPost.SnsPublisher.Tests.csproj @@ -2,7 +2,6 @@ net6;net8 - enable false @@ -13,10 +12,10 @@ - - - - + + + + diff --git a/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs 
b/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs index ff3d532..c4ba8f0 100644 --- a/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs +++ b/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs @@ -68,8 +68,8 @@ public async Task handles_messages() await Task.Delay(1_000); - Assert.Single(received); - Assert.Equal("It will rainy in London tomorrow", received[0]); + received.Should().HaveCount(1); + received[0].Should().Be("It will rainy in London tomorrow"); await host.StopAsync(); } diff --git a/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj b/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj index 051792e..75866c0 100644 --- a/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj +++ b/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj @@ -2,7 +2,6 @@ net6;net8 - enable false @@ -19,10 +18,10 @@ - - - - + + + + diff --git a/tests/LocalPost.SqsConsumer.Tests/Usings.cs b/tests/LocalPost.SqsConsumer.Tests/Usings.cs index c802f44..91743bb 100644 --- a/tests/LocalPost.SqsConsumer.Tests/Usings.cs +++ b/tests/LocalPost.SqsConsumer.Tests/Usings.cs @@ -1 +1,2 @@ global using Xunit; +global using FluentAssertions; diff --git a/tests/LocalPost.Tests/LocalPost.Tests.csproj b/tests/LocalPost.Tests/LocalPost.Tests.csproj index 15cfc3a..0fdfd4f 100644 --- a/tests/LocalPost.Tests/LocalPost.Tests.csproj +++ b/tests/LocalPost.Tests/LocalPost.Tests.csproj @@ -2,7 +2,6 @@ net6;net8 - enable false @@ -14,10 +13,10 @@ - - - - + + + + From d56ffa546f65049daad9903462c63fc9f9c8537c Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Sat, 1 Jun 2024 14:39:53 +0000 Subject: [PATCH 14/33] SnsPublisher for later --- .../DependencyInjection/HealthChecks.cs | 26 --------- .../ServiceRegistration.cs | 34 ----------- .../LocalPost.SnsPublisher.csproj | 56 ------------------- .../PublishBatchRequestEntryEx.cs | 10 ---- src/LocalPost.SnsPublisher/Publisher.cs | 54 ------------------ .../PublisherOptions.cs | 13 ----- 
src/LocalPost.SnsPublisher/Sender.cs | 32 ----------- src/LocalPost.SnsPublisher/SnsBatchBuilder.cs | 56 ------------------- .../TopicPublishRequests.cs | 35 ------------ .../LocalPost.SnsPublisher.Tests.csproj | 25 --------- tests/LocalPost.SnsPublisher.Tests/Usings.cs | 1 - 11 files changed, 342 deletions(-) delete mode 100644 src/LocalPost.SnsPublisher/DependencyInjection/HealthChecks.cs delete mode 100644 src/LocalPost.SnsPublisher/DependencyInjection/ServiceRegistration.cs delete mode 100644 src/LocalPost.SnsPublisher/LocalPost.SnsPublisher.csproj delete mode 100644 src/LocalPost.SnsPublisher/PublishBatchRequestEntryEx.cs delete mode 100644 src/LocalPost.SnsPublisher/Publisher.cs delete mode 100644 src/LocalPost.SnsPublisher/PublisherOptions.cs delete mode 100644 src/LocalPost.SnsPublisher/Sender.cs delete mode 100644 src/LocalPost.SnsPublisher/SnsBatchBuilder.cs delete mode 100644 src/LocalPost.SnsPublisher/TopicPublishRequests.cs delete mode 100644 tests/LocalPost.SnsPublisher.Tests/LocalPost.SnsPublisher.Tests.csproj delete mode 100644 tests/LocalPost.SnsPublisher.Tests/Usings.cs diff --git a/src/LocalPost.SnsPublisher/DependencyInjection/HealthChecks.cs b/src/LocalPost.SnsPublisher/DependencyInjection/HealthChecks.cs deleted file mode 100644 index 28371ad..0000000 --- a/src/LocalPost.SnsPublisher/DependencyInjection/HealthChecks.cs +++ /dev/null @@ -1,26 +0,0 @@ -using System.Diagnostics.CodeAnalysis; -using Amazon.SimpleNotificationService.Model; -using LocalPost.DependencyInjection; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Diagnostics.HealthChecks; - -namespace LocalPost.SnsPublisher.DependencyInjection; - -public static class HealthChecks -{ - [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] - public static IHealthChecksBuilder AddAmazonSnsBatchPublisherReadinessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, - TimeSpan? 
timeout = default) => builder - // FIXME Add queue supervisor - .AddBackgroundQueueConsumerReadinessCheck(name, failureStatus, tags, timeout); - - [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] - public static IHealthChecksBuilder AddAmazonSnsBatchPublisherLivenessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default, - TimeSpan? timeout = default) => builder - // FIXME Add queue supervisor - .AddBackgroundQueueConsumerLivenessCheck(name, failureStatus, tags, timeout); - - // TODO Optional checks for the in-memory queues... Like if they are full or not -} diff --git a/src/LocalPost.SnsPublisher/DependencyInjection/ServiceRegistration.cs b/src/LocalPost.SnsPublisher/DependencyInjection/ServiceRegistration.cs deleted file mode 100644 index d51281c..0000000 --- a/src/LocalPost.SnsPublisher/DependencyInjection/ServiceRegistration.cs +++ /dev/null @@ -1,34 +0,0 @@ -using Amazon.SimpleNotificationService.Model; -using LocalPost.DependencyInjection; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.DependencyInjection.Extensions; -using Microsoft.Extensions.Options; - -namespace LocalPost.SnsPublisher.DependencyInjection; - -public static class ServiceRegistration -{ - public static OptionsBuilder AddAmazonSnsBatchPublisher(this IServiceCollection services) - { - services.TryAddSingleton(); - - return services.AddAmazonSnsBatchPublisher(builder => - builder.MiddlewareStackBuilder.SetHandler()); - } - - public static OptionsBuilder AddAmazonSnsBatchPublisher(this IServiceCollection services, - Action.ConsumerBuilder> configure) - { - services.TryAddSingleton(); - services.TryAddSingleton(provider => provider.GetRequiredService()); - - services - .AddBackgroundQueueConsumer(configure) - .Configure>( - (options, consumerOptions) => { options.MaxConcurrency = consumerOptions.Value.Sender.MaxConcurrency; }); - - return services.AddOptions(); - - - } -} diff --git 
a/src/LocalPost.SnsPublisher/LocalPost.SnsPublisher.csproj b/src/LocalPost.SnsPublisher/LocalPost.SnsPublisher.csproj deleted file mode 100644 index ac6334e..0000000 --- a/src/LocalPost.SnsPublisher/LocalPost.SnsPublisher.csproj +++ /dev/null @@ -1,56 +0,0 @@ - - - - netstandard2.0 - true - - false - - LocalPost.SnsPublisher - background;task;queue;amazon;sns;aws - Local (in-process) background queue for sending to Amazon SNS. - Alexey Shokov - - README.md - MIT - https://github.com/alexeyshockov/LocalPost - git - true - - - - - - - - - - true - - - - true - true - true - true - snupkg - - - true - - - - - - - - - - - - - - - - - diff --git a/src/LocalPost.SnsPublisher/PublishBatchRequestEntryEx.cs b/src/LocalPost.SnsPublisher/PublishBatchRequestEntryEx.cs deleted file mode 100644 index f592f03..0000000 --- a/src/LocalPost.SnsPublisher/PublishBatchRequestEntryEx.cs +++ /dev/null @@ -1,10 +0,0 @@ -using System.Text; -using Amazon.SimpleNotificationService.Model; - -namespace LocalPost.SnsPublisher; - -internal static class PublishBatchRequestEntryEx -{ - // Include attributes in the calculation later?.. 
- public static int CalculateSize(this PublishBatchRequestEntry entry) => Encoding.UTF8.GetByteCount(entry.Message); -} diff --git a/src/LocalPost.SnsPublisher/Publisher.cs b/src/LocalPost.SnsPublisher/Publisher.cs deleted file mode 100644 index 527470d..0000000 --- a/src/LocalPost.SnsPublisher/Publisher.cs +++ /dev/null @@ -1,54 +0,0 @@ -using System.Collections.Immutable; -using Amazon.SimpleNotificationService.Model; - -namespace LocalPost.SnsPublisher; - -public interface ISnsPublisher -{ - IBackgroundQueue ForTopic(string arn); -} - -internal sealed partial class Publisher : ISnsPublisher, IAsyncEnumerable, - IBackgroundQueueManager, IDisposable -{ - private ImmutableDictionary _channels = - ImmutableDictionary.Empty; - - private readonly AsyncEnumerableMerger _combinedReader = new(true); - - private readonly PublisherOptions _options; - - public Publisher(PublisherOptions options) - { - _options = options; - } - - public bool IsClosed { get; private set; } - - public IBackgroundQueue ForTopic(string arn) => - ImmutableInterlocked.GetOrAdd(ref _channels, arn, RegisterQueueFor); - - private TopicPublishRequests RegisterQueueFor(string arn) - { - var queue = new TopicPublishRequests(_options.PerTopic, arn); - _combinedReader.Add(queue); - - return queue; - } - - public IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) => - _combinedReader.GetAsyncEnumerator(ct); - - public async ValueTask CompleteAsync(CancellationToken ct = default) - { - // TODO Do not allow to register new topics, as they won't be processed here... 
- await Task.WhenAll(_channels.Values.Select(q => q.CompleteAsync(ct).AsTask())); - IsClosed = true; - } - - public void Dispose() - { - _combinedReader.Dispose(); - _channels = ImmutableDictionary.Empty; - } -} diff --git a/src/LocalPost.SnsPublisher/PublisherOptions.cs b/src/LocalPost.SnsPublisher/PublisherOptions.cs deleted file mode 100644 index d201533..0000000 --- a/src/LocalPost.SnsPublisher/PublisherOptions.cs +++ /dev/null @@ -1,13 +0,0 @@ -namespace LocalPost.SnsPublisher; - -public sealed record PublisherOptions -{ - // Same for Publish and PublishBatch - public const int RequestMaxSize = 262_144; - - public const int BatchMaxSize = 10; - - public ConsumerOptions Sender { get; init; } = new(); - - public QueueOptions PerTopic { get; init; } = new(); -} diff --git a/src/LocalPost.SnsPublisher/Sender.cs b/src/LocalPost.SnsPublisher/Sender.cs deleted file mode 100644 index f27c520..0000000 --- a/src/LocalPost.SnsPublisher/Sender.cs +++ /dev/null @@ -1,32 +0,0 @@ -using System.Diagnostics; -using Amazon.SimpleNotificationService; -using Amazon.SimpleNotificationService.Model; -using Microsoft.Extensions.Logging; - -namespace LocalPost.SnsPublisher; - -internal sealed class Sender : IHandler -{ - private static readonly ActivitySource Tracer = new(typeof(Sender).Namespace); - - private readonly ILogger _logger; - private readonly IAmazonSimpleNotificationService _sns; - - public Sender(ILogger logger, IAmazonSimpleNotificationService sns) - { - _logger = logger; - _sns = sns; - } - - public async Task InvokeAsync(PublishBatchRequest payload, CancellationToken ct) - { - using var span = Tracer.StartActivity(); - - _logger.LogTrace("Sending a batch of {Amount} message(s) to SNS...", payload.PublishBatchRequestEntries.Count); - var batchResponse = await _sns.PublishBatchAsync(payload, ct); - - if (batchResponse.Failed.Any()) - _logger.LogError("Batch entries failed: {FailedAmount} from {Amount}", - batchResponse.Failed.Count, 
payload.PublishBatchRequestEntries.Count); - } -} diff --git a/src/LocalPost.SnsPublisher/SnsBatchBuilder.cs b/src/LocalPost.SnsPublisher/SnsBatchBuilder.cs deleted file mode 100644 index fa3b2b5..0000000 --- a/src/LocalPost.SnsPublisher/SnsBatchBuilder.cs +++ /dev/null @@ -1,56 +0,0 @@ -using Amazon.SimpleNotificationService.Model; -using Nito.AsyncEx; - -namespace LocalPost.SnsPublisher; - -internal sealed class SnsBatchBuilder : IBatchBuilder -{ - private readonly CancellationTokenSource _timeWindow = new(TimeSpan.FromSeconds(1)); // TODO Configurable - private readonly CancellationTokenTaskSource _timeWindowTrigger; - private PublishBatchRequest? _batchRequest; - - public SnsBatchBuilder(string topicArn) - { - _batchRequest = new PublishBatchRequest - { - TopicArn = topicArn - }; - - _timeWindowTrigger = new CancellationTokenTaskSource(_timeWindow.Token); - } - - private PublishBatchRequest BatchRequest => _batchRequest ?? throw new ObjectDisposedException(nameof(SnsBatchBuilder)); - - public CancellationToken TimeWindow => _timeWindow.Token; - public Task TimeWindowTrigger => _timeWindowTrigger.Task; - public bool IsEmpty => BatchRequest.PublishBatchRequestEntries.Count == 0; - - private bool CanFit(PublishBatchRequestEntry entry) => - PublisherOptions.BatchMaxSize > BatchRequest.PublishBatchRequestEntries.Count - && - PublisherOptions.RequestMaxSize > BatchRequest.PublishBatchRequestEntries.Append(entry) - .Aggregate(0, (total, e) => total + e.CalculateSize()); - - public bool TryAdd(PublishBatchRequestEntry entry) - { - var canFit = CanFit(entry); - if (!canFit) - return false; - - if (string.IsNullOrEmpty(entry.Id)) - entry.Id = Guid.NewGuid().ToString(); - - BatchRequest.PublishBatchRequestEntries.Add(entry); - - return true; - } - - public PublishBatchRequest Build() => BatchRequest; - - public void Dispose() - { - _timeWindow.Dispose(); - _timeWindowTrigger.Dispose(); - _batchRequest = null; // Just make it unusable - } -} diff --git 
a/src/LocalPost.SnsPublisher/TopicPublishRequests.cs b/src/LocalPost.SnsPublisher/TopicPublishRequests.cs deleted file mode 100644 index 76f51ba..0000000 --- a/src/LocalPost.SnsPublisher/TopicPublishRequests.cs +++ /dev/null @@ -1,35 +0,0 @@ -using Amazon.SimpleNotificationService.Model; - -namespace LocalPost.SnsPublisher; - -internal sealed partial class Publisher -{ - private sealed class TopicPublishRequests : IBackgroundQueueManager, - IBackgroundQueue, IAsyncEnumerable - { - private readonly string _arn; - private readonly BackgroundQueue _queue; - - public TopicPublishRequests(QueueOptions options, string arn) - { - _arn = arn; - _queue = new BackgroundQueue(options); - } - - public IAsyncEnumerator GetAsyncEnumerator( - CancellationToken cancellationToken = default) => - _queue.Batch(() => new SnsBatchBuilder(_arn)).GetAsyncEnumerator(cancellationToken); - - public ValueTask Enqueue(PublishBatchRequestEntry item, CancellationToken ct = default) - { - if (item.CalculateSize() > PublisherOptions.RequestMaxSize) - throw new ArgumentOutOfRangeException(nameof(item), "Message is too big"); - - return _queue.Enqueue(item, ct); - } - - public bool IsClosed => _queue.IsClosed; - - public ValueTask CompleteAsync(CancellationToken ct = default) => _queue.CompleteAsync(ct); - } -} diff --git a/tests/LocalPost.SnsPublisher.Tests/LocalPost.SnsPublisher.Tests.csproj b/tests/LocalPost.SnsPublisher.Tests/LocalPost.SnsPublisher.Tests.csproj deleted file mode 100644 index 3126640..0000000 --- a/tests/LocalPost.SnsPublisher.Tests/LocalPost.SnsPublisher.Tests.csproj +++ /dev/null @@ -1,25 +0,0 @@ - - - - net6;net8 - - false - - - - - - - - - - - - - - - - - - - diff --git a/tests/LocalPost.SnsPublisher.Tests/Usings.cs b/tests/LocalPost.SnsPublisher.Tests/Usings.cs deleted file mode 100644 index c802f44..0000000 --- a/tests/LocalPost.SnsPublisher.Tests/Usings.cs +++ /dev/null @@ -1 +0,0 @@ -global using Xunit; From dafd285d84bf8b7fe12269dedf403842eb76663d Mon Sep 17 00:00:00 
2001 From: Alexey Shokov Date: Fri, 21 Jun 2024 08:55:39 +0000 Subject: [PATCH 15/33] WIP --- LocalPost.sln | 6 + samples/KafkaConsumerApp/Program.cs | 4 +- samples/KafkaConsumerApp/appsettings.json | 3 +- samples/SqsConsumerApp/Program.cs | 84 ++++-- samples/SqsConsumerApp/README.md | 8 + samples/SqsConsumerApp/SqsConsumerApp.csproj | 3 + .../appsettings.Development.json | 4 + src/LocalPost.KafkaConsumer/ConsumeContext.cs | 118 ++++---- .../HealthChecksBuilderEx.cs | 12 +- .../DependencyInjection/KafkaBuilder.cs | 146 ++++++---- .../ServiceCollectionEx.cs | 9 +- src/LocalPost.KafkaConsumer/HandlerStackEx.cs | 63 +++-- src/LocalPost.KafkaConsumer/MessageSource.cs | 147 ++++++---- src/LocalPost.KafkaConsumer/Options.cs | 134 ++++++++- src/LocalPost.KafkaConsumer/Tracing.cs | 15 +- src/LocalPost.SqsConsumer/ConsumeContext.cs | 94 ++++--- .../HealthChecksBuilderEx.cs | 12 +- .../ServiceCollectionEx.cs | 3 +- .../DependencyInjection/SqsBuilder.cs | 142 +++++++--- src/LocalPost.SqsConsumer/HandlerStackEx.cs | 77 +++--- src/LocalPost.SqsConsumer/MessageSource.cs | 114 +++++--- src/LocalPost.SqsConsumer/Options.cs | 130 ++++++--- src/LocalPost.SqsConsumer/QueueClient.cs | 20 +- src/LocalPost.SqsConsumer/Tracing.cs | 19 +- .../AsyncEnumerable/AsyncEnumerableEx.cs | 4 +- .../AsyncEnumerable/AsyncEnumeratorEx.cs | 1 + src/LocalPost/AsyncEnumerable/BatchBuilder.cs | 46 ++-- .../BatchingAsyncEnumerable.cs | 4 +- .../AsyncEnumerable/ConcurrentBuffer.cs | 66 ++--- .../BackgroundQueue/BackgroundJobQueue.cs | 2 +- .../BackgroundQueue/BackgroundQueue.cs | 143 ++++++---- .../BackgroundQueuesBuilder.cs | 99 +++++-- .../DependencyInjection/HealthChecks.cs | 2 +- .../BackgroundQueue/HandlerStackEx.cs | 7 + src/LocalPost/BackgroundQueue/Options.cs | 112 +++++--- src/LocalPost/BackgroundQueueConsumer.cs | 242 +++++++++-------- src/LocalPost/ConcurrentHostedServices.cs | 168 ++++++++++-- .../DependencyInjection/HealthChecks.cs | 85 ++++-- .../DependencyInjection/IAssistantService.cs | 28 
++ .../DependencyInjection/INamedService.cs | 14 +- .../ServiceCollectionEx.cs | 104 +++---- .../ServiceCollectionTools.cs | 121 +++++---- .../ServiceProviderLookups.cs | 63 ++++- src/LocalPost/Handler.cs | 27 ++ src/LocalPost/HandlerStack.cs | 255 ++++++++++++++++++ src/LocalPost/HandlerStackEx.cs | 31 +-- src/LocalPost/LocalPost.csproj | 6 - src/LocalPost/Options.cs | 3 + src/LocalPost/QueuePublisher.cs | 4 +- src/LocalPost/Reflection.cs | 10 + .../ConsumerTests.cs | 23 +- .../LocalPost.KafkaConsumer.Tests.csproj | 3 +- .../{RpBuilder.cs => RedpandaContainer.cs} | 34 ++- .../ConsumerTests.cs | 17 +- .../LocalPost.SqsConsumer.Tests.csproj | 1 + .../BatchingAsyncEnumerableTests.cs | 6 +- 56 files changed, 2132 insertions(+), 966 deletions(-) create mode 100644 src/LocalPost/DependencyInjection/IAssistantService.cs create mode 100644 src/LocalPost/Options.cs rename tests/LocalPost.KafkaConsumer.Tests/{RpBuilder.cs => RedpandaContainer.cs} (71%) diff --git a/LocalPost.sln b/LocalPost.sln index 84a536d..7cc06f2 100644 --- a/LocalPost.sln +++ b/LocalPost.sln @@ -36,6 +36,8 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.KafkaConsumer.Tes EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.RabbitMqConsumer.Tests", "tests\LocalPost.RabbitMqConsumer.Tests\LocalPost.RabbitMqConsumer.Tests.csproj", "{92461B19-6794-4EB2-BFFE-DCD9CDFFD6D8}" EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.NatsConsumer", "src\LocalPost.NatsConsumer\LocalPost.NatsConsumer.csproj", "{05A771C9-0987-484A-8A7F-B6B1180F55F9}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -110,6 +112,10 @@ Global {92461B19-6794-4EB2-BFFE-DCD9CDFFD6D8}.Debug|Any CPU.Build.0 = Debug|Any CPU {92461B19-6794-4EB2-BFFE-DCD9CDFFD6D8}.Release|Any CPU.ActiveCfg = Release|Any CPU {92461B19-6794-4EB2-BFFE-DCD9CDFFD6D8}.Release|Any CPU.Build.0 = Release|Any CPU + 
{05A771C9-0987-484A-8A7F-B6B1180F55F9}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {05A771C9-0987-484A-8A7F-B6B1180F55F9}.Debug|Any CPU.Build.0 = Debug|Any CPU + {05A771C9-0987-484A-8A7F-B6B1180F55F9}.Release|Any CPU.ActiveCfg = Release|Any CPU + {05A771C9-0987-484A-8A7F-B6B1180F55F9}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(NestedProjects) = preSolution {46FC61E6-D0FB-4D7D-A81B-20EF8D8D1F4E} = {405721DC-F290-4191-B638-9907D5EB042B} diff --git a/samples/KafkaConsumerApp/Program.cs b/samples/KafkaConsumerApp/Program.cs index d8cb038..810868d 100644 --- a/samples/KafkaConsumerApp/Program.cs +++ b/samples/KafkaConsumerApp/Program.cs @@ -12,7 +12,7 @@ kafka.Defaults .Bind(builder.Configuration.GetSection("Kafka")) .ValidateDataAnnotations(); - kafka.AddConsumer("one-and-the-only", HandlerStack.From() + kafka.AddConsumer("example-consumer-group", HandlerStack.From() .UseKafkaPayload() .DeserializeJson() .Acknowledge() @@ -20,7 +20,7 @@ .Trace() ) .Bind(builder.Configuration.GetSection("Kafka:Consumer")) - .Configure(options => + .ConfigureConsumer(options => { options.AutoOffsetReset = AutoOffsetReset.Earliest; // options.EnableAutoCommit = false; // TODO DryRun diff --git a/samples/KafkaConsumerApp/appsettings.json b/samples/KafkaConsumerApp/appsettings.json index b202834..c3e961b 100644 --- a/samples/KafkaConsumerApp/appsettings.json +++ b/samples/KafkaConsumerApp/appsettings.json @@ -8,8 +8,7 @@ "Kafka": { "BootstrapServers": "127.0.0.1:19092", "Consumer": { - "Topic": "weather-forecasts", - "GroupId": "example-consumer-group" + "Topic": "weather-forecasts" } } } diff --git a/samples/SqsConsumerApp/Program.cs b/samples/SqsConsumerApp/Program.cs index a418b5a..51222c9 100644 --- a/samples/SqsConsumerApp/Program.cs +++ b/samples/SqsConsumerApp/Program.cs @@ -1,4 +1,5 @@ using Amazon.SQS; +using JetBrains.Annotations; using LocalPost; using LocalPost.SqsConsumer; using LocalPost.SqsConsumer.DependencyInjection; @@ -11,35 +12,36 @@ 
.AddSerilog() // See https://nblumhardt.com/2024/04/serilog-net8-0-minimal/#hooking-up-aspnet-core-and-iloggert .AddDefaultAWSOptions(builder.Configuration.GetAWSOptions()) .AddAWSService(); + + + builder.Services .AddScoped() .AddSqsConsumers(sqs => { - sqs.Defaults.Configure(options => options.MaxConcurrency = 100); - sqs.AddConsumer("weather-forecasts", - HandlerStack.From() - .UseSqsPayload() - .DeserializeJson() - .Acknowledge() - .Scoped() - .Touch(next => async (context, ct) => - { - using var logBuffer = LogBuffer.BeginScope(); - try - { - await next(context, ct); - } - catch (OperationCanceledException e) when (e.CancellationToken == ct) - { - throw; // Not a real error - } - catch (Exception) - { - logBuffer.Flush(); - throw; - } - }) - .Trace()); + // sqs.Defaults.Configure(options => options.MaxConcurrency = 100); + // sqs.AddConsumer("weather-forecasts", + // HandlerStack.From() + // .UseSqsPayload() + // .DeserializeJson() + // .Acknowledge() + // .Scoped() + // .LogFingersCrossed() + // .Trace()); + // sqs.Defaults.Configure(options => options.MaxConcurrency = 100); + // sqs.AddConsumer("weather-forecasts", + // Pipeline.Create( + // HandlerStack.From() + // .UseSqsPayload() + // .DeserializeJson() + // .Acknowledge() + // .Scoped() + // .LogFingersCrossed() + // .Trace(), + // maxConcurrency: 100, + // breakOnException: false + // ).Buffer(100) + // ); }); // TODO Health + Supervisor @@ -47,6 +49,12 @@ +record ConsumerOptions +{ + public int MaxConcurrency { get; set; } = 1; +} + +[UsedImplicitly] public record WeatherForecast(int TemperatureC, int TemperatureF, string Summary); public class MessageHandler : IHandler @@ -55,5 +63,31 @@ public async ValueTask InvokeAsync(WeatherForecast payload, CancellationToken ct { await Task.Delay(1_000, ct); Console.WriteLine(payload); + + // To show the failure handling + if (payload.TemperatureC > 35) + throw new InvalidOperationException("Too hot"); } } + +public static class FingersCrossedLogging +{ + public 
static HandlerFactory LogFingersCrossed(this HandlerFactory hf) => + hf.Touch(next => async (context, ct) => + { + using var logBuffer = LogBuffer.BeginScope(); + try + { + await next(context, ct); + } + catch (OperationCanceledException e) when (e.CancellationToken == ct) + { + throw; // Not a real error + } + catch (Exception) + { + logBuffer.Flush(); + throw; + } + }); +} diff --git a/samples/SqsConsumerApp/README.md b/samples/SqsConsumerApp/README.md index cddfea0..fdeb0ee 100644 --- a/samples/SqsConsumerApp/README.md +++ b/samples/SqsConsumerApp/README.md @@ -1 +1,9 @@ # SQS Consumer + +## Configuration + +### Queue name + +TODO + +## diff --git a/samples/SqsConsumerApp/SqsConsumerApp.csproj b/samples/SqsConsumerApp/SqsConsumerApp.csproj index b8e1fc5..9f6f3e9 100644 --- a/samples/SqsConsumerApp/SqsConsumerApp.csproj +++ b/samples/SqsConsumerApp/SqsConsumerApp.csproj @@ -2,6 +2,8 @@ net8 + + CA1050 @@ -11,6 +13,7 @@ + diff --git a/samples/SqsConsumerApp/appsettings.Development.json b/samples/SqsConsumerApp/appsettings.Development.json index 7758c3f..75a3ba5 100644 --- a/samples/SqsConsumerApp/appsettings.Development.json +++ b/samples/SqsConsumerApp/appsettings.Development.json @@ -1,5 +1,9 @@ { "AWS": { "ServiceURL": "http://localhost:8000" + }, + "WeatherSqsConsumer": { + "MaxConcurrency": 100, + "Attributes": null } } diff --git a/src/LocalPost.KafkaConsumer/ConsumeContext.cs b/src/LocalPost.KafkaConsumer/ConsumeContext.cs index 936b4e6..82fab2d 100644 --- a/src/LocalPost.KafkaConsumer/ConsumeContext.cs +++ b/src/LocalPost.KafkaConsumer/ConsumeContext.cs @@ -4,12 +4,12 @@ namespace LocalPost.KafkaConsumer; -internal static class ConsumeContext -{ - public static BatchBuilderFactory, BatchConsumeContext> BatchBuilder( - MaxSize batchMaxSize, TimeSpan timeWindow) => ct => - new BatchConsumeContext.Builder(batchMaxSize, timeWindow, ct); -} +// internal static class ConsumeContext +// { +// public static BatchBuilderFactory, BatchConsumeContext> BatchBuilder( 
+// MaxSize batchMaxSize, TimeSpan timeWindow) => ct => +// new BatchConsumeContext.Builder(batchMaxSize, timeWindow, ct); +// } [PublicAPI] public readonly record struct ConsumeContext @@ -52,56 +52,56 @@ public async Task> Transform(Func, public static implicit operator T(ConsumeContext context) => context.Payload; } -[PublicAPI] -public readonly record struct BatchConsumeContext -{ - internal sealed class Builder(MaxSize batchMaxSize, TimeSpan timeWindowDuration, CancellationToken ct = default) - : BoundedBatchBuilderBase, BatchConsumeContext>(batchMaxSize, timeWindowDuration, ct) - { - public override BatchConsumeContext Build() - { -// #if NET6_0_OR_GREATER -// ReadOnlySpan s = CollectionsMarshal.AsSpan(Batch) -// var ia = s.ToImmutableArray(); -// return new BatchConsumeContext(Batch); -// #else -// return new BatchConsumeContext(Batch.ToImmutableArray()); -// #endif - return new BatchConsumeContext(Batch); - } - } - - // TODO ImmutableArray - public readonly IReadOnlyList> Messages; - - internal BatchConsumeContext(IReadOnlyList> messages) - { - if (messages.Count == 0) - throw new ArgumentException("Batch must contain at least one message", nameof(messages)); - - Messages = messages; - } - - public BatchConsumeContext Transform(ConsumeContext[] payload) => new(payload); - - public BatchConsumeContext Transform(IEnumerable> payload) => - Transform(payload.ToArray()); - - public BatchConsumeContext Transform(IEnumerable batchPayload) => - Transform(Messages.Zip(batchPayload, (message, payload) => message.Transform(payload))); - - public BatchConsumeContext Transform(Func, TOut> transform) - { - // TODO Parallel LINQ - var messages = Messages.Select(transform); - return Transform(messages); - } - - public async Task> Transform(Func, Task> transform) - { - var messages = await Task.WhenAll(Messages.Select(transform)); - return Transform(messages); - } - - internal KafkaTopicClient Client => Messages[^1].Client; -} +// [PublicAPI] +// public readonly record 
struct BatchConsumeContext +// { +// internal sealed class Builder(MaxSize batchMaxSize, TimeSpan timeWindowDuration, CancellationToken ct = default) +// : BoundedBatchBuilderBase, BatchConsumeContext>(batchMaxSize, timeWindowDuration, ct) +// { +// public override BatchConsumeContext Build() +// { +// // #if NET6_0_OR_GREATER +// // ReadOnlySpan s = CollectionsMarshal.AsSpan(Batch) +// // var ia = s.ToImmutableArray(); +// // return new BatchConsumeContext(Batch); +// // #else +// // return new BatchConsumeContext(Batch.ToImmutableArray()); +// // #endif +// return new BatchConsumeContext(Batch); +// } +// } +// +// // TODO ImmutableArray +// public readonly IReadOnlyList> Messages; +// +// internal BatchConsumeContext(IReadOnlyList> messages) +// { +// if (messages.Count == 0) +// throw new ArgumentException("Batch must contain at least one message", nameof(messages)); +// +// Messages = messages; +// } +// +// public BatchConsumeContext Transform(ConsumeContext[] payload) => new(payload); +// +// public BatchConsumeContext Transform(IEnumerable> payload) => +// Transform(payload.ToArray()); +// +// public BatchConsumeContext Transform(IEnumerable batchPayload) => +// Transform(Messages.Zip(batchPayload, (message, payload) => message.Transform(payload))); +// +// public BatchConsumeContext Transform(Func, TOut> transform) +// { +// // TODO Parallel LINQ +// var messages = Messages.Select(transform); +// return Transform(messages); +// } +// +// public async Task> Transform(Func, Task> transform) +// { +// var messages = await Task.WhenAll(Messages.Select(transform)); +// return Transform(messages); +// } +// +// internal KafkaTopicClient Client => Messages[^1].Client; +// } diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecksBuilderEx.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecksBuilderEx.cs index 03e1f0c..c0b2245 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecksBuilderEx.cs +++ 
b/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecksBuilderEx.cs @@ -14,11 +14,11 @@ public static class HealthChecksBuilderEx public static IHealthChecksBuilder AddKafkaConsumerLivenessCheck(this IHealthChecksBuilder builder, string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder - .Add(HealthChecks.LivenessCheckForNamed(name, failureStatus, tags)) - .AddNamedConsumerLivenessCheck>(name); + .Add(HealthChecks.LivenessCheck(name, failureStatus, tags)) + .AddPipelineLivenessCheck(name); - public static IHealthChecksBuilder AddKafkaBatchConsumerLivenessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder - .Add(HealthChecks.LivenessCheckForNamed(name, failureStatus, tags)) - .AddNamedConsumerLivenessCheck>(name); + // public static IHealthChecksBuilder AddKafkaBatchConsumerLivenessCheck(this IHealthChecksBuilder builder, + // string name, HealthStatus? failureStatus = default, IEnumerable? 
tags = default) => builder + // .Add(HealthChecks.LivenessCheckForNamed(name, failureStatus, tags)) + // .AddNamedConsumerLivenessCheck>(name); } diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs index 7893096..757f0e7 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs @@ -9,83 +9,117 @@ namespace LocalPost.KafkaConsumer.DependencyInjection; [PublicAPI] public sealed class KafkaBuilder(IServiceCollection services) { - // public IServiceCollection Services { get; } - public OptionsBuilder Defaults { get; } = services.AddOptions(); -// public static OptionsBuilder AddKafkaConsumer(this IServiceCollection Services, string name, -// Action>> configure, -// Action> configureClient) => -// services.AddKafkaConsumer(name, configure, configureClient); - -// // JSON serializer is the default... But for Kafka it can be different?.. -// public static OptionsBuilder AddKafkaConsumer(this IServiceCollection Services, -// string name) where THandler : IHandler> => services.AddKafkaConsumer(name, provider => -// { -// // Keep .Scoped() as far as possible, as from that point all the middlewares will be resolved per request, not -// // just once -// var handlerFactory = HandlerStack2.From>().Scoped() -// .Deserialize(null) -// // TODO Error handler, just log all the errors and proceed (to not break the loop) -// .Acknowledge(); -// -// return handlerFactory(provider); -// }); - - public OptionsBuilder AddConsumer(string name, HandlerFactory> configure) + /// + /// Default batch consumer pipeline. + /// + /// Consumer name (also the default queue name). Should be unique in the application. + /// Handler factory. + /// Pipeline options builder. + public OptionsBuilder AddBatchConsumer(string name, + HandlerFactory>> hf) { - if (string.IsNullOrEmpty(name)) // TODO Just default (empty?) name... 
- throw new ArgumentException("A proper (non empty) name is required", nameof(name)); + var defaultPipeline = Pipeline + .Create(hf, provider => provider.GetOptions(name)) + .Batch(provider => provider.GetOptions(name)); - if (!services.TryAddKafkaClient(name)) - throw new ArgumentException("Kafka consumer is already registered", nameof(name)); + Add(name, defaultPipeline) + .Configure>((options, pipelineOptions) => + options.UpdateFrom(pipelineOptions.Get(name).Consume)); - services.TryAddNamedSingleton(name, provider => - new MessageSource(provider.GetRequiredService(name))); - services.AddBackgroundServiceForNamed(name); - - services.TryAddBackgroundConsumer, MessageSource>(name, configure, provider => + return BatchedConsumerFor(name).Configure>((options, clientConfig) => { - var options = provider.GetOptions(name); - return new ConsumerOptions(1, options.BreakOnException); + options.Consume.EnrichFrom(clientConfig.Value); + options.Consume.Topic = name; }); + } + + /// + /// Default consumer pipeline. + /// + /// Consumer name (also the default queue name). Should be unique in the application. + /// Handler factory. + /// Pipeline options builder. 
+ public OptionsBuilder AddConsumer(string name, HandlerFactory> hf) + { + var defaultPipeline = Pipeline + .Create(hf, provider => provider.GetOptions(name)); + + Add(name, defaultPipeline) + .Configure>((options, pipelineOptions) => + options.UpdateFrom(pipelineOptions.Get(name).Consume)); - return services.AddOptions(name).Configure>((options, commonConfig) => + return ConsumerFor(name).Configure>((options, clientConfig) => { - options.EnrichFrom(commonConfig.Value); - options.Topic = name; + options.Consume.EnrichFrom(clientConfig.Value); + options.Consume.Topic = name; }); } - public OptionsBuilder AddBatchConsumer(string name, - HandlerFactory> configure) + public OptionsBuilder Add(string name, PipelineRegistration> pr) { - if (string.IsNullOrEmpty(name)) + if (string.IsNullOrEmpty(name)) // TODO Just default (empty?) name... throw new ArgumentException("A proper (non empty) name is required", nameof(name)); - if (!services.TryAddKafkaClient(name)) - throw new InvalidOperationException("Kafka consumer is already registered"); + if (!services.TryAddKafkaClient(name)) + throw new ArgumentException("Kafka consumer is already registered", nameof(name)); services.TryAddNamedSingleton(name, provider => - { - var options = provider.GetOptions(name); - - return new BatchMessageSource(provider.GetRequiredService(name), - ConsumeContext.BatchBuilder( - options.BatchMaxSize, TimeSpan.FromMilliseconds(options.BatchTimeWindowMilliseconds))); - }); - services.AddBackgroundServiceForNamed(name); + new MessageSource(provider.GetRequiredService(name))); + services.AddBackgroundService(name); - services.TryAddBackgroundConsumer, BatchMessageSource>(name, configure, provider => - { - var options = provider.GetOptions(name); - return new ConsumerOptions(1, options.BreakOnException); - }); + pr(services.RegistrationContextFor(name), provider => + provider.GetRequiredService(name)); - return services.AddOptions(name).Configure>((options, commonConfig) => + return 
PipelineFor(name).Configure>((options, clientConfig) => { - options.EnrichFrom(commonConfig.Value); + options.EnrichFrom(clientConfig.Value); options.Topic = name; }); } + + public OptionsBuilder PipelineFor(string name) => services.AddOptions(name); + + public OptionsBuilder ConsumerFor(string name) => + services.AddOptions(name); + + public OptionsBuilder BatchedConsumerFor(string name) => + services.AddOptions(name); + + // TODO Health checks + + + + // public OptionsBuilder AddBatchConsumer(string name, + // HandlerFactory> configure) + // { + // if (string.IsNullOrEmpty(name)) + // throw new ArgumentException("A proper (non empty) name is required", nameof(name)); + // + // if (!services.TryAddKafkaClient(name)) + // throw new InvalidOperationException("Kafka consumer is already registered"); + // + // services.TryAddNamedSingleton(name, provider => + // { + // var options = provider.GetOptions(name); + // + // return new BatchMessageSource(provider.GetRequiredService(name), + // ConsumeContext.BatchBuilder( + // options.BatchMaxSize, TimeSpan.FromMilliseconds(options.BatchTimeWindowMilliseconds))); + // }); + // services.AddBackgroundService(name); + // + // services.TryAddBackgroundConsumer, BatchMessageSource>(name, configure, provider => + // { + // var options = provider.GetOptions(name); + // return new ConsumerOptions(1, options.BreakOnException); + // }); + // + // return services.AddOptions(name).Configure>((options, commonConfig) => + // { + // options.EnrichFrom(commonConfig.Value); + // options.Topic = name; + // }); + // } } diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionEx.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionEx.cs index ea51521..fa02ba6 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionEx.cs +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionEx.cs @@ -1,7 +1,6 @@ using JetBrains.Annotations; using LocalPost.DependencyInjection; using 
Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Logging; namespace LocalPost.KafkaConsumer.DependencyInjection; @@ -15,13 +14,11 @@ public static IServiceCollection AddKafkaConsumers(this IServiceCollection servi return services; } - internal static bool TryAddKafkaClient(this IServiceCollection services, string name) - where TOptions : Options => + internal static bool TryAddKafkaClient(this IServiceCollection services, string name) => services.TryAddNamedSingleton(name, provider => { - var options = provider.GetOptions(name); + var options = provider.GetOptions(name); - return new KafkaTopicClient(provider.GetRequiredService>(), - options, options.Topic, name); + return new KafkaTopicClient(provider.GetLoggerFor(), options, options.Topic, name); }); } diff --git a/src/LocalPost.KafkaConsumer/HandlerStackEx.cs b/src/LocalPost.KafkaConsumer/HandlerStackEx.cs index 35ae2f9..df297e1 100644 --- a/src/LocalPost.KafkaConsumer/HandlerStackEx.cs +++ b/src/LocalPost.KafkaConsumer/HandlerStackEx.cs @@ -1,19 +1,27 @@ -using System.Collections.Immutable; using System.Text.Json; using Confluent.Kafka; using JetBrains.Annotations; namespace LocalPost.KafkaConsumer; +// [PublicAPI] +// public static class PipelineOps +// { +// public static PipelineRegistration Batch(this PipelineRegistration> next, +// ushort batchMaxSize = 10, int timeWindowDuration = 1_000) => next.Map>((stream, _) => +// stream.Batch(() => new BoundedBatchBuilder(batchMaxSize, timeWindowDuration))); +// } + [PublicAPI] public static class HandlerStackEx { public static HandlerFactory> UseKafkaPayload(this HandlerFactory hf) => hf.Map, T>(next => async (context, ct) => await next(context.Payload, ct)); - public static HandlerFactory> UseKafkaPayload(this HandlerFactory> hf) => - hf.Map, IReadOnlyList>(next => - async (context, ct) => await next(context.Messages.Select(m => m.Payload).ToImmutableList(), ct)); + public static HandlerFactory>> UseKafkaPayload( + this HandlerFactory> hf) => 
+ hf.Map>, IEnumerable>(next => + async (batch, ct) => await next(batch.Select(context => context.Payload), ct)); public static HandlerFactory> Trace(this HandlerFactory> hf) => hf.Map, ConsumeContext>(next => @@ -32,10 +40,11 @@ public static HandlerFactory> Trace(this HandlerFactory> Trace(this HandlerFactory> hf) => - hf.Map, BatchConsumeContext>(next => - async (context, ct) => + public static HandlerFactory>> Trace(this HandlerFactory>> hf) => + hf.Map>, IEnumerable>>(next => + async (batch, ct) => { + var context = batch.ToList(); // TODO Optimize using var activity = Tracing.StartProcessing(context); try { @@ -57,15 +66,16 @@ public static HandlerFactory> Acknowledge(this HandlerFacto context.Client.StoreOffset(context.NextOffset); }); - public static HandlerFactory> Acknowledge( - this HandlerFactory> hf) => - hf.Map, BatchConsumeContext>(next => - async (context, ct) => + public static HandlerFactory>> Acknowledge( + this HandlerFactory>> hf) => + hf.Map>, IEnumerable>>(next => + async (batch, ct) => { + var context = batch.ToList(); // TODO Optimize await next(context, ct); // Store all the offsets, as it can be a batch of messages from different partitions // (even different topics, if subscribed using a regex) - foreach (var message in context.Messages) + foreach (var message in context) message.Client.StoreOffset(message.NextOffset); }); @@ -76,20 +86,21 @@ public static HandlerFactory> Deserialize( hf.Map, ConsumeContext>(next => async (context, ct) => await next(context.Transform(deserialize), ct)); - public static HandlerFactory> Deserialize( - this HandlerFactory> hf, Func, T> deserialize) => - hf.Map, BatchConsumeContext>(next => - async (context, ct) => await next(context.Transform(deserialize), ct)); + public static HandlerFactory>> Deserialize( + this HandlerFactory>> hf, Func, T> deserialize) => + hf.Map>, IEnumerable>>(next => + async (batch, ct) => await next(batch.Select(context => context.Transform(deserialize)), ct)); public static 
HandlerFactory> Deserialize( this HandlerFactory> hf, Func, Task> deserialize) => hf.Map, ConsumeContext>(next => async (context, ct) => await next(await context.Transform(deserialize), ct)); - public static HandlerFactory> Deserialize( - this HandlerFactory> hf, Func, Task> deserialize) => - hf.Map, BatchConsumeContext>(next => - async (context, ct) => await next(await context.Transform(deserialize), ct)); + public static HandlerFactory>> Deserialize( + this HandlerFactory>> hf, Func, Task> deserialize) => + hf.Map>, IEnumerable>>(next => + async (batch, ct) => + await next(await Task.WhenAll(batch.Select(context => context.Transform(deserialize))), ct)); private static Func, Task> AsyncDeserializer(IAsyncDeserializer deserializer) => context => deserializer.DeserializeAsync(context.Payload, false, new SerializationContext( @@ -99,8 +110,8 @@ public static HandlerFactory> Deserialize( this HandlerFactory> hf, IAsyncDeserializer deserializer) => hf.Deserialize(AsyncDeserializer(deserializer)); - public static HandlerFactory> Deserialize( - this HandlerFactory> hf, IAsyncDeserializer deserializer) => + public static HandlerFactory>> Deserialize( + this HandlerFactory>> hf, IAsyncDeserializer deserializer) => hf.Deserialize(AsyncDeserializer(deserializer)); private static Func, T> Deserializer(IDeserializer deserializer) => @@ -111,8 +122,8 @@ public static HandlerFactory> Deserialize( this HandlerFactory> hf, IDeserializer deserializer) => hf.Deserialize(Deserializer(deserializer)); - public static HandlerFactory> Deserialize( - this HandlerFactory> hf, IDeserializer deserializer) => + public static HandlerFactory>> Deserialize( + this HandlerFactory>> hf, IDeserializer deserializer) => hf.Deserialize(Deserializer(deserializer)); #endregion @@ -121,7 +132,7 @@ public static HandlerFactory> DeserializeJson( this HandlerFactory> hf, JsonSerializerOptions? 
options = null) => hf.Deserialize(context => JsonSerializer.Deserialize(context.Payload, options)!); - public static HandlerFactory> DeserializeJson( - this HandlerFactory> hf, JsonSerializerOptions? options = null) => + public static HandlerFactory>> DeserializeJson( + this HandlerFactory>> hf, JsonSerializerOptions? options = null) => hf.Deserialize(context => JsonSerializer.Deserialize(context.Payload, options)!); } diff --git a/src/LocalPost.KafkaConsumer/MessageSource.cs b/src/LocalPost.KafkaConsumer/MessageSource.cs index bcd0b18..76a114f 100644 --- a/src/LocalPost.KafkaConsumer/MessageSource.cs +++ b/src/LocalPost.KafkaConsumer/MessageSource.cs @@ -1,57 +1,15 @@ -using System.Runtime.CompilerServices; -using LocalPost.AsyncEnumerable; using LocalPost.DependencyInjection; namespace LocalPost.KafkaConsumer; -internal sealed class MessageSource : MessageSourceBase, IAsyncEnumerable> -{ - private readonly ConcurrentBuffer> _source; - - public MessageSource(KafkaTopicClient client) : base(client) - { - _source = ConsumeAsync().ToConcurrentBuffer(); - } - - // Run this (possibly) blocking & long-running task in a separate thread?.. 
- public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); - - public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => - _source.GetAsyncEnumerator(ct); -} - -internal sealed class BatchMessageSource : MessageSourceBase, IAsyncEnumerable> -{ - private readonly ConcurrentBuffer> _source; - - public BatchMessageSource(KafkaTopicClient client, - BatchBuilderFactory, BatchConsumeContext> factory) : base(client) - { - _source = ConsumeAsync().Batch(factory).ToConcurrentBuffer(); - } - - public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); - - public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => - _source.GetAsyncEnumerator(ct); -} - -internal abstract class MessageSourceBase(KafkaTopicClient client) : IBackgroundService, INamedService +internal sealed class MessageSource(KafkaTopicClient client) + : IBackgroundService, INamedService, IAsyncEnumerable> { private bool _stopped; - // Some additional reading: https://devblogs.microsoft.com/premier-developer/the-danger-of-taskcompletionsourcet-class/ -// private readonly TaskCompletionSource _executionTcs = new(TaskCreationOptions.RunContinuationsAsynchronously); - public string Name => client.Name; - // Run on a separate thread, as Confluent Kafka API is blocking - public Task StartAsync(CancellationToken ct) => Task.Run(client.Subscribe, ct); - - public abstract Task ExecuteAsync(CancellationToken ct); - - protected async IAsyncEnumerable> ConsumeAsync( - [EnumeratorCancellation] CancellationToken ct = default) + public async IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) { // TODO Transaction activity... @@ -61,6 +19,21 @@ protected async IAsyncEnumerable> ConsumeAsync( yield return result; } + // Run on a separate thread, as Confluent Kafka API is blocking + public Task StartAsync(CancellationToken ct) => Task.Run(client.Subscribe, ct); + + // Run this (possibly) blocking & long-running task in a separate thread?.. 
+ public Task ExecuteAsync(CancellationToken ct) => Task.CompletedTask; + + // Run on a separate thread, as Confluent Kafka API is blocking + public Task StopAsync(CancellationToken ct) => Task.Run(() => + { + _stopped = true; + + // TODO Wait for all the pipelines to finish... + client.Close(); + }, ct); + private IEnumerable> Consume(CancellationToken ct) { // TODO Transaction activity... @@ -70,11 +43,81 @@ private IEnumerable> Consume(CancellationToken ct) ct.ThrowIfCancellationRequested(); } - - // Run on a separate thread, as Confluent Kafka API is blocking - public Task StopAsync(CancellationToken ct) => Task.Run(() => - { - _stopped = true; - client.Close(); - }, ct); } + + + +// internal sealed class MessageSource : IAsyncEnumerable> +// { +// private readonly ConcurrentBuffer> _source; +// +// public MessageSource(KafkaTopicClient client) : base(client) +// { +// _source = ConsumeAsync().ToConcurrentBuffer(); +// } +// +// // Run this (possibly) blocking & long-running task in a separate thread?.. 
+// public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); +// +// public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => +// _source.GetAsyncEnumerator(ct); +// } +// +// internal sealed class BatchMessageSource : MessageSourceBase, IAsyncEnumerable> +// { +// private readonly ConcurrentBuffer> _source; +// +// public BatchMessageSource(KafkaTopicClient client, +// BatchBuilderFactory, BatchConsumeContext> factory) : base(client) +// { +// _source = ConsumeAsync().Batch(factory).ToConcurrentBuffer(); +// } +// +// public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); +// +// public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => +// _source.GetAsyncEnumerator(ct); +// } +// +// internal abstract class MessageSourceBase(KafkaTopicClient client) : IBackgroundService, INamedService +// { +// private bool _stopped; +// +// // Some additional reading: https://devblogs.microsoft.com/premier-developer/the-danger-of-taskcompletionsourcet-class/ +// // private readonly TaskCompletionSource _executionTcs = new(TaskCreationOptions.RunContinuationsAsynchronously); +// +// public string Name => client.Name; +// +// // Run on a separate thread, as Confluent Kafka API is blocking +// public Task StartAsync(CancellationToken ct) => Task.Run(client.Subscribe, ct); +// +// public abstract Task ExecuteAsync(CancellationToken ct); +// +// protected async IAsyncEnumerable> ConsumeAsync( +// [EnumeratorCancellation] CancellationToken ct = default) +// { +// // TODO Transaction activity... +// +// // Give the control back in the beginning, just before blocking in the Kafka's consumer call +// await Task.Yield(); +// foreach (var result in Consume(ct)) +// yield return result; +// } +// +// private IEnumerable> Consume(CancellationToken ct) +// { +// // TODO Transaction activity... 
+// +// while (!ct.IsCancellationRequested && !_stopped) +// yield return client.Read(ct); +// +// ct.ThrowIfCancellationRequested(); +// } +// +// // Run on a separate thread, as Confluent Kafka API is blocking +// public Task StopAsync(CancellationToken ct) => Task.Run(() => +// { +// _stopped = true; +// client.Close(); +// }, ct); +// } diff --git a/src/LocalPost.KafkaConsumer/Options.cs b/src/LocalPost.KafkaConsumer/Options.cs index 53521c0..7bab681 100644 --- a/src/LocalPost.KafkaConsumer/Options.cs +++ b/src/LocalPost.KafkaConsumer/Options.cs @@ -1,36 +1,146 @@ using System.ComponentModel.DataAnnotations; using Confluent.Kafka; using JetBrains.Annotations; +using Microsoft.Extensions.Options; namespace LocalPost.KafkaConsumer; [PublicAPI] -public class Options : ConsumerConfig +public sealed class ConsumerOptions : ConsumerConfig { - public Options() + public ConsumerOptions() { EnableAutoOffsetStore = false; // We will store offsets manually, see Acknowledge middleware } - [Required] public string Topic { get; set; } = null!; - - /// - /// Stop the consumer in case of an exception in the handler, or just log it and continue the processing loop. - /// Default is true. 
- /// - public bool BreakOnException { get; set; } = true; + [Required] + public string Topic { get; set; } = null!; internal void EnrichFrom(Config config) { foreach (var kv in config) Set(kv.Key, kv.Value); } + + internal void UpdateFrom(ConsumerOptions other) + { + EnrichFrom(other); + Topic = other.Topic; + } +} + +[PublicAPI] +public sealed class DefaultPipelineOptions +{ + public void Deconstruct(out ConsumerOptions consumer, out DefaultPipelineOptions pipeline) + { + consumer = Consume; + pipeline = this; + } + + public ConsumerOptions Consume { get; } = new(); + + [Range(1, ushort.MaxValue)] + public ushort MaxConcurrency { get; set; } = 10; + + // [Range(1, ushort.MaxValue)] + // public ushort Prefetch { get; set; } = 10; + + // /// + // /// Stop the consumer in case of an exception in the handler, or just log it and continue the processing loop. + // /// Default is true. + // /// + // public bool BreakOnException { get; set; } = true; + + public static implicit operator Pipeline.ConsumerOptions(DefaultPipelineOptions options) => new() + { + MaxConcurrency = options.MaxConcurrency, + BreakOnException = false, + }; +} + +[PublicAPI] +public sealed record DefaultBatchPipelineOptions +{ + public void Deconstruct(out ConsumerOptions consumer, out DefaultBatchPipelineOptions pipeline) + { + consumer = Consume; + pipeline = this; + } + + public ConsumerOptions Consume { get; } = new(); + + [Range(1, ushort.MaxValue)] + public ushort MaxConcurrency { get; set; } = 10; + + // [Range(1, ushort.MaxValue)] + // public ushort Prefetch { get; set; } = 10; + + // /// + // /// Stop the consumer in case of an exception in the handler, or just log it and continue the processing loop. + // /// Default is true. 
+ // /// + // public bool BreakOnException { get; set; } = true; + + [Range(1, ushort.MaxValue)] + public ushort BatchMaxSize { get; set; } = 10; + + [Range(1, ushort.MaxValue)] + public int TimeWindowMs { get; set; } = 1_000; + + public static implicit operator Pipeline.ConsumerOptions(DefaultBatchPipelineOptions options) => new() + { + MaxConcurrency = options.MaxConcurrency, + BreakOnException = false, + }; + + public static implicit operator BatchOptions(DefaultBatchPipelineOptions options) => new() + { + MaxSize = options.BatchMaxSize, + TimeWindowDuration = options.TimeWindowMs, + }; } [PublicAPI] -public class BatchedOptions : Options +public static class OptionsBuilderEx { - [Range(1, ushort.MaxValue)] public ushort BatchMaxSize { get; set; } = 100; + public static OptionsBuilder Configure( + this OptionsBuilder builder, + Action configure) => + builder.Configure(options => + { + var (consumer, pipeline) = options; + + configure(consumer, pipeline); + }); + + public static OptionsBuilder ConfigureConsumer( + this OptionsBuilder builder, + Action configure) => + builder.Configure(options => + { + var (consumer, _) = options; + + configure(consumer); + }); + + public static OptionsBuilder Configure( + this OptionsBuilder builder, + Action configure) => + builder.Configure(options => + { + var (consumer, pipeline) = options; + + configure(consumer, pipeline); + }); + + public static OptionsBuilder ConfigureConsumer( + this OptionsBuilder builder, + Action configure) => + builder.Configure(options => + { + var (consumer, _) = options; - [Range(1, ushort.MaxValue)] public int BatchTimeWindowMilliseconds { get; set; } = 1_000; + configure(consumer); + }); } diff --git a/src/LocalPost.KafkaConsumer/Tracing.cs b/src/LocalPost.KafkaConsumer/Tracing.cs index a69476e..5eccf5a 100644 --- a/src/LocalPost.KafkaConsumer/Tracing.cs +++ b/src/LocalPost.KafkaConsumer/Tracing.cs @@ -68,8 +68,8 @@ public static void AcceptDistributedTracingFrom(this Activity acti return 
activity; } - public static Activity? SetTagsFor(this Activity? activity, BatchConsumeContext context) => - activity?.SetTag("messaging.batch.message_count", context.Messages.Count); + public static Activity? SetTagsFor(this Activity? activity, IReadOnlyCollection> batch) => + activity?.SetTag("messaging.batch.message_count", batch.Count); } // Npgsql as an inspiration: @@ -105,15 +105,16 @@ static Tracing() return activity; } - - public static Activity? StartProcessing(BatchConsumeContext context) + public static Activity? StartProcessing(IReadOnlyCollection> batch) { - var activity = Source.StartActivity($"{context.Client.Topic} process", ActivityKind.Consumer); + var client = batch.First().Client; + // It is actually can be multiple topics, it is possible to subscribe using a pattern... + var activity = Source.StartActivity($"{client.Topic} process", ActivityKind.Consumer); if (activity is not { IsAllDataRequested: true }) return activity; - activity.SetDefaultTags(context.Client); - activity.SetTagsFor(context); + activity.SetDefaultTags(client); + activity.SetTagsFor(batch); // TODO Accept distributed tracing headers, per each message... 
diff --git a/src/LocalPost.SqsConsumer/ConsumeContext.cs b/src/LocalPost.SqsConsumer/ConsumeContext.cs index 129be29..3f2ca50 100644 --- a/src/LocalPost.SqsConsumer/ConsumeContext.cs +++ b/src/LocalPost.SqsConsumer/ConsumeContext.cs @@ -5,12 +5,12 @@ namespace LocalPost.SqsConsumer; -internal static class ConsumeContext -{ - public static BatchBuilderFactory, BatchConsumeContext> BatchBuilder( - MaxSize batchMaxSizeSize, TimeSpan timeWindow) => ct => - new BatchConsumeContext.Builder(batchMaxSizeSize, timeWindow, ct); -} +// internal static class ConsumeContext +// { +// public static BatchBuilderFactory, BatchConsumeContext> BatchBuilder( +// MaxSize batchMaxSizeSize, TimeSpan timeWindow) => ct => +// new BatchConsumeContext.Builder(batchMaxSizeSize, timeWindow, ct); +// } [PublicAPI] public readonly record struct ConsumeContext @@ -59,45 +59,43 @@ public async Task> Transform(Func, public static implicit operator T(ConsumeContext context) => context.Payload; } -[PublicAPI] -public readonly record struct BatchConsumeContext -{ - internal sealed class Builder(MaxSize batchMaxSize, TimeSpan timeWindowDuration, CancellationToken ct = default) - : BoundedBatchBuilderBase, BatchConsumeContext>(batchMaxSize, timeWindowDuration, ct) - { - // TODO Batch.DrainToImmutable() - public override BatchConsumeContext Build() => new(Batch.ToImmutable()); - } - - public readonly ImmutableArray> Messages; - - public int Count => Messages.Length; - - internal BatchConsumeContext(ImmutableArray> messages) - { - if (messages.Length == 0) - throw new ArgumentException("Batch must contain at least one message", nameof(messages)); - - Messages = messages; - } - - public BatchConsumeContext Transform(IEnumerable> payload) => new(payload.ToImmutableArray()); - - public BatchConsumeContext Transform(IEnumerable batchPayload) => - Transform(Messages.Zip(batchPayload, (message, payload) => message.Transform(payload))); - - public BatchConsumeContext Transform(Func, TOut> transform) - { - // 
TODO Parallel LINQ - var messages = Messages.Select(transform); - return Transform(messages); - } - - public async Task> Transform(Func, Task> transform) - { - var messages = await Task.WhenAll(Messages.Select(transform)); - return Transform(messages); - } - - internal QueueClient Client => Messages[0].Client; -} +// [PublicAPI] +// public readonly record struct BatchConsumeContext +// { +// internal sealed class Builder(MaxSize batchMaxSize, TimeSpan timeWindowDuration) +// : BoundedBatchBuilderBase, BatchConsumeContext>(batchMaxSize, timeWindowDuration) +// { +// // TODO Batch.DrainToImmutable() +// public override BatchConsumeContext Build() => new(Batch.DrainToImmutable()); +// } +// +// public readonly ImmutableArray> Messages; +// +// public int Count => Messages.Length; +// +// internal BatchConsumeContext(ImmutableArray> messages) +// { +// if (messages.Length == 0) +// throw new ArgumentException("Batch must contain at least one message", nameof(messages)); +// +// Messages = messages; +// } +// +// public BatchConsumeContext Transform(IEnumerable> payload) => new(payload.ToImmutableArray()); +// +// public BatchConsumeContext Transform(IEnumerable batchPayload) => +// Transform(Messages.Zip(batchPayload, (message, payload) => message.Transform(payload))); +// +// public BatchConsumeContext Transform(Func, TOut> transform) => +// // Parallel LINQ?.. 
+// Transform(Messages.Select(transform)); +// +// public async Task> Transform(Func, Task> transform) +// { +// // TODO AsSpan, to immutable array without allocations +// var messages = await Task.WhenAll(Messages.Select(transform)); +// return Transform(messages); +// } +// +// internal QueueClient Client => Messages[0].Client; +// } diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecksBuilderEx.cs b/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecksBuilderEx.cs index dfd824e..a2b76a2 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecksBuilderEx.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecksBuilderEx.cs @@ -12,11 +12,11 @@ public static class HealthChecksBuilderEx public static IHealthChecksBuilder AddSqsConsumerLivenessCheck(this IHealthChecksBuilder builder, string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder - .Add(HealthChecks.LivenessCheckForNamed(name, failureStatus, tags)) - .AddNamedConsumerLivenessCheck>(name); + .Add(HealthChecks.LivenessCheck(name, failureStatus, tags)) + .AddPipelineLivenessCheck(name); - public static IHealthChecksBuilder AddSqsBatchConsumerLivenessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder - .Add(HealthChecks.LivenessCheckForNamed(name, failureStatus, tags)) - .AddNamedConsumerLivenessCheck>(name); + // public static IHealthChecksBuilder AddSqsBatchConsumerLivenessCheck(this IHealthChecksBuilder builder, + // string name, HealthStatus? failureStatus = default, IEnumerable? 
tags = default) => builder + // .Add(HealthChecks.LivenessCheckForNamed(name, failureStatus, tags)) + // .AddNamedConsumerLivenessCheck>(name); } diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceCollectionEx.cs b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceCollectionEx.cs index bf9804c..8846e39 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceCollectionEx.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceCollectionEx.cs @@ -14,8 +14,7 @@ public static IServiceCollection AddSqsConsumers(this IServiceCollection service return services; } - internal static bool TryAddQueueClient(this IServiceCollection services, string name) - where TOptions : Options => + internal static bool TryAddQueueClient(this IServiceCollection services, string name) => services.TryAddNamedSingleton(name, provider => ActivatorUtilities.CreateInstance(provider, name)); } diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs index 6908cff..74ec0c8 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs @@ -8,69 +8,129 @@ namespace LocalPost.SqsConsumer.DependencyInjection; [PublicAPI] public sealed class SqsBuilder(IServiceCollection services) { - // public IServiceCollection Services { get; } - public OptionsBuilder Defaults { get; } = services.AddOptions(); - public OptionsBuilder AddConsumer(string name, HandlerFactory> hf) + /// + /// Default batch consumer pipeline. + /// + /// Consumer name (also the default queue name). Should be unique in the application. + /// Handler factory. + /// Pipeline options builder. 
+ public OptionsBuilder AddBatchConsumer(string name, + HandlerFactory>> hf) { - if (string.IsNullOrEmpty(name)) - throw new ArgumentException("A proper (non empty) name is required", nameof(name)); + var defaultPipeline = Pipeline + .Create(hf, provider => provider.GetOptions(name)) + .Batch(provider => provider.GetOptions(name)) + .Buffer(provider => provider.GetOptions(name).Prefetch); - if (!services.TryAddQueueClient(name)) - // return ob; // Already added, don't register twice - throw new InvalidOperationException("SQS consumer is already registered"); - - services.TryAddNamedSingleton(name, provider => new MessageSource( - provider.GetRequiredService(name), - provider.GetOptions(name).Prefetch - )); - services.AddBackgroundServiceForNamed(name); + Add(name, defaultPipeline) + .Configure>((options, pipelineOptions) => + options.UpdateFrom(pipelineOptions.Get(name).Consume)); - services.TryAddBackgroundConsumer, MessageSource>(name, hf, provider => + return BatchedConsumerFor(name).Configure>((options, globalOptions) => { - var options = provider.GetOptions(name); - return new ConsumerOptions(options.MaxConcurrency, options.BreakOnException); + options.Consume.UpdateFrom(globalOptions.Value); + options.Consume.QueueName = name; }); + } + + /// + /// Default consumer pipeline. + /// + /// Consumer name (also the default queue name). Should be unique in the application. + /// Handler factory. + /// Pipeline options builder. 
+ public OptionsBuilder AddConsumer(string name, HandlerFactory> hf) + { + var defaultPipeline = Pipeline + .Create(hf, provider => provider.GetOptions(name)) + .Buffer(provider => provider.GetOptions(name).Prefetch); - return services.AddOptions(name).Configure>((options, commonConfig) => + Add(name, defaultPipeline) + .Configure>((options, pipelineOptions) => + options.UpdateFrom(pipelineOptions.Get(name).Consume)); + + return ConsumerFor(name).Configure>((options, globalOptions) => { - options.UpdateFrom(commonConfig.Value); - options.QueueName = name; + options.Consume.UpdateFrom(globalOptions.Value); + options.Consume.QueueName = name; }); } - public OptionsBuilder AddBatchConsumer(string name, HandlerFactory> hf) + /// + /// Custom consumer pipeline. + /// + /// Consumer name (also the default queue name). Should be unique in the application. + /// Pipeline registration. + /// Consumer options builder. + public OptionsBuilder Add(string name, PipelineRegistration> pr) { if (string.IsNullOrEmpty(name)) throw new ArgumentException("A proper (non empty) name is required", nameof(name)); - if (!services.TryAddQueueClient(name)) + if (!services.TryAddQueueClient(name)) // return ob; // Already added, don't register twice - throw new InvalidOperationException("SQS consumer is already registered"); - - services.TryAddNamedSingleton(name, provider => - { - var options = provider.GetOptions(name); + throw new InvalidOperationException($"SQS consumer {name} is already registered"); - return new BatchMessageSource(provider.GetRequiredService(name), - ConsumeContext.BatchBuilder( - options.BatchMaxSize, TimeSpan.FromMilliseconds(options.BatchTimeWindowMilliseconds))); - }); - services.AddBackgroundServiceForNamed(name); + services.TryAddNamedSingleton(name, provider => new MessageSource( + provider.GetRequiredService(name) + )); + services.AddBackgroundService(name); - // services.TryAddConsumerGroup, BatchMessageSource>(name, hf, - // ConsumerOptions.From(o => new 
ConsumerOptions(o.MaxConcurrency, o.BreakOnException))); - services.TryAddBackgroundConsumer, BatchMessageSource>(name, hf, provider => - { - var options = provider.GetOptions(name); - return new ConsumerOptions(options.MaxConcurrency, options.BreakOnException); - }); + var context = services.RegistrationContextFor(name); + pr(context, provider => provider.GetRequiredService(name)); - return services.AddOptions(name).Configure>((options, commonConfig) => + return PipelineFor(name).Configure>((options, globalOptions) => { - options.UpdateFrom(commonConfig.Value); + options.UpdateFrom(globalOptions.Value); options.QueueName = name; }); } + + public OptionsBuilder PipelineFor(string name) => services.AddOptions(name); + + public OptionsBuilder ConsumerFor(string name) => + services.AddOptions(name); + + public OptionsBuilder BatchedConsumerFor(string name) => + services.AddOptions(name); + + // TODO Health checks + + + + // public OptionsBuilder AddBatchConsumer(string name, HandlerFactory> hf) + // { + // if (string.IsNullOrEmpty(name)) + // throw new ArgumentException("A proper (non empty) name is required", nameof(name)); + // + // if (!services.TryAddQueueClient(name)) + // // return ob; // Already added, don't register twice + // throw new InvalidOperationException("SQS consumer is already registered"); + // + // services.TryAddNamedSingleton(name, provider => + // { + // var options = provider.GetOptions(name); + // + // return new BatchMessageSource(provider.GetRequiredService(name), + // ConsumeContext.BatchBuilder( + // options.BatchMaxSize, TimeSpan.FromMilliseconds(options.BatchTimeWindowMilliseconds))); + // }); + // services.AddBackgroundService(name); + // + // // services.TryAddConsumerGroup, BatchMessageSource>(name, hf, + // // ConsumerOptions.From(o => new ConsumerOptions(o.MaxConcurrency, o.BreakOnException))); + // services.TryAddBackgroundConsumer, BatchMessageSource>(name, hf, provider => + // { + // var options = provider.GetOptions(name); + // 
return new ConsumerOptions(options.MaxConcurrency, options.BreakOnException); + // }); + // + // return services.AddOptions(name).Configure>((options, commonConfig) => + // { + // options.UpdateFrom(commonConfig.Value); + // options.QueueName = name; + // }); + // } } diff --git a/src/LocalPost.SqsConsumer/HandlerStackEx.cs b/src/LocalPost.SqsConsumer/HandlerStackEx.cs index 571ddb9..c009ca3 100644 --- a/src/LocalPost.SqsConsumer/HandlerStackEx.cs +++ b/src/LocalPost.SqsConsumer/HandlerStackEx.cs @@ -1,22 +1,29 @@ -using System.Collections.Immutable; using System.Text.Json; using JetBrains.Annotations; namespace LocalPost.SqsConsumer; +// [PublicAPI] +// public static class PipelineOps +// { +// public static PipelineRegistration Batch(this PipelineRegistration> next, +// ushort batchMaxSize = 10, int timeWindowDuration = 1_000) => next.Map>((stream, _) => +// stream.Batch(() => new BoundedBatchBuilder(batchMaxSize, timeWindowDuration))); +// } + [PublicAPI] public static class HandlerStackEx { public static HandlerFactory> UseSqsPayload(this HandlerFactory hf) => hf.Map, T>(next => async (context, ct) => await next(context.Payload, ct)); - public static HandlerFactory> UseSqsPayload(this HandlerFactory> hf) => - hf.Map, IReadOnlyList>(next => - async (context, ct) => await next(context.Messages.Select(m => m.Payload).ToImmutableList(), ct)); + public static HandlerFactory>> UseSqsPayload( + this HandlerFactory> hf) => + hf.Map>, IEnumerable>(next => + async (batch, ct) => await next(batch.Select(context => context.Payload), ct)); - public static HandlerFactory> Trace( - this HandlerFactory> handlerStack) => - handlerStack.Map, ConsumeContext>(next => async (context, ct) => + public static HandlerFactory> Trace(this HandlerFactory> hf) => + hf.Touch(next => async (context, ct) => { using var activity = Tracing.StartProcessing(context); try @@ -31,10 +38,11 @@ public static HandlerFactory> Trace( } }); - public static HandlerFactory> Trace( - this HandlerFactory> 
handlerStack) => - handlerStack.Map, BatchConsumeContext>(next => async (context, ct) => + public static HandlerFactory>> Trace( + this HandlerFactory>> hf) => + hf.Touch(next => async (batch, ct) => { + var context = batch.ToList(); // TODO Optimize // TODO Link distributed transactions from each message using var activity = Tracing.StartProcessing(context); try @@ -49,24 +57,23 @@ public static HandlerFactory> Trace( } }); - public static HandlerFactory> Acknowledge( - this HandlerFactory> handlerStack) => - // handlerStack.Map(Middlewares.Acknowledge); - handlerStack.Map, ConsumeContext>(next => + public static HandlerFactory> Acknowledge(this HandlerFactory> hf) => + hf.Touch(next => async (context, ct) => { await next(context, ct); await context.Client.DeleteMessageAsync(context); // TODO Instrument }); - public static HandlerFactory> Acknowledge( - this HandlerFactory> handlerStack) => - // handlerStack.Map(Middlewares.AcknowledgeBatch); - handlerStack.Map, BatchConsumeContext>(next => - async (context, ct) => + public static HandlerFactory>> Acknowledge( + this HandlerFactory>> hf) => + hf.Touch(next => + async (batch, ct) => { + var context = batch.ToList(); // TODO Optimize + var client = context.First().Client; await next(context, ct); - await context.Client.DeleteMessagesAsync(context); // TODO Instrument + await client.DeleteMessagesAsync(context); }); public static HandlerFactory> Deserialize( @@ -74,26 +81,26 @@ public static HandlerFactory> Deserialize( handlerStack.Map, ConsumeContext>(next => async (context, ct) => await next(context.Transform(deserialize), ct)); - public static HandlerFactory> Deserialize( - this HandlerFactory> handlerStack, Func, T> deserialize) => - handlerStack.Map, BatchConsumeContext>(next => - async (context, ct) => await next(context.Transform(deserialize), ct)); - - public static HandlerFactory> Deserialize( - this HandlerFactory> handlerStack, Func, Task> deserialize) => - handlerStack.Map, ConsumeContext>(next => - 
async (context, ct) => await next(await context.Transform(deserialize), ct)); + public static HandlerFactory>> Deserialize( + this HandlerFactory>> hf, Func, T> deserialize) => + hf.Map>, IEnumerable>>(next => + async (batch, ct) => await next(batch.Select(context => context.Transform(deserialize)), ct)); - public static HandlerFactory> Deserialize( - this HandlerFactory> handlerStack, Func, Task> deserialize) => - handlerStack.Map, BatchConsumeContext>(next => - async (context, ct) => await next(await context.Transform(deserialize), ct)); + // public static HandlerFactory> Deserialize( + // this HandlerFactory> handlerStack, Func, Task> deserialize) => + // handlerStack.Map, ConsumeContext>(next => + // async (context, ct) => await next(await context.Transform(deserialize), ct)); + // + // public static HandlerFactory>> Deserialize( + // this HandlerFactory>> hf, Func, Task> deserialize) => + // hf.Map>, IEnumerable>>(next => + // async (context, ct) => await next(await context.Transform(deserialize), ct)); public static HandlerFactory> DeserializeJson( this HandlerFactory> hf, JsonSerializerOptions? options = null) => hf.Deserialize(context => JsonSerializer.Deserialize(context.Payload, options)!); - public static HandlerFactory> DeserializeJson( - this HandlerFactory> hf, JsonSerializerOptions? options = null) => + public static HandlerFactory>> DeserializeJson( + this HandlerFactory>> hf, JsonSerializerOptions? 
options = null) => hf.Deserialize(context => JsonSerializer.Deserialize(context.Payload, options)!); } diff --git a/src/LocalPost.SqsConsumer/MessageSource.cs b/src/LocalPost.SqsConsumer/MessageSource.cs index c5ee670..cde0901 100644 --- a/src/LocalPost.SqsConsumer/MessageSource.cs +++ b/src/LocalPost.SqsConsumer/MessageSource.cs @@ -4,49 +4,28 @@ namespace LocalPost.SqsConsumer; -internal sealed class MessageSource : MessageSourceBase, IAsyncEnumerable> +internal sealed class MessageSource(QueueClient client) + : IBackgroundService, INamedService, IAsyncEnumerable> { - private readonly ConcurrentBuffer> _source; + private bool _stopped; - public MessageSource(QueueClient client, int prefetch) : base(client) - { - _source = ConsumeAsync().ToConcurrentBuffer(prefetch); - } + public string Name => client.Name; - public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); + public async Task StartAsync(CancellationToken ct) => await client.ConnectAsync(ct); - public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => - _source.GetAsyncEnumerator(ct); -} + public Task ExecuteAsync(CancellationToken ct) => Task.CompletedTask; -internal sealed class BatchMessageSource : MessageSourceBase, IAsyncEnumerable> -{ - private readonly ConcurrentBuffer> _source; + public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => + ConsumeAsync(ct).GetAsyncEnumerator(ct); - // TODO Make a note that Prefetch does not play a role here, with batch processing... 
- public BatchMessageSource(QueueClient client, - BatchBuilderFactory, BatchConsumeContext> factory) : base(client) + public Task StopAsync(CancellationToken ct) { - _source = ConsumeAsync().Batch(factory).ToConcurrentBuffer(); - } - - public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); - - public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => - _source.GetAsyncEnumerator(ct); -} - -internal abstract class MessageSourceBase(QueueClient client) : IBackgroundService, INamedService -{ - private bool _stopped; - - public string Name => client.Name; - - public async Task StartAsync(CancellationToken ct) => await client.ConnectAsync(ct); + _stopped = true; - public abstract Task ExecuteAsync(CancellationToken ct); + return Task.CompletedTask; + } - protected async IAsyncEnumerable> ConsumeAsync( + private async IAsyncEnumerable> ConsumeAsync( [EnumeratorCancellation] CancellationToken ct = default) { while (!ct.IsCancellationRequested && !_stopped) @@ -55,11 +34,66 @@ protected async IAsyncEnumerable> ConsumeAsync( ct.ThrowIfCancellationRequested(); } +} - public Task StopAsync(CancellationToken ct) - { - _stopped = true; - return Task.CompletedTask; - } -} + +// internal sealed class MessageSource : MessageSourceBase, IAsyncEnumerable> +// { +// private readonly ConcurrentBuffer> _source; +// +// public MessageSource(QueueClient client, int prefetch) : base(client) +// { +// _source = ConsumeAsync().ToConcurrentBuffer(prefetch); +// } +// +// public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); +// +// public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => +// _source.GetAsyncEnumerator(ct); +// } +// +// internal sealed class BatchMessageSource : MessageSourceBase, IAsyncEnumerable> +// { +// private readonly ConcurrentBuffer> _source; +// +// // TODO Make a note that Prefetch does not play a role here, with batch processing... 
+// public BatchMessageSource(QueueClient client, +// BatchBuilderFactory, BatchConsumeContext> factory) : base(client) +// { +// _source = ConsumeAsync().Batch(factory).ToConcurrentBuffer(); +// } +// +// public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); +// +// public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => +// _source.GetAsyncEnumerator(ct); +// } +// +// internal abstract class MessageSourceBase(QueueClient client) : IBackgroundService, INamedService +// { +// private bool _stopped; +// +// public string Name => client.Name; +// +// public async Task StartAsync(CancellationToken ct) => await client.ConnectAsync(ct); +// +// public abstract Task ExecuteAsync(CancellationToken ct); +// +// protected async IAsyncEnumerable> ConsumeAsync( +// [EnumeratorCancellation] CancellationToken ct = default) +// { +// while (!ct.IsCancellationRequested && !_stopped) +// foreach (var message in await client.PullMessagesAsync(ct)) +// yield return new ConsumeContext(client, message, message.Body); +// +// ct.ThrowIfCancellationRequested(); +// } +// +// public Task StopAsync(CancellationToken ct) +// { +// _stopped = true; +// +// return Task.CompletedTask; +// } +// } diff --git a/src/LocalPost.SqsConsumer/Options.cs b/src/LocalPost.SqsConsumer/Options.cs index dd487af..9ce9e10 100644 --- a/src/LocalPost.SqsConsumer/Options.cs +++ b/src/LocalPost.SqsConsumer/Options.cs @@ -7,28 +7,8 @@ namespace LocalPost.SqsConsumer; /// General SQS settings. /// [PublicAPI] -public record EndpointOptions +public sealed class EndpointOptions { - // AWS SDK requires List... No way to make it readonly / immutable :( - internal static readonly List AllAttributes = ["All"]; - internal static readonly List AllMessageAttributes = ["All"]; - - /// - /// How many messages to process concurrently. Default is 10. 
- /// - public ushort MaxConcurrency { get; set; } = 10; - - /// - /// Stop the consumer in case of an exception in the handler, or just log it and continue the processing loop. - /// Default is true. - /// - public bool BreakOnException { get; set; } = true; - - /// - /// How many messages to prefetch from SQS. Default is 10. - /// - public byte Prefetch { get; set; } = 10; - /// /// Time to wait for available messages in the queue. 0 is short pooling, where 1..20 activates long pooling. /// Default is 20. @@ -66,25 +46,72 @@ public record EndpointOptions // [Range(1, int.MaxValue)] // public int? TimeoutMilliseconds { get; set; } - internal void UpdateFrom(EndpointOptions other) - { - MaxConcurrency = other.MaxConcurrency; - Prefetch = other.Prefetch; - WaitTimeSeconds = other.WaitTimeSeconds; - MaxNumberOfMessages = other.MaxNumberOfMessages; - } + public List AttributeNames { get; set; } = ["All"]; + + public List MessageAttributeNames { get; set; } = ["All"]; } /// /// SQS queue consumer settings. /// [PublicAPI] -public record Options : EndpointOptions +public sealed class ConsumerOptions { + /// + /// Time to wait for available messages in the queue. 0 is short pooling, where 1..20 activates long pooling. + /// Default is 20. + /// + /// + /// Amazon SQS short and long polling + /// + /// + /// Setting up long polling + /// + [Range(0, 20)] + public byte WaitTimeSeconds { get; set; } = 20; + + /// + /// The maximum number of messages to return. Amazon SQS never returns more messages than this value (however, + /// fewer messages might be returned). Valid values: 1 to 10. Default is 1. 
+ /// + /// + /// Amazon SQS short and long polling + /// + /// + /// Setting up long polling + /// + [Range(1, 10)] + public byte MaxNumberOfMessages { get; set; } = 10; + + public List AttributeNames { get; set; } = ["All"]; + + public List MessageAttributeNames { get; set; } = ["All"]; + + internal void UpdateFrom(EndpointOptions global) + { + // MaxConcurrency = other.MaxConcurrency; + // Prefetch = other.Prefetch; + WaitTimeSeconds = global.WaitTimeSeconds; + MaxNumberOfMessages = global.MaxNumberOfMessages; + AttributeNames = global.AttributeNames; + MessageAttributeNames = global.MessageAttributeNames; + } + + internal void UpdateFrom(ConsumerOptions other) + { + WaitTimeSeconds = other.WaitTimeSeconds; + MaxNumberOfMessages = other.MaxNumberOfMessages; + AttributeNames = other.AttributeNames; + MessageAttributeNames = other.MessageAttributeNames; + QueueName = other.QueueName; + _queueUrl = other._queueUrl; + } + [Required] public string QueueName { get; set; } = null!; private string? _queueUrl; + /// /// If not set, IAmazonSQS.GetQueueUrlAsync(QueueName) will be used once, to get the actual URL of the queue. /// @@ -103,15 +130,50 @@ public string? QueueUrl } } -/// -/// SQS queue batch consumer settings. 
-/// [PublicAPI] -public record BatchedOptions : Options +public sealed class DefaultPipelineOptions +{ + public ConsumerOptions Consume { get; } = new(); + + [Range(1, ushort.MaxValue)] + public ushort MaxConcurrency { get; set; } = 10; + + [Range(1, ushort.MaxValue)] + public ushort Prefetch { get; set; } = 10; + + public static implicit operator Pipeline.ConsumerOptions(DefaultPipelineOptions options) => new() + { + MaxConcurrency = options.MaxConcurrency, + BreakOnException = false, + }; +} + +[PublicAPI] +public sealed record DefaultBatchPipelineOptions { + public ConsumerOptions Consume { get; } = new(); + + [Range(1, ushort.MaxValue)] + public ushort MaxConcurrency { get; set; } = 10; + + [Range(1, ushort.MaxValue)] + public ushort Prefetch { get; set; } = 10; + [Range(1, ushort.MaxValue)] public ushort BatchMaxSize { get; set; } = 10; [Range(1, ushort.MaxValue)] - public int BatchTimeWindowMilliseconds { get; set; } = 1_000; + public int TimeWindowMs { get; set; } = 1_000; + + public static implicit operator Pipeline.ConsumerOptions(DefaultBatchPipelineOptions options) => new() + { + MaxConcurrency = options.MaxConcurrency, + BreakOnException = false, + }; + + public static implicit operator BatchOptions(DefaultBatchPipelineOptions options) => new() + { + MaxSize = options.BatchMaxSize, + TimeWindowDuration = options.TimeWindowMs, + }; } diff --git a/src/LocalPost.SqsConsumer/QueueClient.cs b/src/LocalPost.SqsConsumer/QueueClient.cs index c392db9..f8dfb32 100644 --- a/src/LocalPost.SqsConsumer/QueueClient.cs +++ b/src/LocalPost.SqsConsumer/QueueClient.cs @@ -6,10 +6,10 @@ namespace LocalPost.SqsConsumer; -internal sealed class QueueClient(ILogger logger, IAmazonSQS sqs, Options options, string name) +internal sealed class QueueClient(ILogger logger, IAmazonSQS sqs, ConsumerOptions options, string name) : INamedService { - public QueueClient(ILogger logger, IAmazonSQS sqs, IOptionsMonitor options, string name) : + public QueueClient(ILogger logger, 
IAmazonSQS sqs, IOptionsMonitor options, string name) : this(logger, sqs, options.Get(name), name) { } @@ -44,7 +44,7 @@ private async Task FetchQueueAttributesAsync(CancellationToken ct) try { // Checking for a possible error in the response would be also good... - _queueAttributes = await sqs.GetQueueAttributesAsync(QueueUrl, EndpointOptions.AllAttributes, ct); + _queueAttributes = await sqs.GetQueueAttributesAsync(QueueUrl, ["All"], ct); } catch (OperationCanceledException e) when (e.CancellationToken == ct) { @@ -60,8 +60,8 @@ public async Task> PullMessagesAsync(CancellationToken ct) { using var activity = Tracing.StartReceiving(this); - var attributeNames = EndpointOptions.AllAttributes; // Make configurable, later - var messageAttributeNames = EndpointOptions.AllMessageAttributes; // Make configurable, later + // var attributeNames = EndpointOptions.AllAttributes; // Make configurable, later + // var messageAttributeNames = EndpointOptions.AllMessageAttributes; // Make configurable, later // AWS SDK handles network failures, see // https://docs.aws.amazon.com/sdkref/latest/guide/feature-retry-behavior.html @@ -70,8 +70,8 @@ public async Task> PullMessagesAsync(CancellationToken ct) QueueUrl = QueueUrl, WaitTimeSeconds = options.WaitTimeSeconds, MaxNumberOfMessages = options.MaxNumberOfMessages, - AttributeNames = attributeNames, - MessageAttributeNames = messageAttributeNames, + AttributeNames = options.AttributeNames, + MessageAttributeNames = options.MessageAttributeNames, }, ct); activity?.SetTagsFor(response); @@ -94,11 +94,11 @@ public async Task DeleteMessageAsync(ConsumeContext context) // TODO Log failures?.. 
} - public async Task DeleteMessagesAsync(BatchConsumeContext context) + public async Task DeleteMessagesAsync(IReadOnlyCollection> messages) { - using var activity = Tracing.StartSettling(context); + using var activity = Tracing.StartSettling(messages); - var requests = context.Messages + var requests = messages .Select((message, i) => new DeleteMessageBatchRequestEntry(i.ToString(), message.ReceiptHandle)) .Chunk(10) .Select(entries => entries.ToList()); diff --git a/src/LocalPost.SqsConsumer/Tracing.cs b/src/LocalPost.SqsConsumer/Tracing.cs index 94d0d92..6cdd2a2 100644 --- a/src/LocalPost.SqsConsumer/Tracing.cs +++ b/src/LocalPost.SqsConsumer/Tracing.cs @@ -56,7 +56,7 @@ public static void SetDefaultTags(this Activity? activity, QueueClient client) public static Activity? SetTagsFor(this Activity? activity, ConsumeContext context) => activity?.SetTag("messaging.message.id", context.MessageId); - public static Activity? SetTagsFor(this Activity? activity, BatchConsumeContext context) => + public static Activity? SetTagsFor(this Activity? activity, IReadOnlyCollection> context) => activity?.SetTag("messaging.batch.message_count", context.Count); public static Activity? SetTagsFor(this Activity? activity, ReceiveMessageResponse response) => @@ -96,14 +96,14 @@ static Tracing() return activity; } - - public static Activity? StartProcessing(BatchConsumeContext context) + public static Activity? StartProcessing(IReadOnlyCollection> context) { - var activity = Source.StartActivity($"{context.Client.QueueName} process", ActivityKind.Consumer); + var client = context.First().Client; + var activity = Source.StartActivity($"{client.QueueName} process", ActivityKind.Consumer); if (activity is not { IsAllDataRequested: true }) return activity; - activity.SetDefaultTags(context.Client); + activity.SetDefaultTags(client); activity.SetTagsFor(context); // TODO Accept distributed tracing headers, per each message... 
@@ -123,14 +123,15 @@ static Tracing() return activity; } - public static Activity? StartSettling(BatchConsumeContext context) + public static Activity? StartSettling(IReadOnlyCollection> context) { - var activity = Source.StartActivity($"{context.Client.QueueName} settle", ActivityKind.Consumer); + var client = context.First().Client; + var activity = Source.StartActivity($"{client.QueueName} settle", ActivityKind.Consumer); if (activity is not { IsAllDataRequested: true }) return activity; - activity.SetDefaultTags(context.Client); - activity.SetTag("messaging.batch.message_count", context.Count); + activity.SetDefaultTags(client); + activity.SetTagsFor(context); return activity; } diff --git a/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs b/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs index cffd297..ac630f0 100644 --- a/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs +++ b/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs @@ -2,8 +2,8 @@ namespace LocalPost.AsyncEnumerable; internal static class AsyncEnumerableEx { - public static ConcurrentBuffer ToConcurrentBuffer(this IAsyncEnumerable source, int maxSize = 1) => - new(source, maxSize); + // public static ConcurrentBuffer ToConcurrentBuffer(this IAsyncEnumerable source, int maxSize = 1) => + // new(source, maxSize); public static IAsyncEnumerable Batch(this IAsyncEnumerable source, BatchBuilderFactory factory) => new BatchingAsyncEnumerable(source, factory); diff --git a/src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs b/src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs index 7bec94e..5b65a00 100644 --- a/src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs +++ b/src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs @@ -10,6 +10,7 @@ public static async ValueTask Consume(this IAsyncEnumerator source, Can var completed = waitTrigger.IsCompleted switch { true => await waitTrigger, + // TODO WaitAsync() from .NET 6+ _ => await waitTrigger.AsTask().WaitAsync(ct) }; diff --git 
a/src/LocalPost/AsyncEnumerable/BatchBuilder.cs b/src/LocalPost/AsyncEnumerable/BatchBuilder.cs index ac4b2ba..4547274 100644 --- a/src/LocalPost/AsyncEnumerable/BatchBuilder.cs +++ b/src/LocalPost/AsyncEnumerable/BatchBuilder.cs @@ -3,13 +3,12 @@ namespace LocalPost.AsyncEnumerable; -internal delegate IBatchBuilder BatchBuilderFactory(CancellationToken ct = default); +internal delegate IBatchBuilder BatchBuilderFactory(); internal interface IBatchBuilder : IDisposable { CancellationToken TimeWindow { get; } - bool TimeWindowClosed { get; } - Task TimeWindowTrigger { get; } + Task TimeWindowTrigger { get; } bool IsEmpty { get; } bool Full { get; } @@ -24,22 +23,19 @@ internal interface IBatchBuilder : IDisposable internal abstract class BatchBuilderBase : IBatchBuilder { private readonly TimeSpan _timeWindowDuration; - private readonly CancellationToken _ct; private CancellationTokenSource _timeWindow; private CancellationTokenTaskSource? _timeWindowTrigger; - protected BatchBuilderBase(TimeSpan timeWindowDuration, CancellationToken ct = default) + protected BatchBuilderBase(TimeSpan timeWindowDuration) { _timeWindowDuration = timeWindowDuration; - _ct = ct; // Rename to globalCancellation or something like that... 
- _timeWindow = StartTimeWindow(); } public CancellationToken TimeWindow => _timeWindow.Token; public bool TimeWindowClosed => TimeWindow.IsCancellationRequested; - public Task TimeWindowTrigger => + public Task TimeWindowTrigger => (_timeWindowTrigger ??= new CancellationTokenTaskSource(_timeWindow.Token)).Task; public abstract bool IsEmpty { get; } @@ -49,16 +45,7 @@ protected BatchBuilderBase(TimeSpan timeWindowDuration, CancellationToken ct = d public abstract TBatch Build(); - private CancellationTokenSource StartTimeWindow() - { - if (_ct == CancellationToken.None) - return new CancellationTokenSource(_timeWindowDuration); - - var timeWindow = CancellationTokenSource.CreateLinkedTokenSource(_ct); - timeWindow.CancelAfter(_timeWindowDuration); - - return timeWindow; - } + private CancellationTokenSource StartTimeWindow() => new(_timeWindowDuration); // Should be overwritten in derived classes, to reset their state also public virtual void Reset() @@ -78,7 +65,7 @@ public TBatch Flush() return batch; } - public void Dispose() + public virtual void Dispose() { _timeWindow.Dispose(); _timeWindowTrigger?.Dispose(); @@ -88,13 +75,13 @@ public void Dispose() internal abstract class BoundedBatchBuilderBase : BatchBuilderBase { private readonly int _batchMaxSize; - protected ImmutableArray.Builder Batch; + protected List Batch; - protected BoundedBatchBuilderBase(MaxSize batchMaxSize, TimeSpan timeWindowDuration, CancellationToken ct = default) : - base(timeWindowDuration, ct) + protected BoundedBatchBuilderBase(MaxSize batchMaxSize, TimeSpan timeWindowDuration) : + base(timeWindowDuration) { _batchMaxSize = batchMaxSize; - Batch = ImmutableArray.CreateBuilder(_batchMaxSize); + Batch = new List(_batchMaxSize); } public override bool IsEmpty => Batch.Count == 0; @@ -114,12 +101,17 @@ public override bool TryAdd(T entry) public override void Reset() { base.Reset(); - Batch = ImmutableArray.CreateBuilder(_batchMaxSize); + Batch = new List(_batchMaxSize); } } -internal 
sealed class BoundedBatchBuilder(MaxSize batchMaxSize, TimeSpan timeWindowDuration, CancellationToken ct = default) - : BoundedBatchBuilderBase>(batchMaxSize, timeWindowDuration, ct) +internal sealed class BoundedBatchBuilder(MaxSize batchMaxSize, TimeSpan timeWindowDuration) + : BoundedBatchBuilderBase>(batchMaxSize, timeWindowDuration) { - public override IReadOnlyList Build() => Batch; // TODO ImmutableArray + public BoundedBatchBuilder(MaxSize batchMaxSize, int timeWindowDuration) + : this(batchMaxSize, TimeSpan.FromMilliseconds(timeWindowDuration)) + { + } + + public override IReadOnlyCollection Build() => Batch; // ImmutableArray or something?.. } diff --git a/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs b/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs index 398d9a6..8a07045 100644 --- a/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs +++ b/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs @@ -6,7 +6,7 @@ internal sealed class BatchingAsyncEnumerable( public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) { await using var source = reader.GetAsyncEnumerator(ct); - using var batchBuilder = factory(ct); + using var batchBuilder = factory(); while (!ct.IsCancellationRequested) { TOut completedBatch; @@ -31,7 +31,7 @@ public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = de { break; } - // Batch time window is closed, or the cancellation token is triggered + // Batch time window has closed, or enumerator's cancellation has been triggered catch (OperationCanceledException) { if (batchBuilder.IsEmpty) diff --git a/src/LocalPost/AsyncEnumerable/ConcurrentBuffer.cs b/src/LocalPost/AsyncEnumerable/ConcurrentBuffer.cs index e29564f..e1f3cc0 100644 --- a/src/LocalPost/AsyncEnumerable/ConcurrentBuffer.cs +++ b/src/LocalPost/AsyncEnumerable/ConcurrentBuffer.cs @@ -2,36 +2,36 @@ namespace LocalPost.AsyncEnumerable; -internal sealed class ConcurrentBuffer(IAsyncEnumerable source, MaxSize 
bufferMaxSize) - : IAsyncEnumerable -{ - private readonly Channel _buffer = Channel.CreateBounded(new BoundedChannelOptions(bufferMaxSize) - { - SingleReader = false, - SingleWriter = true, - FullMode = BoundedChannelFullMode.Wait, - }); - - public async Task Run(CancellationToken ct) - { - var buffer = _buffer.Writer; - try - { - await foreach (var item in source.WithCancellation(ct)) - await buffer.WriteAsync(item, ct); - } - finally - { - buffer.Complete(); - } - } - - public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) - { - var buffer = _buffer.Reader; - // Like ReadAllAsync() from netstandard2.1/.NET Core 3.0+ - while (await buffer.WaitToReadAsync(ct).ConfigureAwait(false)) - while (buffer.TryRead(out var item)) - yield return item; - } -} +// internal sealed class ConcurrentBuffer(IAsyncEnumerable source, MaxSize bufferMaxSize) +// : IAsyncEnumerable +// { +// private readonly Channel _buffer = Channel.CreateBounded(new BoundedChannelOptions(bufferMaxSize) +// { +// SingleReader = false, +// SingleWriter = true, +// FullMode = BoundedChannelFullMode.Wait, +// }); +// +// public async Task Run(CancellationToken ct) +// { +// var buffer = _buffer.Writer; +// try +// { +// await foreach (var item in source.WithCancellation(ct)) +// await buffer.WriteAsync(item, ct); +// } +// finally +// { +// buffer.Complete(); +// } +// } +// +// public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) +// { +// var buffer = _buffer.Reader; +// // Like ReadAllAsync() from netstandard2.1/.NET Core 3.0+ +// while (await buffer.WaitToReadAsync(ct).ConfigureAwait(false)) +// while (buffer.TryRead(out var item)) +// yield return item; +// } +// } diff --git a/src/LocalPost/BackgroundQueue/BackgroundJobQueue.cs b/src/LocalPost/BackgroundQueue/BackgroundJobQueue.cs index d238b8e..1f48f2d 100644 --- a/src/LocalPost/BackgroundQueue/BackgroundJobQueue.cs +++ b/src/LocalPost/BackgroundQueue/BackgroundJobQueue.cs @@ -4,7 +4,7 @@ 
namespace LocalPost.BackgroundQueue; // Just a proxy to the actual queue, needed to expose IBackgroundJobQueue [UsedImplicitly] -internal sealed class BackgroundJobQueue(BackgroundQueue> queue) +internal sealed class BackgroundJobQueue(BackgroundQueue queue) : IBackgroundJobQueue { public ValueTask Enqueue(BackgroundJob payload, CancellationToken ct = default) => queue.Enqueue(payload, ct); diff --git a/src/LocalPost/BackgroundQueue/BackgroundQueue.cs b/src/LocalPost/BackgroundQueue/BackgroundQueue.cs index facc07d..9e52afa 100644 --- a/src/LocalPost/BackgroundQueue/BackgroundQueue.cs +++ b/src/LocalPost/BackgroundQueue/BackgroundQueue.cs @@ -1,69 +1,70 @@ using System.Threading.Channels; using LocalPost.AsyncEnumerable; +using LocalPost.DependencyInjection; namespace LocalPost.BackgroundQueue; internal static class BackgroundQueue { - public static BackgroundQueue> Create(Options options) => - Create>(options, reader => reader.ReadAllAsync()); - - public static BackgroundQueue>> CreateBatched( - BatchedOptions options) => - Create>>(options, - reader => reader - .ReadAllAsync() - .Batch(ct => - new BoundedBatchBuilder>(options.BatchMaxSize, options.BatchTimeWindow, ct)), - true); + // public static BackgroundQueue> Create(Options options) => + // Create>(options, reader => reader.ReadAllAsync()); + + // public static BackgroundQueue>> CreateBatched( + // BatchedOptions options) => + // Create>>(options, + // reader => reader + // .ReadAllAsync() + // .Batch(ct => + // new BoundedBatchBuilder>(options.BatchMaxSize, options.BatchTimeWindow, ct)), + // true); + + public static BackgroundQueue Create(IServiceProvider provider) => + Create(provider.GetOptions>()); // To make the pipeline linear (single consumer), just add .ToConcurrent() to the end - public static BackgroundQueue Create(Options options, - Func>, IAsyncEnumerable> configure, - bool proxy = false) // TODO Rename this parameter somehow... 
+ public static BackgroundQueue Create(QueueOptions options) { - var channel = options.MaxSize switch + // var channel = options.MaxSize switch + // { + // not null => Channel.CreateBounded>(new BoundedChannelOptions(options.MaxSize.Value) + // { + // SingleReader = options.MaxConcurrency == 1, // TODO Communicate SingleReader hint somehow... + // SingleWriter = false, // Accept it in options? Generally, we do not know how the queue will be used + // FullMode = options.FullMode, + // }), + // _ => Channel.CreateUnbounded>(new UnboundedChannelOptions + // { + // SingleReader = options.MaxConcurrency == 1, + // SingleWriter = false, // We do not know how it will be used + // }) + // }; + var channel = options.Channel switch { - not null => Channel.CreateBounded>(new BoundedChannelOptions(options.MaxSize.Value) - { - SingleReader = proxy || options.MaxConcurrency == 1, - SingleWriter = false, // We do not know how it will be used - FullMode = options.FullMode, - }), - _ => Channel.CreateUnbounded>(new UnboundedChannelOptions - { - SingleReader = proxy || options.MaxConcurrency == 1, - SingleWriter = false, // We do not know how it will be used - }) + BoundedChannelOptions channelOpt => Channel.CreateBounded>(channelOpt), + UnboundedChannelOptions channelOpt => Channel.CreateUnbounded>(channelOpt), + _ => throw new InvalidOperationException("Unknown channel options") }; - var pipeline = configure(channel.Reader); - if (proxy) - pipeline = pipeline.ToConcurrentBuffer(); - - return new BackgroundQueue(channel, pipeline, - TimeSpan.FromMilliseconds(options.CompletionDelay)); + return new BackgroundQueue(channel, TimeSpan.FromMilliseconds(options.CompletionDelay)); } } -internal static partial class BackgroundQueue -{ - public static readonly string Name = "BackgroundQueue/" + Reflection.FriendlyNameOf(); -} - -internal sealed class BackgroundQueue( - ChannelWriter> input, - IAsyncEnumerable pipeline, - TimeSpan completionDelay) - : IAsyncEnumerable, IBackgroundService, 
IBackgroundQueue +internal sealed class BackgroundQueue(Channel> channel, TimeSpan completionDelay) + : IBackgroundQueue, IAsyncEnumerable>, IBackgroundService { - public IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct) => pipeline.GetAsyncEnumerator(ct); + public async IAsyncEnumerator> GetAsyncEnumerator(CancellationToken cancellationToken) + { + var reader = channel.Reader; + while (await reader.WaitToReadAsync(cancellationToken).ConfigureAwait(false)) + while (reader.TryRead(out var item)) + yield return item; + } // Track full or not later public ValueTask Enqueue(T item, CancellationToken ct = default) => - input.WriteAsync(new ConsumeContext(item), ct); + channel.Writer.WriteAsync(new ConsumeContext(item), ct); - public bool IsClosed { get; private set; } // TODO Use + public bool IsClosed { get; private set; } private async ValueTask CompleteAsync(CancellationToken ct = default) { @@ -73,17 +74,57 @@ private async ValueTask CompleteAsync(CancellationToken ct = default) if (completionDelay.TotalMilliseconds > 0) await Task.Delay(completionDelay, ct); - input.Complete(); + channel.Writer.Complete(); IsClosed = true; } public Task StartAsync(CancellationToken ct) => Task.CompletedTask; - public Task ExecuteAsync(CancellationToken ct) => pipeline switch - { - ConcurrentBuffer concurrent => concurrent.Run(ct), - _ => Task.CompletedTask - }; + public Task ExecuteAsync(CancellationToken ct) => Task.CompletedTask; public async Task StopAsync(CancellationToken forceExitToken) => await CompleteAsync(forceExitToken); } + + + +// internal static partial class BackgroundQueue +// { +// public static readonly string Name = "BackgroundQueue/" + Reflection.FriendlyNameOf(); +// } +// +// internal sealed class BackgroundQueue( +// ChannelWriter> input, +// IAsyncEnumerable pipeline, +// TimeSpan completionDelay) +// : IAsyncEnumerable, IBackgroundService, IBackgroundQueue +// { +// public IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct) => 
pipeline.GetAsyncEnumerator(ct); +// +// // Track full or not later +// public ValueTask Enqueue(T item, CancellationToken ct = default) => +// input.WriteAsync(new ConsumeContext(item), ct); +// +// public bool IsClosed { get; private set; } +// +// private async ValueTask CompleteAsync(CancellationToken ct = default) +// { +// if (IsClosed) +// return; +// +// if (completionDelay.TotalMilliseconds > 0) +// await Task.Delay(completionDelay, ct); +// +// input.Complete(); +// IsClosed = true; +// } +// +// public Task StartAsync(CancellationToken ct) => Task.CompletedTask; +// +// public Task ExecuteAsync(CancellationToken ct) => pipeline switch +// { +// ConcurrentBuffer concurrent => concurrent.Run(ct), +// _ => Task.CompletedTask +// }; +// +// public async Task StopAsync(CancellationToken forceExitToken) => await CompleteAsync(forceExitToken); +// } diff --git a/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs b/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs index e0c6eac..3b42e7a 100644 --- a/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs +++ b/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs @@ -8,7 +8,7 @@ namespace LocalPost.BackgroundQueue.DependencyInjection; [PublicAPI] public class BackgroundQueuesBuilder(IServiceCollection services) { - public OptionsBuilder> AddJobQueue() + public OptionsBuilder> AddJobQueue() { services.TryAddSingleton(); services.TryAddSingletonAlias(); @@ -22,33 +22,88 @@ public OptionsBuilder> AddJobQueue() ); } - // THandler has to be registered by the user - public OptionsBuilder> AddQueue() where THandler : IHandler => AddQueue( - // A way to configure the pipeline?.. 
- HandlerStack.From() - .Scoped() - .UsePayload() - .Trace() - ); + public OptionsBuilder> AddBatchedQueue( + HandlerFactory>> hf) + { + var defaultPipeline = Pipeline + .Create(hf, provider => provider.GetOptions>()) + .Buffer(1) // To avoid buffering for each concurrent IAsyncEnumerable consumer + .Batch(provider => provider.GetOptions>()); + + Add(defaultPipeline) + .Configure>((options, pipelineOptions) => + options.UpdateFrom(pipelineOptions.Queue)); + + return BatchedQueueFor(); + } - public OptionsBuilder> AddQueue(HandlerFactory> hf) + // // THandler has to be registered by the user + // public OptionsBuilder> AddQueue() where THandler : IHandler => AddQueue( + // // A way to configure the handler?.. + // HandlerStack.From() + // .Scoped() + // .UsePayload() + // .Trace() + // ); + + public OptionsBuilder> AddQueue(HandlerFactory> hf) { - if (!services.TryAddSingletonAlias, BackgroundQueue>>()) + var defaultPipeline = Pipeline + .Create(hf, provider => provider.GetOptions>()); + + Add(defaultPipeline) + .Configure>((options, pipelineOptions) => + options.UpdateFrom(pipelineOptions.Queue)); + + return QueueFor(); + } + + public OptionsBuilder> Add(PipelineRegistration> pr) + { + if (!services.TryAddSingletonAlias, BackgroundQueue>()) // return ob; // Already added, don't register twice - throw new InvalidOperationException($"BackgroundQueue<{Reflection.FriendlyNameOf()}> is already registered."); + throw new InvalidOperationException( + $"{Reflection.FriendlyNameOf>()}> is already registered."); - services.TryAddSingleton(provider => - BackgroundQueue.Create(provider.GetOptions>())); - services.AddBackgroundServiceFor>>(); + services.TryAddSingleton(BackgroundQueue.Create); + services.AddBackgroundService>(); - services.TryAddBackgroundConsumer, BackgroundQueue>>(hf, provider => - { - var options = provider.GetOptions>(); - return new ConsumerOptions(options.MaxConcurrency, false); - }); + var context = services.RegistrationContextFor>(); + pr(context, 
provider => provider.GetRequiredService>()); + // services.TryAddBackgroundConsumer, BackgroundQueue>>(hf, provider => + // { + // var options = provider.GetOptions>(); + // return new ConsumerOptions(options.MaxConcurrency, false); + // }); - return services.AddOptions>(); + return PipelineFor(); } - // TODO Batched + public OptionsBuilder> PipelineFor() => services.AddOptions>(); + + public OptionsBuilder> QueueFor() => + services.AddOptions>(); + + public OptionsBuilder> BatchedQueueFor() => + services.AddOptions>(); + + + + // public OptionsBuilder> AddQueue(HandlerFactory> hf) + // { + // if (!services.TryAddSingletonAlias, BackgroundQueue>>()) + // // return ob; // Already added, don't register twice + // throw new InvalidOperationException($"BackgroundQueue<{Reflection.FriendlyNameOf()}> is already registered."); + // + // services.TryAddSingleton(provider => BackgroundQueue.Create(provider.GetOptions>())); + // services.AddBackgroundService>>(); + // + // services.TryAddBackgroundConsumer, BackgroundQueue>>(hf, provider => + // { + // var options = provider.GetOptions>(); + // return new ConsumerOptions(options.MaxConcurrency, false); + // }); + // + // return services.AddOptions>(); + // } } diff --git a/src/LocalPost/BackgroundQueue/DependencyInjection/HealthChecks.cs b/src/LocalPost/BackgroundQueue/DependencyInjection/HealthChecks.cs index d17f57e..8ee2993 100644 --- a/src/LocalPost/BackgroundQueue/DependencyInjection/HealthChecks.cs +++ b/src/LocalPost/BackgroundQueue/DependencyInjection/HealthChecks.cs @@ -14,5 +14,5 @@ public static class HealthChecksBuilderEx public static IHealthChecksBuilder AddBackgroundQueueLivenessCheck(this IHealthChecksBuilder builder, HealthStatus? failureStatus = default, IEnumerable? 
tags = default) => builder - .AddConsumerLivenessCheck, T>(); + .AddPipelineLivenessCheck>(); } diff --git a/src/LocalPost/BackgroundQueue/HandlerStackEx.cs b/src/LocalPost/BackgroundQueue/HandlerStackEx.cs index 904ec29..dd8c562 100644 --- a/src/LocalPost/BackgroundQueue/HandlerStackEx.cs +++ b/src/LocalPost/BackgroundQueue/HandlerStackEx.cs @@ -3,6 +3,13 @@ namespace LocalPost.BackgroundQueue; +// public static class PipelineOps +// { +// public static PipelineRegistration Batch(this PipelineRegistration> next, +// ushort batchMaxSize = 10, int timeWindowDuration = 1_000) => next.Map>((stream, _) => +// stream.Batch(() => new BoundedBatchBuilder(batchMaxSize, timeWindowDuration))); +// }w + [PublicAPI] public static class HandlerStackEx { diff --git a/src/LocalPost/BackgroundQueue/Options.cs b/src/LocalPost/BackgroundQueue/Options.cs index 2290586..5357fa4 100644 --- a/src/LocalPost/BackgroundQueue/Options.cs +++ b/src/LocalPost/BackgroundQueue/Options.cs @@ -3,49 +3,101 @@ namespace LocalPost.BackgroundQueue; -// For the DI container, to distinguish between different queues -public sealed record Options : Options; +// // For the DI container, to distinguish between different queues +// public sealed record QueueOptions : QueueOptions; -// For the DI container, to distinguish between different queues -public sealed record BatchedOptions : BatchedOptions; - -public record BatchedOptions : Options -{ - [Range(1, ushort.MaxValue)] public ushort BatchMaxSize { get; set; } = 10; - - // TODO Rename to BatchTimeWindowMs - [Range(1, ushort.MaxValue)] public int BatchTimeWindowMilliseconds { get; set; } = 1_000; - - internal TimeSpan BatchTimeWindow => TimeSpan.FromMilliseconds(BatchTimeWindowMilliseconds); -} +// // For the DI container, to distinguish between different queues +// public sealed record BatchedOptions : BatchedOptions; +// +// public record BatchedOptions : Options +// { +// [Range(1, ushort.MaxValue)] public ushort BatchMaxSize { get; set; } = 10; +// +// 
// TODO Rename to BatchTimeWindowMs +// [Range(1, ushort.MaxValue)] public int BatchTimeWindowMilliseconds { get; set; } = 1_000; +// +// internal TimeSpan BatchTimeWindow => TimeSpan.FromMilliseconds(BatchTimeWindowMilliseconds); +// } /// /// Background queue configuration. /// -public record Options +public sealed class QueueOptions { - /// - /// How to handle new messages when the queue (channel) is full. Default is to drop the oldest message (to not - /// block the producer). - /// - public BoundedChannelFullMode FullMode { get; set; } = BoundedChannelFullMode.DropOldest; + // /// + // /// How to handle new messages when the queue (channel) is full. Default is to drop the oldest message (to not + // /// block the producer). + // /// + // public BoundedChannelFullMode FullMode { get; set; } = BoundedChannelFullMode.DropOldest; + // + // /// + // /// Maximum queue (channel) length, after which writes are blocked (see ). + // /// Default is unlimited. + // /// + // [Range(1, ushort.MaxValue)] + // public ushort? MaxSize { get; set; } = null; - /// - /// Maximum queue (channel) length, after which writes are blocked (see ). - /// Default is unlimited. - /// - [Range(1, ushort.MaxValue)] - public ushort? MaxSize { get; set; } = null; + public ChannelOptions Channel { get; set; } = new UnboundedChannelOptions(); /// /// How long to wait before closing the queue (channel) on app shutdown. Default is 1 second. /// public ushort CompletionDelay { get; set; } = 1_000; // Milliseconds - /// - /// How many messages to process concurrently. Default is 10. - /// - [Required] + // /// + // /// How many messages to process concurrently. Default is 10. 
+ // /// + // [Required] + // [Range(1, ushort.MaxValue)] + // public ushort MaxConcurrency { get; set; } = 10; + + internal void UpdateFrom(QueueOptions options) + { + // FullMode = options.FullMode; + // MaxSize = options.MaxSize; + Channel = options.Channel; + CompletionDelay = options.CompletionDelay; + // MaxConcurrency = options.MaxConcurrency; + } +} + +public sealed class DefaultPipelineOptions +{ + public QueueOptions Queue { get; } = new(); + [Range(1, ushort.MaxValue)] public ushort MaxConcurrency { get; set; } = 10; + + public static implicit operator Pipeline.ConsumerOptions(DefaultPipelineOptions options) => new() + { + MaxConcurrency = options.MaxConcurrency, + BreakOnException = false, + }; +} + +public sealed class DefaultBatchPipelineOptions +{ + // public QueueOptions> Queue { get; } = new(); + public QueueOptions Queue { get; } = new(); + + [Range(1, ushort.MaxValue)] + public ushort MaxConcurrency { get; set; } = 10; + + [Range(1, ushort.MaxValue)] + public ushort BatchMaxSize { get; set; } = 10; + + [Range(1, ushort.MaxValue)] + public int TimeWindowMs { get; set; } = 1_000; + + public static implicit operator Pipeline.ConsumerOptions(DefaultBatchPipelineOptions options) => new() + { + MaxConcurrency = options.MaxConcurrency, + BreakOnException = false, + }; + + public static implicit operator BatchOptions(DefaultBatchPipelineOptions options) => new() + { + MaxSize = options.BatchMaxSize, + TimeWindowDuration = options.TimeWindowMs, + }; } diff --git a/src/LocalPost/BackgroundQueueConsumer.cs b/src/LocalPost/BackgroundQueueConsumer.cs index 16d9dd1..87ce7de 100644 --- a/src/LocalPost/BackgroundQueueConsumer.cs +++ b/src/LocalPost/BackgroundQueueConsumer.cs @@ -1,130 +1,146 @@ using LocalPost.DependencyInjection; -using Microsoft.Extensions.Logging; namespace LocalPost; -internal sealed record ConsumerOptions(ushort MaxConcurrency, bool BreakOnException); +internal interface IStreamRunner : IBackgroundService, IAssistantService; -internal 
static class Queue +internal sealed class StreamRunner(IAsyncEnumerable stream, StreamProcessor consume) : IStreamRunner { - // public static ConsumerGroup ConsumerGroupFor(TQ queue, Handler handler, ushort maxConcurrency) - // where TQ : IAsyncEnumerable => new(Consumer.LoopOver(queue, handler), maxConcurrency); - // - // public static NamedConsumerGroup ConsumerGroupForNamed( - // TQ queue, Handler handler, ushort maxConcurrency) - // where TQ : IAsyncEnumerable, INamedService => - // new(queue, Consumer.LoopOver(queue, handler), maxConcurrency); + public required AssistedService Target { get; init; } - // Parametrized class, to be used with the Dependency Injection container - internal class NamedConsumer( - ILogger> logger, - TQ queue, - Handler handler, - ushort maxConcurrency) - : ConsumerBase(logger, queue, handler, maxConcurrency), INamedService - where TQ : IAsyncEnumerable, INamedService - { - public string Name { get; } = queue.Name; - } + private Task? _exec; + private CancellationTokenSource? _execCts; - // Parametrized class, to be used with the Dependency Injection container - internal class Consumer( - ILogger> logger, - TQ queue, - Handler handler, - ushort maxConcurrency) - : ConsumerBase(logger, queue, handler, maxConcurrency) - where TQ : IAsyncEnumerable; + public Task StartAsync(CancellationToken ct) => Task.CompletedTask; - internal abstract class ConsumerBase( - ILogger> logger, - IAsyncEnumerable queue, - Handler handler, - ushort maxConcurrency) - : IBackgroundService //, IDisposable + public Task ExecuteAsync(CancellationToken ct) { - public bool BreakOnException { get; init; } = false; - // private bool _broken = false; - - private Task? _exec; - private CancellationTokenSource? 
_execCts; - - private async Task Execute(CancellationToken execCt) - { - // using var loopCts = new CancellationTokenSource(); - using var loopCts = CancellationTokenSource.CreateLinkedTokenSource(execCt); - // using var cts = CancellationTokenSource.CreateLinkedTokenSource(execCt, loopCts.Token); - var loopCt = loopCts.Token; - - await Task.WhenAll(Enumerable.Range(1, maxConcurrency) - .Select(_ => Loop())); - - return; - - async Task Loop() - { - try - { - await foreach (var message in queue.WithCancellation(loopCt)) - await Handle(message); - } - catch (OperationCanceledException) when (loopCt.IsCancellationRequested) - { - // Fine, breaking the loop because of an exception in the handler - } - } - - async Task Handle(T message) - { - try - { - await handler(message, execCt); - } - catch (OperationCanceledException) when (execCt.IsCancellationRequested) - { - throw; // App shutdown timeout (force shutdown) - } - catch (Exception e) - { - if (BreakOnException) - { - // Break the loop (all the concurrent executions of it) - // ReSharper disable once AccessToDisposedClosure - loopCts.Cancel(); - // Push it up, so the service is marked as unhealthy - throw; - } + if (_exec is not null) + return _exec; - logger.LogError(e, "Failed to handle a message"); - } - } - } + var execCts = _execCts = new CancellationTokenSource(); + return _exec = consume(stream, execCts.Token); + } - public Task StartAsync(CancellationToken ct) => Task.CompletedTask; + // Process the rest (leftovers). 
Common cases: + // - SQS: message source (fetcher) has been stopped, so we just need to process leftovers from the channel + // - Kafka: message source (consumer) has been stopped, so we just need to process leftovers from the channel + // - Background (job) queue: hope that the producers are stopped, so no new messages should appear, so we + // just need to process leftovers from the queue + public Task StopAsync(CancellationToken ct) + { + if (_exec is null) + return Task.CompletedTask; - public Task ExecuteAsync(CancellationToken ct) - { - if (_exec is not null) - return _exec; + ct.Register(() => _execCts?.Cancel()); + return _exec; - var execCts = _execCts = new CancellationTokenSource(); - return _exec = Execute(execCts.Token); - } + // Cleanup the state?.. + } +} - // Process the rest (leftovers). Common cases: - // - SQS: message source (fetcher) has been stopped, so we just need to process leftovers from the channel - // - Kafka: message source (consumer) has been stopped, so we just need to process leftovers from the channel - // - Background (job) queue: hope that the producers are stopped, so no new messages should appear, so we - // just need to process leftovers from the queue - public Task StopAsync(CancellationToken ct) - { - if (_exec is null) - return Task.CompletedTask; - ct.Register(() => _execCts?.Cancel()); - return _exec; - // Cleanup the state?.. - } - } -} +// internal sealed record ConsumerOptions(ushort MaxConcurrency, bool BreakOnException); +// +// internal static class Queue +// { +// internal interface IConsumer : IBackgroundService, IServiceFor; +// +// internal sealed class Consumer( +// ILogger> logger, +// IAsyncEnumerable queue, +// Handler handler, +// ushort maxConcurrency) +// : IConsumer //, IDisposable +// { +// public required string Target { get; init; } +// +// public bool BreakOnException { get; init; } = false; +// // private bool _broken = false; +// +// private Task? _exec; +// private CancellationTokenSource? 
_execCts; +// +// private async Task Execute(CancellationToken execCt) +// { +// // using var loopCts = new CancellationTokenSource(); +// using var loopCts = CancellationTokenSource.CreateLinkedTokenSource(execCt); +// // using var cts = CancellationTokenSource.CreateLinkedTokenSource(execCt, loopCts.Token); +// var loopCt = loopCts.Token; +// +// await Task.WhenAll(Enumerable.Range(1, maxConcurrency) +// .Select(_ => Loop())); +// +// return; +// +// async Task Loop() +// { +// try +// { +// await foreach (var message in queue.WithCancellation(loopCt)) +// await Handle(message); +// } +// catch (OperationCanceledException) when (loopCt.IsCancellationRequested) +// { +// // It is either: +// // - app shutdown timeout (force shutdown) +// // - handler exception (when BreakOnException is set) +// // Just break the loop +// } +// } +// +// async Task Handle(T message) +// { +// try +// { +// await handler(message, execCt); +// } +// catch (OperationCanceledException) when (execCt.IsCancellationRequested) +// { +// throw; // App shutdown timeout (force shutdown) +// } +// catch (Exception e) +// { +// if (BreakOnException) +// { +// // Break the loop (all the concurrent executions of it) +// // ReSharper disable once AccessToDisposedClosure +// loopCts.Cancel(); +// // Push it up, so the service is marked as unhealthy +// throw; +// } +// +// logger.LogError(e, "Failed to handle a message"); +// } +// } +// } +// +// public Task StartAsync(CancellationToken ct) => Task.CompletedTask; +// +// public Task ExecuteAsync(CancellationToken ct) +// { +// if (_exec is not null) +// return _exec; +// +// var execCts = _execCts = new CancellationTokenSource(); +// return _exec = Execute(execCts.Token); +// } +// +// // Process the rest (leftovers). 
Common cases: +// // - SQS: message source (fetcher) has been stopped, so we just need to process leftovers from the channel +// // - Kafka: message source (consumer) has been stopped, so we just need to process leftovers from the channel +// // - Background (job) queue: hope that the producers are stopped, so no new messages should appear, so we +// // just need to process leftovers from the queue +// public Task StopAsync(CancellationToken ct) +// { +// if (_exec is null) +// return Task.CompletedTask; +// +// ct.Register(() => _execCts?.Cancel()); +// return _exec; +// +// // Cleanup the state?.. +// } +// } +// } diff --git a/src/LocalPost/ConcurrentHostedServices.cs b/src/LocalPost/ConcurrentHostedServices.cs index d73a100..a5d4ea3 100644 --- a/src/LocalPost/ConcurrentHostedServices.cs +++ b/src/LocalPost/ConcurrentHostedServices.cs @@ -1,3 +1,4 @@ +using System.Collections; using System.Collections.Immutable; using System.Diagnostics.CodeAnalysis; using LocalPost.DependencyInjection; @@ -17,6 +18,18 @@ internal interface IBackgroundService Task StopAsync(CancellationToken ct); } +internal sealed class BackgroundServicesMonitor(IReadOnlyCollection services) + : IBackgroundServiceMonitor +{ + public bool Started => services.All(s => s.Started); + public bool Running => services.All(s => s.Running); + public Task Stopped => Task.WhenAll(services.Select(s => s.Stopped)); + + public bool Crashed => services.Any(s => s.Crashed); + + public Exception? Exception => services.Select(s => s.Exception).FirstOrDefault(); +} + internal interface IBackgroundServiceMonitor { public sealed class LivenessCheck : IHealthCheck @@ -54,28 +67,48 @@ public Task CheckHealthAsync(HealthCheckContext context, public bool Running { get; } + public Task Stopped { get; } + [MemberNotNullWhen(true, nameof(Exception))] public bool Crashed { get; } public Exception? 
Exception { get; } } -internal class NamedBackgroundServiceRunner(T service, IHostApplicationLifetime appLifetime) - : BackgroundServiceRunner(service, appLifetime), INamedService - where T : class, IBackgroundService, INamedService +internal sealed class BackgroundServices : IConcurrentHostedService, IDisposable { - public string Name { get; } = service.Name; + public readonly IReadOnlyCollection Runners; + + public BackgroundServices(IEnumerable services, IHostApplicationLifetime appLifetime) + { + Runners = services.Select(s => new BackgroundServiceRunner(s, appLifetime)).ToArray(); + } + + public Task StartAsync(CancellationToken cancellationToken) => + Task.WhenAll(Runners.Select(s => s.StartAsync(cancellationToken))); + + public Task StopAsync(CancellationToken cancellationToken) => + Task.WhenAll(Runners.Select(s => s.StopAsync(cancellationToken))); + + public void Dispose() + { + foreach (var service in Runners) + service.Dispose(); + } } -internal class BackgroundServiceRunner(T service, IHostApplicationLifetime appLifetime) +internal sealed class BackgroundServiceRunner(IBackgroundService service, IHostApplicationLifetime appLifetime) : IConcurrentHostedService, IBackgroundServiceMonitor, IDisposable - where T : class, IBackgroundService { private Task? _start; private CancellationTokenSource? _executionCts; private Task? _execution; private Task? _executionWrapper; + private readonly TaskCompletionSource _stopped = new(); + + public IBackgroundService Service => service; + public bool Starting => _start is not null && !_start.IsCompleted; // StartedSuccessfully?.. 
@@ -83,6 +116,8 @@ internal class BackgroundServiceRunner(T service, IHostApplicationLifetime ap public bool Running => _execution is not null && !_execution.IsCompleted; + public Task Stopped => _stopped.Task; + public bool StartCrashed => _start is not null && _start.Status == TaskStatus.Faulted; public bool RunCrashed => _execution is not null && _execution.Status == TaskStatus.Faulted; public bool Crashed => StartCrashed || RunCrashed; @@ -142,14 +177,21 @@ public async Task StopAsync(CancellationToken forceExitToken) // Or simply ignore and return?.. throw new InvalidOperationException("Service has not been started"); - if (!_executionCts.IsCancellationRequested) - _executionCts.Cancel(); // Signal cancellation to the service + try + { + if (!_executionCts.IsCancellationRequested) + _executionCts.Cancel(); // Signal cancellation to the service - if (_executionWrapper is not null) - // Wait until the execution completes or the app is forced to exit - await _executionWrapper.WaitAsync(forceExitToken); + if (_executionWrapper is not null) + // Wait until the execution completes or the app is forced to exit + await _executionWrapper.WaitAsync(forceExitToken); - await service.StopAsync(forceExitToken); + await service.StopAsync(forceExitToken); + } + finally + { + _stopped.TrySetResult(true); + } } public void Dispose() @@ -158,9 +200,105 @@ public void Dispose() } } -internal interface IConcurrentHostedService : IHostedService -{ -} +// internal sealed class BackgroundServiceRunner(T service, IHostApplicationLifetime appLifetime) +// : IServiceFor, IConcurrentHostedService, IBackgroundServiceMonitor, IDisposable +// where T : class, IBackgroundService +// { +// private Task? _start; +// private CancellationTokenSource? _executionCts; +// private Task? _execution; +// private Task? 
_executionWrapper; +// +// public string Target => service switch +// { +// INamedService namedService => namedService.Name, +// IServiceFor serviceForNamed => serviceForNamed.Target, +// _ => Reflection.FriendlyNameOf() +// }; +// +// public bool Starting => _start is not null && !_start.IsCompleted; +// +// // StartedSuccessfully?.. +// public bool Started => _start is not null && _start.Status == TaskStatus.RanToCompletion; +// +// public bool Running => _execution is not null && !_execution.IsCompleted; +// +// public bool StartCrashed => _start is not null && _start.Status == TaskStatus.Faulted; +// public bool RunCrashed => _execution is not null && _execution.Status == TaskStatus.Faulted; +// public bool Crashed => StartCrashed || RunCrashed; +// +// // TODO Test +// public Exception? Exception => (StartCrashed ? _start?.Exception : _execution?.Exception)?.InnerException; +// +// private async Task WaitAppStartAsync(CancellationToken ct) +// { +// try +// { +// // Wait until all other services have started +// await Task.Delay(Timeout.Infinite, appLifetime.ApplicationStarted).WaitAsync(ct); +// } +// catch (OperationCanceledException e) when (e.CancellationToken == appLifetime.ApplicationStarted) +// { +// // Startup completed, continue +// } +// } +// +// public async Task StartAsync(CancellationToken ct) +// { +// // All the services are started from the same (main) thread, so there are no races +// if (_start is not null) +// throw new InvalidOperationException("Service is already started"); +// +// await (_start = service.StartAsync(ct)); +// +// // Start execution in the background... 
+// _executionCts = new CancellationTokenSource(); +// _executionWrapper = ExecuteAsync(_executionCts.Token); +// } +// +// private async Task ExecuteAsync(CancellationToken ct) +// { +// try +// { +// await WaitAppStartAsync(ct); +// await (_execution = service.ExecuteAsync(ct)); +// } +// catch (OperationCanceledException) when (ct.IsCancellationRequested) +// { +// // Normal case, we trigger this token ourselves when stopping the service +// } +// catch (Exception) +// { +// // Otherwise it's an error, but swallow it silently (this method is called in "fire and forget" mode, not +// // awaited, so any unhandled exception will arrive in TaskScheduler.UnobservedTaskException, which is not +// // what we want). +// // See also: https://stackoverflow.com/a/59300076/322079. +// } +// } +// +// public async Task StopAsync(CancellationToken forceExitToken) +// { +// if (_executionCts is null) +// // Or simply ignore and return?.. +// throw new InvalidOperationException("Service has not been started"); +// +// if (!_executionCts.IsCancellationRequested) +// _executionCts.Cancel(); // Signal cancellation to the service +// +// if (_executionWrapper is not null) +// // Wait until the execution completes or the app is forced to exit +// await _executionWrapper.WaitAsync(forceExitToken); +// +// await service.StopAsync(forceExitToken); +// } +// +// public void Dispose() +// { +// _executionCts?.Dispose(); +// } +// } + +internal interface IConcurrentHostedService : IHostedService; internal sealed class ConcurrentHostedServices(IEnumerable services) : IHostedService { diff --git a/src/LocalPost/DependencyInjection/HealthChecks.cs b/src/LocalPost/DependencyInjection/HealthChecks.cs index 7afdae7..841ce30 100644 --- a/src/LocalPost/DependencyInjection/HealthChecks.cs +++ b/src/LocalPost/DependencyInjection/HealthChecks.cs @@ -1,9 +1,8 @@ using System.Collections.Immutable; using JetBrains.Annotations; -using Microsoft.Extensions.Diagnostics.HealthChecks; using 
Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; using Microsoft.Extensions.Hosting; -using Microsoft.Extensions.Logging; namespace LocalPost.DependencyInjection; @@ -14,14 +13,14 @@ public static IServiceCollection AddAppHealthSupervisor(this IServiceCollection IEnumerable? tags = null) { services.AddSingleton(provider => new AppHealthSupervisor( - provider.GetRequiredService>(), + provider.GetLoggerFor(), provider.GetRequiredService(), provider.GetRequiredService()) { Tags = tags?.ToImmutableHashSet() ?? ImmutableHashSet.Empty }); - services.AddBackgroundServiceFor(); + services.AddBackgroundService(); return services; } @@ -29,22 +28,21 @@ public static IServiceCollection AddAppHealthSupervisor(this IServiceCollection internal static class HealthChecksBuilderEx { - internal static IHealthChecksBuilder AddConsumerLivenessCheck(this IHealthChecksBuilder builder, - string? name = default, HealthStatus? failureStatus = default, IEnumerable? tags = default) - where TQ : IAsyncEnumerable + internal static IHealthChecksBuilder AddPipelineLivenessCheck(this IHealthChecksBuilder builder, + HealthStatus? failureStatus = default, IEnumerable? tags = default) { - var check = HealthChecks.LivenessCheckFor>(failureStatus, tags); - if (name is not null) - check.Name = name; + var check = HealthChecks.PipelineLivenessCheckFor(failureStatus, tags); + // if (name is not null) + // check.Name = name; return builder.Add(check); } - internal static IHealthChecksBuilder AddNamedConsumerLivenessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) - where TQ : IAsyncEnumerable, INamedService + internal static IHealthChecksBuilder AddPipelineLivenessCheck(this IHealthChecksBuilder builder, string name, + HealthStatus? failureStatus = default, IEnumerable? 
tags = default) + where T : INamedService { - var check = HealthChecks.LivenessCheckForNamed>(name, failureStatus, tags); + var check = HealthChecks.PipelineLivenessCheckFor(name, failureStatus, tags); return builder.Add(check); } @@ -52,30 +50,75 @@ internal static IHealthChecksBuilder AddNamedConsumerLivenessCheck(this I internal static class HealthChecks { - public static HealthCheckRegistration LivenessCheckFor( + public static HealthCheckRegistration LivenessCheck( HealthStatus? failureStatus = null, IEnumerable? tags = null) where T : class, IBackgroundService => new(Reflection.FriendlyNameOf(), // Can be overwritten later provider => new IBackgroundServiceMonitor.LivenessCheck - { Service = provider.GetRequiredService>() }, + { Service = provider.GetBackgroundServiceRunner() }, failureStatus, // Can be overwritten later tags); - public static HealthCheckRegistration LivenessCheckForNamed(string name, + public static HealthCheckRegistration LivenessCheck(string name, HealthStatus? failureStatus = null, IEnumerable? tags = null) where T : class, IBackgroundService, INamedService => new(name, // Can be overwritten later provider => new IBackgroundServiceMonitor.LivenessCheck - { Service = provider.GetRequiredService>(name) }, + { Service = provider.GetBackgroundServiceRunner(name) }, failureStatus, // Can be overwritten later tags); - public static HealthCheckRegistration ReadinessCheckForNamed( - string name, HealthStatus? failureStatus = null, IEnumerable? tags = null) + public static HealthCheckRegistration ReadinessCheck(string name, + HealthStatus? failureStatus = null, IEnumerable? 
tags = null) where T : class, IBackgroundService, INamedService => new(name, // Can be overwritten later provider => new IBackgroundServiceMonitor.ReadinessCheck - { Service = provider.GetRequiredService>(name) }, + { Service = provider.GetBackgroundServiceRunner(name) }, failureStatus, // Can be overwritten later tags); + + // public static HealthCheckRegistration LivenessCheckFor(string target, + // HealthStatus? failureStatus = null, IEnumerable? tags = null) + // where T : class, IBackgroundService, IAssistantService => + // new(target, // Can be overwritten later + // provider => new IBackgroundServiceMonitor.LivenessCheck + // { Service = provider.GetBackgroundServiceRunnerFor(target) }, + // failureStatus, // Can be overwritten later + // tags); + // + // public static HealthCheckRegistration ReadinessCheckFor(string target, + // HealthStatus? failureStatus = null, IEnumerable? tags = null) + // where T : class, IBackgroundService, IAssistantService => + // new(target, // Can be overwritten later + // provider => new IBackgroundServiceMonitor.ReadinessCheck + // { Service = provider.GetBackgroundServiceRunnerFor(target) }, + // failureStatus, // Can be overwritten later + // tags); + + public static HealthCheckRegistration PipelineLivenessCheckFor( + HealthStatus? failureStatus = null, IEnumerable? tags = null) => + // TODO Make it like "MessageSource:pipeline"... + new(Reflection.FriendlyNameOf(), // Can be overwritten later + provider => new IBackgroundServiceMonitor.LivenessCheck + { Service = provider.GetPipelineMonitorFor() }, + failureStatus, // Can be overwritten later + tags); + + public static HealthCheckRegistration PipelineLivenessCheckFor(string name, + HealthStatus? failureStatus = null, IEnumerable? tags = null) + where T : INamedService => + // TODO Make it like "MessageSource:pipeline:name"... 
+ new(Reflection.FriendlyNameOf(name), // Can be overwritten later + provider => new IBackgroundServiceMonitor.LivenessCheck + { Service = provider.GetPipelineMonitorFor(name) }, + failureStatus, // Can be overwritten later + tags); + + // public static HealthCheckRegistration PipelineReadinessCheckFor(string name, + // HealthStatus? failureStatus = null, IEnumerable? tags = null) => + // new(AssistedService.From(), // Can be overwritten later + // provider => new IBackgroundServiceMonitor.ReadinessCheck + // { Service = provider.GetPipelineMonitorFor(name) }, + // failureStatus, // Can be overwritten later + // tags); } diff --git a/src/LocalPost/DependencyInjection/IAssistantService.cs b/src/LocalPost/DependencyInjection/IAssistantService.cs new file mode 100644 index 0000000..510da1d --- /dev/null +++ b/src/LocalPost/DependencyInjection/IAssistantService.cs @@ -0,0 +1,28 @@ +namespace LocalPost.DependencyInjection; + +public readonly record struct AssistedService +{ + private readonly Type _type; + + private readonly string? _name; + + private AssistedService(Type type, string? 
name = null) + { + _type = type; + _name = name; + } + + internal static AssistedService From() => new(typeof(T)); + + internal static AssistedService From(string name) where T : INamedService => new(typeof(T), name); + + public static implicit operator string(AssistedService service) => service.ToString(); + + public override string ToString() => Reflection.FriendlyNameOf(_type, _name); +} + +internal interface IAssistantService +{ + // string Target { get; } + AssistedService Target { get; } +} diff --git a/src/LocalPost/DependencyInjection/INamedService.cs b/src/LocalPost/DependencyInjection/INamedService.cs index 4e9aecf..d8cc554 100644 --- a/src/LocalPost/DependencyInjection/INamedService.cs +++ b/src/LocalPost/DependencyInjection/INamedService.cs @@ -9,23 +9,25 @@ internal interface INamedService internal sealed class NamedServiceDescriptor : ServiceDescriptor { - public static NamedServiceDescriptor Singleton(string name, - Func implementationFactory) where TService : class, INamedService => - new(typeof(TService), name, implementationFactory, ServiceLifetime.Singleton); + public static NamedServiceDescriptor Singleton(string name, Func iFactory) + where TService : class, INamedService => + new(typeof(TService), name, iFactory, ServiceLifetime.Singleton); public string Name { get; init; } - public NamedServiceDescriptor(Type serviceType, string name, Type implementationType, ServiceLifetime lifetime) : base(serviceType, implementationType, lifetime) + public NamedServiceDescriptor(Type sType, string name, Type iType, ServiceLifetime lifetime) : + base(sType, iType, lifetime) { Name = name; } - public NamedServiceDescriptor(Type serviceType, string name, object instance) : base(serviceType, instance) + public NamedServiceDescriptor(Type sType, string name, object instance) : base(sType, instance) { Name = name; } - public NamedServiceDescriptor(Type serviceType, string name, Func factory, ServiceLifetime lifetime) : base(serviceType, factory, lifetime) + 
public NamedServiceDescriptor(Type sType, string name, Func factory, + ServiceLifetime lifetime) : base(sType, factory, lifetime) { Name = name; } diff --git a/src/LocalPost/DependencyInjection/ServiceCollectionEx.cs b/src/LocalPost/DependencyInjection/ServiceCollectionEx.cs index bdfbbdb..40bd93d 100644 --- a/src/LocalPost/DependencyInjection/ServiceCollectionEx.cs +++ b/src/LocalPost/DependencyInjection/ServiceCollectionEx.cs @@ -1,57 +1,67 @@ using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Logging; namespace LocalPost.DependencyInjection; public static partial class ServiceCollectionEx { - internal static bool TryAddBackgroundConsumer(this IServiceCollection services, string name, - HandlerFactory hf, Func of) - where TQ : IAsyncEnumerable, INamedService - { - if (!services.TryAddNamedSingleton(name, CreateConsumer)) - return false; + internal static RegistrationContext RegistrationContextFor(this IServiceCollection services) => + new(services, AssistedService.From()); - services.AddBackgroundServiceForNamed>(name); + internal static RegistrationContext RegistrationContextFor(this IServiceCollection services, string name) + where T : INamedService => + new(services, AssistedService.From(name)); - return true; + // internal static bool TryAddBackgroundConsumer(this IServiceCollection services, string name, + // HandlerFactory hf, Func of) + // where TQ : IAsyncEnumerable, INamedService + // { + // if (!services.TryAddNamedSingleton(name, CreateConsumer)) + // return false; + // + // services.AddBackgroundService>(name); + // + // return true; + // + // Queue.NamedConsumer CreateConsumer(IServiceProvider provider) + // { + // var options = of(provider); + // var handler = hf(provider); + // + // return new Queue.NamedConsumer( + // provider.GetRequiredService>>(), + // provider.GetRequiredService(name), handler, options.MaxConcurrency) + // { + // BreakOnException = options.BreakOnException + // }; + // } + // } + // + // internal 
static bool TryAddBackgroundConsumer(this IServiceCollection services, + // HandlerFactory hf, Func of) + // where TQ : IAsyncEnumerable + // { + // if (!services.TryAddSingleton(CreateConsumer)) + // return false; + // + // services.AddBackgroundService>(); + // + // return true; + // + // Queue.Consumer CreateConsumer(IServiceProvider provider) + // { + // var options = of(provider); + // var handler = hf(provider); + // + // return new Queue.Consumer( + // provider.GetRequiredService>>(), + // provider.GetRequiredService(), handler, options.MaxConcurrency) + // { + // BreakOnException = options.BreakOnException + // }; + // } + // } - Queue.NamedConsumer CreateConsumer(IServiceProvider provider) - { - var options = of(provider); - var handler = hf(provider); - - return new Queue.NamedConsumer( - provider.GetRequiredService>>(), - provider.GetRequiredService(name), handler, options.MaxConcurrency) - { - BreakOnException = options.BreakOnException - }; - } - } - - internal static bool TryAddBackgroundConsumer(this IServiceCollection services, - HandlerFactory hf, Func of) - where TQ : IAsyncEnumerable - { - if (!services.TryAddSingleton(CreateConsumer)) - return false; - - services.AddBackgroundServiceFor>(); - - return true; - - Queue.Consumer CreateConsumer(IServiceProvider provider) - { - var options = of(provider); - var handler = hf(provider); - - return new Queue.Consumer( - provider.GetRequiredService>>(), - provider.GetRequiredService(), handler, options.MaxConcurrency) - { - BreakOnException = options.BreakOnException - }; - } - } + // Just register a background service directly + // internal static IServiceCollection AddBackgroundPipeline(this IServiceCollection services, string target, + // IAsyncEnumerable stream, PipelineConsumer consume) => ... 
} diff --git a/src/LocalPost/DependencyInjection/ServiceCollectionTools.cs b/src/LocalPost/DependencyInjection/ServiceCollectionTools.cs index 1856240..d2fb785 100644 --- a/src/LocalPost/DependencyInjection/ServiceCollectionTools.cs +++ b/src/LocalPost/DependencyInjection/ServiceCollectionTools.cs @@ -1,65 +1,94 @@ using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Hosting; namespace LocalPost.DependencyInjection; internal static class ServiceCollectionTools { - public static void AddConcurrentHostedServices(this IServiceCollection services) => services - .AddHostedService(); - - public static void AddBackgroundServiceForNamed(this IServiceCollection services, string name) - where T : class, IBackgroundService, INamedService +// public static void AddBackgroundServiceFor(this IServiceCollection services, string name) +// where T : class, IBackgroundService, IServiceFor +// { +// services.AddConcurrentHostedServices(); +// +// var added = services.TryAddNamedSingleton>(name, provider => +// new BackgroundServiceRunner(provider.GetRequiredService(name), +// provider.GetRequiredService())); +// if (!added) +// return; +// +// services.AddSingletonAlias>(name); +// } +// +// public static void AddBackgroundService(this IServiceCollection services, string name) +// where T : class, IBackgroundService, INamedService +// { +// services.AddConcurrentHostedServices(); +// +// var added = services.TryAddNamedSingleton>(name, provider => +// new BackgroundServiceRunner(provider.GetRequiredService(name), +// provider.GetRequiredService())); +// if (!added) +// return; +// +// services.AddSingletonAlias>(name); +// } +// +// public static void AddBackgroundService(this IServiceCollection services) +// where T : class, IBackgroundService +// { +// services.AddConcurrentHostedServices(); +// +// // We DO expect that this service is registered by the user... 
+// // services.AddSingleton(); +// // services.AddSingleton(); +// +// var added = services.TryAddSingleton>(provider => +// new BackgroundServiceRunner(provider.GetRequiredService(), +// provider.GetRequiredService())); +// if (!added) +// return; +// +// services.AddSingletonAlias>(); +// +// +// // FIXME Remove and check +// // services.AddSingleton(provider => +// // provider.GetRequiredService>()); +// } + + public static void AddBackgroundService(this IServiceCollection services, + Func factory) => + services.AddConcurrentHostedServices().AddSingleton(factory); + + public static void AddBackgroundService(this IServiceCollection services) + where T : class, IBackgroundService => + services.AddConcurrentHostedServices().AddSingletonAlias(); + + public static void AddBackgroundService(this IServiceCollection services, string name) + where T : class, IBackgroundService, INamedService => + services.AddConcurrentHostedServices().AddSingletonAlias(name); + + public static IServiceCollection AddConcurrentHostedServices(this IServiceCollection services) { - services.AddConcurrentHostedServices(); - - // We DO expect that this service is registered by the user already... -// services.AddSingleton(); -// services.AddSingleton(); - - var added = services.TryAddNamedSingleton>(name, provider => - new NamedBackgroundServiceRunner(provider.GetRequiredService(name), - provider.GetRequiredService())); - if (!added) - return; - - services.AddSingleton(provider => - provider.GetRequiredService>(name)); - services.AddSingleton(provider => - provider.GetRequiredService>(name)); - } + if (!services.TryAddSingleton()) + return services; - public static void AddBackgroundServiceFor(this IServiceCollection services) - where T : class, IBackgroundService - { - services.AddConcurrentHostedServices(); - - // We DO expect that this service is registered by the user already... 
-// services.AddSingleton(); -// services.AddSingleton(); - - var added = services.TryAddSingleton>(provider => - new BackgroundServiceRunner(provider.GetRequiredService(), - provider.GetRequiredService())); - if (!added) - return; - - services.AddSingleton(provider => - provider.GetRequiredService>()); - services.AddSingleton(provider => - provider.GetRequiredService>()); + return services + .AddHostedService() + .AddSingletonAlias(); } public static bool TryAddNamedSingleton(this IServiceCollection services, string name, - Func implementationFactory) where TService : class, INamedService => - services.TryAdd(NamedServiceDescriptor.Singleton(name, implementationFactory)); + Func factory) + where TService : class, INamedService => + services.TryAdd(NamedServiceDescriptor.Singleton(name, factory)); public static bool TryAddSingleton(this IServiceCollection services) where TService : class => services.TryAdd(ServiceDescriptor.Singleton()); public static bool TryAddSingleton(this IServiceCollection services, - Func implementationFactory) where TService : class => - services.TryAdd(ServiceDescriptor.Singleton(implementationFactory)); + Func factory) + where TService : class => + services.TryAdd(ServiceDescriptor.Singleton(factory)); // "If binary compatibility were not a problem, then the TryAdd methods could return bool" // from https://github.com/dotnet/runtime/issues/45114#issuecomment-733807639 diff --git a/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs b/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs index ed54bc4..518e35e 100644 --- a/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs +++ b/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs @@ -8,7 +8,11 @@ internal static class ServiceProviderLookups { public static T GetRequiredService(this IServiceProvider provider, string name) where T : INamedService => - provider.GetRequiredService>().First(x => x.Name == name); + provider.GetServices().First(service => 
service.Name == name); + + // public static T GetRequiredServiceFor(this IServiceProvider provider, string target) + // where T : IAssistantService => + // provider.GetServices().First(service => service.Target == target); public static T GetOptions(this IServiceProvider provider) where T : class => provider.GetRequiredService>().Value; @@ -18,4 +22,61 @@ public static T GetOptions(this IServiceProvider provider, string name) where public static ILogger GetLoggerFor(this IServiceProvider provider) => provider.GetRequiredService>(); + + public static BackgroundServiceRunner GetBackgroundServiceRunner(this IServiceProvider provider) + where T : IBackgroundService => + provider.GetRequiredService().Runners + .First(runner => runner.Service is T); + + public static BackgroundServiceRunner GetBackgroundServiceRunner(this IServiceProvider provider, + string name) + where T : IBackgroundService, INamedService => + provider.GetRequiredService().Runners + .First(runner => runner.Service is T s && s.Name == name); + + // public static BackgroundServiceRunner GetBackgroundServiceRunnerFor(this IServiceProvider provider, + // string target) + // where T : IBackgroundService, IAssistantService => + // provider.GetRequiredService().Runners + // .First(runner => runner.Service is T s && s.Target == target); + + public static IBackgroundServiceMonitor GetPipelineMonitorFor(this IServiceProvider provider) + { + var target = AssistedService.From(); + var services = provider.GetRequiredService(); + var runners = services.Runners + .Where(runner => runner.Service is IStreamRunner pr && pr.Target == target) + .ToArray(); + + return new BackgroundServicesMonitor(runners); + } + + public static IBackgroundServiceMonitor GetPipelineMonitorFor(this IServiceProvider provider, string name) + where T : INamedService + { + var target = AssistedService.From(name); + var services = provider.GetRequiredService(); + var runners = services.Runners + .Where(runner => runner.Service is IStreamRunner 
pr && pr.Target == target) + .ToArray(); + + return new BackgroundServicesMonitor(runners); + } + + public static IEnumerable GetPipelineRunnersFor(this IServiceProvider provider) + { + var target = AssistedService.From(); + return provider.GetServices() + .OfType() + .Where(runner => runner.Target == target); + } + + public static IEnumerable GetPipelineRunnersFor(this IServiceProvider provider, string name) + where T : INamedService + { + var target = AssistedService.From(name); + return provider.GetServices() + .OfType() + .Where(runner => runner.Target == target); + } } diff --git a/src/LocalPost/Handler.cs b/src/LocalPost/Handler.cs index 381d8e5..7a58e78 100644 --- a/src/LocalPost/Handler.cs +++ b/src/LocalPost/Handler.cs @@ -1,9 +1,36 @@ +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; + namespace LocalPost; +// Because C# does not support sum types... +// public interface IServiceCollectionFor : IServiceCollection +// { +// string Target { get; } +// } + +public readonly record struct RegistrationContext(IServiceCollection Services, AssistedService Target); + +// TODO Make internal +public delegate Task StreamProcessor(IAsyncEnumerable stream, CancellationToken ct); + +public delegate IAsyncEnumerable PipelineFactory(IServiceProvider provider); + +public delegate void PipelineRegistration(RegistrationContext services, PipelineFactory source); + + + +public delegate IAsyncEnumerable PipelineMiddleware(IAsyncEnumerable source, + CancellationToken ct = default); + + + public delegate ValueTask Handler(T context, CancellationToken ct); public delegate Handler HandlerFactory(IServiceProvider provider); + + public delegate Handler HandlerMiddleware(Handler next); // Too narrow use case diff --git a/src/LocalPost/HandlerStack.cs b/src/LocalPost/HandlerStack.cs index 3799868..0896658 100644 --- a/src/LocalPost/HandlerStack.cs +++ b/src/LocalPost/HandlerStack.cs @@ -1,5 +1,10 @@ +using System.Runtime.CompilerServices; +using 
System.Threading.Channels; using JetBrains.Annotations; +using LocalPost.AsyncEnumerable; +using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Logging; namespace LocalPost; @@ -17,3 +22,253 @@ public static class HandlerStack public static HandlerFactory From() where THandler : IHandler => provider => provider.GetRequiredService().InvokeAsync; } + + + +[PublicAPI] +public static class Pipeline +{ + public sealed record ConsumerOptions(ushort MaxConcurrency = 1, bool BreakOnException = false); + + internal sealed class Consumer( + ILogger> logger, + Handler handler, + ushort maxConcurrency = 1, + bool breakOnException = false) + { + public async Task Consume(IAsyncEnumerable queue, CancellationToken execCt) + { + // using var loopCts = new CancellationTokenSource(); + using var loopCts = CancellationTokenSource.CreateLinkedTokenSource(execCt); + // using var cts = CancellationTokenSource.CreateLinkedTokenSource(execCt, loopCts.Token); + var loopCt = loopCts.Token; + + await Task.WhenAll(Enumerable.Range(1, maxConcurrency) + .Select(_ => Loop())); + + return; + + async Task Loop() + { + try + { + await foreach (var message in queue.WithCancellation(loopCt)) + await Handle(message); + } + catch (OperationCanceledException) when (loopCt.IsCancellationRequested) + { + // It is either: + // - app shutdown timeout (force shutdown) + // - handler exception (when BreakOnException is set) + // Just break the loop + } + } + + async Task Handle(T message) + { + try + { + await handler(message, execCt); + } + catch (OperationCanceledException) when (execCt.IsCancellationRequested) + { + throw; // App shutdown timeout (force shutdown) + } + catch (Exception e) + { + if (breakOnException) + { + // Break the loop (all the concurrent executions of it) + // ReSharper disable once AccessToDisposedClosure + loopCts.Cancel(); + // Push it up, so the service is marked as unhealthy + throw; + } + + logger.LogError(e, "Failed to 
handle a message"); + } + } + } + } + + public static PipelineRegistration Create(HandlerFactory hf, + ushort maxConcurrency, bool breakOnException) => + Create(hf, _ => new ConsumerOptions(maxConcurrency, breakOnException)); + + public static PipelineRegistration Create(HandlerFactory hf, + Func config) => (context, pf) => + context.Services.AddBackgroundService(provider => + { + var stream = pf(provider); + var (maxConcurrency, breakOnException) = config(provider); + var consumer = new Consumer( + provider.GetRequiredService>>(), + hf(provider), + maxConcurrency, + breakOnException); + + return new StreamRunner(stream, consumer.Consume) + { + Target = context.Target, + }; + }); +} + +[PublicAPI] +public static class PipelineOps +{ + public static PipelineRegistration Where(this PipelineRegistration next, + Func pred) + { + return next.Map(Filter); + + async IAsyncEnumerable Filter(IAsyncEnumerable source, [EnumeratorCancellation] CancellationToken ct) + { + await foreach (var item in source.WithCancellation(ct)) + if (pred(item)) + yield return item; + } + } + + // TODO Option with a service from the DI provider (add IPipelineMiddleware interface) + public static PipelineRegistration Map(this PipelineRegistration next, + PipelineMiddleware middleware) => + (services, pf) => next(services, pf.Map(middleware)); + + public static PipelineFactory Map(this PipelineFactory pf, + PipelineMiddleware middleware) => provider => + { + var source = pf(provider); + return middleware(source); + }; + + private sealed class SharedBuffer(Func config) + { + private Channel? 
_buffer; + + public Channel GetOrCreate(IServiceProvider provider) + { + if (_buffer is not null) + return _buffer; + + var capacity = config(provider); + return _buffer = Channel.CreateBounded(new BoundedChannelOptions(capacity) + { + FullMode = BoundedChannelFullMode.Wait, + // This is the point in most of the cases, like batching, to have a simple source reader to a buffer, + // so that buffer can be read by multiple consumers + SingleReader = false, + SingleWriter = true, + }); + + } + } + + public static PipelineRegistration Buffer(this PipelineRegistration next, ushort capacity) => + next.Buffer(_ => capacity); + + public static PipelineRegistration Buffer(this PipelineRegistration next, + Func config) + { + var sharedBuffer = new SharedBuffer(config); + + return (context, source) => + { + var (services, target) = context; + + services.AddBackgroundService(provider => + { + // Freeze (resolve) the current pipeline + var stream = source(provider); + // And drain it to the channel, in the background + return new StreamRunner(stream, BufferWriter(provider)) + { + Target = target, + }; + }); + + // Create a new pipeline, from the channel + next(context, provider => + { + var buffer = sharedBuffer.GetOrCreate(provider); + return buffer.Reader.ReadAllAsync(); + }); + }; + + StreamProcessor BufferWriter(IServiceProvider provider) + { + var buffer = sharedBuffer.GetOrCreate(provider); + + return async (source, ct) => + { + try + { + await foreach (var item in source.WithCancellation(ct)) + await buffer.Writer.WriteAsync(item, ct); + } + finally + { + buffer.Writer.Complete(); + } + }; + } + } + + // public static PipelineRegistration Buffer(this PipelineRegistration next, int capacity = 1) + // { + // var buffer = Channel.CreateBounded(new BoundedChannelOptions(capacity) + // { + // FullMode = BoundedChannelFullMode.Wait, + // SingleReader = false, // Configure somehow... 
+ // SingleWriter = true, + // }); + // + // return (context, source) => + // { + // var (services, target) = context; + // + // services.AddBackgroundService(provider => + // { + // // Freeze (resolve) the current pipeline + // var stream = source(provider); + // // And drain it to the channel, in the background + // return new PipelineRunner(stream, BufferWriter(provider)) + // { + // Target = target, + // }; + // }); + // + // // Create a new pipeline, from the channel + // next(context, provider => + // { + // var buffer = sharedBuffer.GetOrCreate(provider); + // return buffer.Reader.ReadAllAsync(); + // }); + // }; + // + // async Task WriteToBuffer(IAsyncEnumerable source, CancellationToken ct) + // { + // try + // { + // await foreach (var item in source.WithCancellation(ct)) + // await buffer.Writer.WriteAsync(item, ct); + // } + // finally + // { + // buffer.Writer.Complete(); + // } + // } + // } + + public static PipelineRegistration Batch(this PipelineRegistration> next, + ushort batchMaxSize = 10, int timeWindowDuration = 1_000) => + next.Batch(_ => new BatchOptions(batchMaxSize, timeWindowDuration)); + + public static PipelineRegistration Batch(this PipelineRegistration> next, + Func config) => (context, source) => next(context, provider => + { + var stream = source(provider); + var (batchMaxSize, timeWindowDuration) = config(provider); + return stream.Batch(() => new BoundedBatchBuilder(batchMaxSize, timeWindowDuration)); + }); +} diff --git a/src/LocalPost/HandlerStackEx.cs b/src/LocalPost/HandlerStackEx.cs index 1e79e2b..2725fac 100644 --- a/src/LocalPost/HandlerStackEx.cs +++ b/src/LocalPost/HandlerStackEx.cs @@ -1,8 +1,10 @@ +using JetBrains.Annotations; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Hosting; namespace LocalPost; +[PublicAPI] public static partial class HandlerStackEx { // Better use a lambda in place, see Scoped() middleware @@ -31,8 +33,6 @@ public static partial class HandlerStackEx // return 
middlewareFactory(provider).Invoke(handler); // }; - // public static HandlerFactory Map(this HandlerFactory hf, - // HandlerMiddleware middleware) => hf.Map(_ => middleware); public static HandlerFactory Map(this HandlerFactory hf, HandlerMiddleware middleware) => provider => { @@ -51,7 +51,7 @@ public static HandlerFactory Touch(this HandlerFactory hf, // public static HandlerFactory Scoped(this HandlerFactory hf) => hf.Map(ScopedHandler.Wrap); public static HandlerFactory Dispose(this HandlerFactory hf) where T : IDisposable => - hf.Map(next => async (context, ct) => + hf.Touch(next => async (context, ct) => { try { @@ -64,7 +64,7 @@ public static HandlerFactory Dispose(this HandlerFactory hf) where T : }); public static HandlerFactory DisposeAsync(this HandlerFactory hf) where T : IAsyncDisposable => - hf.Map(next => async (context, ct) => + hf.Touch(next => async (context, ct) => { try { @@ -77,7 +77,7 @@ public static HandlerFactory DisposeAsync(this HandlerFactory hf) where }); public static HandlerFactory SkipWhen(this HandlerFactory hf, Func pred) => - hf.Map(next => async (context, ct) => + hf.Touch(next => async (context, ct) => { if (pred(context)) return; @@ -85,27 +85,6 @@ public static HandlerFactory SkipWhen(this HandlerFactory hf, Func ShutdownOnError(this HandlerFactory hf, int exitCode = 1) => - // hf.Map(provider => - // { - // var appLifetime = provider.GetRequiredService(); - // return next => async (context, ct) => - // { - // try - // { - // await next(context, ct); - // } - // catch (OperationCanceledException e) when (e.CancellationToken == ct) - // { - // throw; - // } - // catch - // { - // appLifetime.StopApplication(); - // Environment.ExitCode = exitCode; - // } - // }; - // }); public static HandlerFactory ShutdownOnError(this HandlerFactory hf, int exitCode = 1) => provider => { var appLifetime = provider.GetRequiredService(); diff --git a/src/LocalPost/LocalPost.csproj b/src/LocalPost/LocalPost.csproj index d3e5622..ef3373a 100644 
--- a/src/LocalPost/LocalPost.csproj +++ b/src/LocalPost/LocalPost.csproj @@ -69,18 +69,12 @@ - - <_Parameter1>$(MSBuildProjectName).SnsPublisher - <_Parameter1>$(MSBuildProjectName).SqsConsumer <_Parameter1>$(MSBuildProjectName).KafkaConsumer - - <_Parameter1>$(MSBuildProjectName).RabbitMqConsumer - diff --git a/src/LocalPost/Options.cs b/src/LocalPost/Options.cs new file mode 100644 index 0000000..2141de4 --- /dev/null +++ b/src/LocalPost/Options.cs @@ -0,0 +1,3 @@ +namespace LocalPost; + +public sealed record BatchOptions(int MaxSize = 10, int TimeWindowDuration = 1_000); diff --git a/src/LocalPost/QueuePublisher.cs b/src/LocalPost/QueuePublisher.cs index 1431d2f..0150c76 100644 --- a/src/LocalPost/QueuePublisher.cs +++ b/src/LocalPost/QueuePublisher.cs @@ -1,8 +1,6 @@ -using JetBrains.Annotations; - namespace LocalPost; -[PublicAPI] +// TODO Remove?.. public interface IQueuePublisher { // TODO Custom exception when closed?.. Or just return true/false?.. diff --git a/src/LocalPost/Reflection.cs b/src/LocalPost/Reflection.cs index fb418a0..913bea4 100644 --- a/src/LocalPost/Reflection.cs +++ b/src/LocalPost/Reflection.cs @@ -1,12 +1,22 @@ using System.Diagnostics.CodeAnalysis; +using LocalPost.DependencyInjection; namespace LocalPost; [ExcludeFromCodeCoverage] internal static class Reflection { + public static string FriendlyNameOf(string name) where T : INamedService => + FriendlyNameOf(typeof(T)) + ":" + name; + public static string FriendlyNameOf() => FriendlyNameOf(typeof(T)); + public static string FriendlyNameOf(Type type, string? instanceName) + { + var name = FriendlyNameOf(type); + return instanceName is null ? 
name : $"{name}:{instanceName}"; + } + public static string FriendlyNameOf(Type type) => type.IsGenericType switch { true => type.Name.Split('`')[0] diff --git a/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs b/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs index d3a0c5c..5f766c0 100644 --- a/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs +++ b/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs @@ -10,7 +10,7 @@ namespace LocalPost.KafkaConsumer.Tests; public class ConsumerTests(ITestOutputHelper output) : IAsyncLifetime { // Called for each test, since each test instantiates a new class instance - private readonly RpContainer _container = new RpBuilder() + private readonly RedpandaContainer _container = new RedpandaBuilder() .Build(); private const string Topic = "weather-forecasts"; @@ -19,9 +19,6 @@ public async Task InitializeAsync() { await _container.StartAsync(); - // Dirty fix, but otherwise the client fails - await Task.Delay(3_000); - using var producer = new ProducerBuilder(new ProducerConfig { BootstrapServers = _container.GetBootstrapAddress() @@ -52,9 +49,10 @@ public async Task handles_messages() var hostBuilder = Host.CreateApplicationBuilder(); hostBuilder.Services.AddKafkaConsumers(kafka => kafka - .AddConsumer("test-one", HandlerStack.For(async (payload, _) => + .AddConsumer("test-consumer", HandlerStack.For((payload, _) => { received.Add(payload); + return default; }) .Map(next => async (payload, ct) => { @@ -66,13 +64,18 @@ public async Task handles_messages() .Scoped() .Trace() ) - .Configure(options => + .ConfigureConsumer(consumer => { - options.BootstrapServers = _container.GetBootstrapAddress(); - options.Topic = Topic; - options.GroupId = "test-app"; + consumer.BootstrapServers = _container.GetBootstrapAddress(); + // This is the default value, from the name parameter above + // consumer.GroupId = "test-consumer"; + consumer.Topic = Topic; // Otherwise the client attaches to the end of the topic, skipping all the 
published messages - options.AutoOffsetReset = AutoOffsetReset.Earliest; + consumer.AutoOffsetReset = AutoOffsetReset.Earliest; + }) + .Configure(pipeline => + { + pipeline.MaxConcurrency = 2; }) .ValidateDataAnnotations()); diff --git a/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj b/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj index 2c91812..7df79fe 100644 --- a/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj +++ b/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj @@ -13,8 +13,9 @@ + - + diff --git a/tests/LocalPost.KafkaConsumer.Tests/RpBuilder.cs b/tests/LocalPost.KafkaConsumer.Tests/RedpandaContainer.cs similarity index 71% rename from tests/LocalPost.KafkaConsumer.Tests/RpBuilder.cs rename to tests/LocalPost.KafkaConsumer.Tests/RedpandaContainer.cs index 0acb613..cf96115 100644 --- a/tests/LocalPost.KafkaConsumer.Tests/RpBuilder.cs +++ b/tests/LocalPost.KafkaConsumer.Tests/RedpandaContainer.cs @@ -7,9 +7,9 @@ namespace LocalPost.KafkaConsumer.Tests; // See also https://github.com/testcontainers/testcontainers-dotnet/blob/develop/src/Testcontainers.Kafka/KafkaBuilder.cs -public sealed class RpBuilder : ContainerBuilder +public sealed class RedpandaBuilder : ContainerBuilder { - public const string RedpandaImage = "docker.redpanda.com/redpandadata/redpanda:v24.1.5"; + public const string RedpandaImage = "docker.redpanda.com/redpandadata/redpanda:v24.1.7"; public const ushort KafkaPort = 9092; public const ushort KafkaAdminPort = 9644; @@ -18,34 +18,34 @@ public sealed class RpBuilder : ContainerBuilder resourceConfiguration) => + protected override RedpandaBuilder Clone(IResourceConfiguration resourceConfiguration) => Merge(DockerResourceConfiguration, new ContainerConfiguration(resourceConfiguration)); - protected override RpBuilder Clone(IContainerConfiguration resourceConfiguration) => + protected override RedpandaBuilder Clone(IContainerConfiguration 
resourceConfiguration) => Merge(DockerResourceConfiguration, new ContainerConfiguration(resourceConfiguration)); - protected override RpBuilder Merge(ContainerConfiguration oldValue, ContainerConfiguration newValue) => + protected override RedpandaBuilder Merge(ContainerConfiguration oldValue, ContainerConfiguration newValue) => new(new ContainerConfiguration(oldValue, newValue)); protected override ContainerConfiguration DockerResourceConfiguration { get; } - public override RpContainer Build() + public override RedpandaContainer Build() { Validate(); - return new RpContainer(DockerResourceConfiguration); + return new RedpandaContainer(DockerResourceConfiguration); } - protected override RpBuilder Init() + protected override RedpandaBuilder Init() { return base.Init() .WithImage(RedpandaImage) @@ -78,12 +78,20 @@ protected override RpBuilder Init() -public sealed class RpContainer(IContainerConfiguration configuration) : DockerContainer(configuration) +public sealed class RedpandaContainer(IContainerConfiguration configuration) : DockerContainer(configuration) { + public override async Task StartAsync(CancellationToken ct = default) + { + await base.StartAsync(ct); + + // Dirty fix, but otherwise the client just fails with strange errors + await Task.Delay(3_000, ct); + } + public string GetSchemaRegistryAddress() => - new UriBuilder(Uri.UriSchemeHttp, Hostname, GetMappedPublicPort(RpBuilder.SchemaRegistryPort)).ToString(); + new UriBuilder(Uri.UriSchemeHttp, Hostname, GetMappedPublicPort(RedpandaBuilder.SchemaRegistryPort)).ToString(); public string GetBootstrapAddress() => // new UriBuilder("PLAINTEXT", Hostname, GetMappedPublicPort(RpBuilder.KafkaPort)).ToString(); - $"{Hostname}:{GetMappedPublicPort(RpBuilder.KafkaPort)}"; + $"{Hostname}:{GetMappedPublicPort(RedpandaBuilder.KafkaPort)}"; } diff --git a/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs b/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs index c4ba8f0..e5356c9 100644 --- 
a/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs +++ b/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs @@ -20,18 +20,18 @@ public class ConsumerTests(ITestOutputHelper output) : IAsyncLifetime private const string QueueName = "weather-forecasts"; - private IAmazonSQS _sqs; - private string? _queueUrl; + private IAmazonSQS CreateClient() => + new AmazonSQSClient(new BasicAWSCredentials("LSIAQAAAAAAVNCBMPNSG", "any"), + new AmazonSQSConfig { ServiceURL = _container.GetConnectionString() }); + public async Task InitializeAsync() { await _container.StartAsync(); - _sqs = new AmazonSQSClient(new BasicAWSCredentials("LSIAQAAAAAAVNCBMPNSG", "any"), - new AmazonSQSConfig { ServiceURL = _container.GetConnectionString() }); - - var createResponse = await _sqs.CreateQueueAsync(QueueName); + var sqs = CreateClient(); + var createResponse = await sqs.CreateQueueAsync(QueueName); _queueUrl = createResponse.QueueUrl; } @@ -64,9 +64,10 @@ public async Task handles_messages() await host.StartAsync(); - await _sqs.SendMessageAsync(_queueUrl, "It will rainy in London tomorrow"); + var sqs = CreateClient(); + await sqs.SendMessageAsync(_queueUrl, "It will rainy in London tomorrow"); - await Task.Delay(1_000); + await Task.Delay(1_000); // "App is working" received.Should().HaveCount(1); received[0].Should().Be("It will rainy in London tomorrow"); diff --git a/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj b/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj index 75866c0..b1a9237 100644 --- a/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj +++ b/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj @@ -13,6 +13,7 @@ + diff --git a/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs b/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs index e89ed27..a78b3ff 100644 --- a/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs +++ 
b/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs @@ -16,7 +16,7 @@ internal async Task batches() SingleWriter = false }); var results = source.Reader.ReadAllAsync().Batch( - (ct) => new BoundedBatchBuilder(10, TimeSpan.FromSeconds(2))); + () => new BoundedBatchBuilder(10, TimeSpan.FromSeconds(2))); async Task Produce() { @@ -35,8 +35,8 @@ async Task Produce() async Task Consume() { var expect = new Queue(); - expect.Enqueue(new[] { 1, 2, 3 }); - expect.Enqueue(new[] { 4, 5 }); + expect.Enqueue([1, 2, 3]); + expect.Enqueue([4, 5]); await foreach (var batch in results) { batch.Should().ContainInOrder(expect.Dequeue()); From 022a84c7e266689cfffa97c8756ae2cda5c45577 Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Mon, 24 Jun 2024 16:52:23 +0000 Subject: [PATCH 16/33] public to internal --- .../DependencyInjection/KafkaBuilder.cs | 2 +- .../LocalPost.KafkaConsumer.csproj | 2 +- .../DependencyInjection/SqsBuilder.cs | 2 +- .../LocalPost.SqsConsumer.csproj | 2 +- .../BackgroundQueuesBuilder.cs | 2 +- src/LocalPost/Handler.cs | 16 +++++---------- src/LocalPost/HandlerStack.cs | 6 +++--- src/LocalPost/LocalPost.csproj | 20 ++++++++++--------- 8 files changed, 24 insertions(+), 28 deletions(-) diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs index 757f0e7..2bb981b 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs @@ -57,7 +57,7 @@ public OptionsBuilder AddConsumer(string name, HandlerFa }); } - public OptionsBuilder Add(string name, PipelineRegistration> pr) + internal OptionsBuilder Add(string name, PipelineRegistration> pr) { if (string.IsNullOrEmpty(name)) // TODO Just default (empty?) name... 
throw new ArgumentException("A proper (non empty) name is required", nameof(name)); diff --git a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj index cac8a00..835d1f1 100644 --- a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj +++ b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj @@ -45,7 +45,7 @@ - + diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs index 74ec0c8..bcb7e3c 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs @@ -64,7 +64,7 @@ public OptionsBuilder AddConsumer(string name, HandlerFa /// Consumer name (also the default queue name). Should be unique in the application. /// Pipeline registration. /// Consumer options builder. - public OptionsBuilder Add(string name, PipelineRegistration> pr) + internal OptionsBuilder Add(string name, PipelineRegistration> pr) { if (string.IsNullOrEmpty(name)) throw new ArgumentException("A proper (non empty) name is required", nameof(name)); diff --git a/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj b/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj index 492fb31..4e4da7e 100644 --- a/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj +++ b/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj @@ -45,7 +45,7 @@ - + diff --git a/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs b/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs index 3b42e7a..5d0c4d1 100644 --- a/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs +++ b/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs @@ -58,7 +58,7 @@ public OptionsBuilder> AddQueue(HandlerFactory(); } - public OptionsBuilder> Add(PipelineRegistration> pr) + internal OptionsBuilder> 
Add(PipelineRegistration> pr) { if (!services.TryAddSingletonAlias, BackgroundQueue>()) // return ob; // Already added, don't register twice diff --git a/src/LocalPost/Handler.cs b/src/LocalPost/Handler.cs index 7a58e78..bd291f7 100644 --- a/src/LocalPost/Handler.cs +++ b/src/LocalPost/Handler.cs @@ -3,24 +3,18 @@ namespace LocalPost; -// Because C# does not support sum types... -// public interface IServiceCollectionFor : IServiceCollection -// { -// string Target { get; } -// } - -public readonly record struct RegistrationContext(IServiceCollection Services, AssistedService Target); +internal readonly record struct RegistrationContext(IServiceCollection Services, AssistedService Target); // TODO Make internal -public delegate Task StreamProcessor(IAsyncEnumerable stream, CancellationToken ct); +internal delegate Task StreamProcessor(IAsyncEnumerable stream, CancellationToken ct); -public delegate IAsyncEnumerable PipelineFactory(IServiceProvider provider); +internal delegate IAsyncEnumerable PipelineFactory(IServiceProvider provider); -public delegate void PipelineRegistration(RegistrationContext services, PipelineFactory source); +internal delegate void PipelineRegistration(RegistrationContext services, PipelineFactory source); -public delegate IAsyncEnumerable PipelineMiddleware(IAsyncEnumerable source, +internal delegate IAsyncEnumerable PipelineMiddleware(IAsyncEnumerable source, CancellationToken ct = default); diff --git a/src/LocalPost/HandlerStack.cs b/src/LocalPost/HandlerStack.cs index 0896658..c966701 100644 --- a/src/LocalPost/HandlerStack.cs +++ b/src/LocalPost/HandlerStack.cs @@ -91,11 +91,11 @@ async Task Handle(T message) } } - public static PipelineRegistration Create(HandlerFactory hf, + internal static PipelineRegistration Create(HandlerFactory hf, ushort maxConcurrency, bool breakOnException) => Create(hf, _ => new ConsumerOptions(maxConcurrency, breakOnException)); - public static PipelineRegistration Create(HandlerFactory hf, + internal 
static PipelineRegistration Create(HandlerFactory hf, Func config) => (context, pf) => context.Services.AddBackgroundService(provider => { @@ -115,7 +115,7 @@ public static PipelineRegistration Create(HandlerFactory hf, } [PublicAPI] -public static class PipelineOps +internal static class PipelineOps { public static PipelineRegistration Where(this PipelineRegistration next, Func pred) diff --git a/src/LocalPost/LocalPost.csproj b/src/LocalPost/LocalPost.csproj index ef3373a..6b2a1c4 100644 --- a/src/LocalPost/LocalPost.csproj +++ b/src/LocalPost/LocalPost.csproj @@ -42,19 +42,21 @@ - - - - - - + + + + + + + + + - - - + + From 06c7bd40790aebffec82d73218b27fbc95bc5020 Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Thu, 27 Jun 2024 10:56:55 +0000 Subject: [PATCH 17/33] .NET 8 keyed services --- .../DependencyInjection/INamedService.cs | 50 +++++++++---------- .../ServiceCollectionTools.cs | 6 +-- 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/src/LocalPost/DependencyInjection/INamedService.cs b/src/LocalPost/DependencyInjection/INamedService.cs index d8cc554..d54f873 100644 --- a/src/LocalPost/DependencyInjection/INamedService.cs +++ b/src/LocalPost/DependencyInjection/INamedService.cs @@ -7,28 +7,28 @@ internal interface INamedService string Name { get; } } -internal sealed class NamedServiceDescriptor : ServiceDescriptor -{ - public static NamedServiceDescriptor Singleton(string name, Func iFactory) - where TService : class, INamedService => - new(typeof(TService), name, iFactory, ServiceLifetime.Singleton); - - public string Name { get; init; } - - public NamedServiceDescriptor(Type sType, string name, Type iType, ServiceLifetime lifetime) : - base(sType, iType, lifetime) - { - Name = name; - } - - public NamedServiceDescriptor(Type sType, string name, object instance) : base(sType, instance) - { - Name = name; - } - - public NamedServiceDescriptor(Type sType, string name, Func factory, - ServiceLifetime lifetime) : base(sType, factory, 
lifetime) - { - Name = name; - } -} +// internal sealed class NamedServiceDescriptor : ServiceDescriptor +// { +// public static NamedServiceDescriptor Singleton(string name, Func iFactory) +// where TService : class, INamedService => +// new(typeof(TService), name, iFactory, ServiceLifetime.Singleton); +// +// public string Name { get; init; } +// +// public NamedServiceDescriptor(Type sType, string name, Type iType, ServiceLifetime lifetime) : +// base(sType, iType, lifetime) +// { +// Name = name; +// } +// +// public NamedServiceDescriptor(Type sType, string name, object instance) : base(sType, instance) +// { +// Name = name; +// } +// +// public NamedServiceDescriptor(Type sType, string name, Func factory, +// ServiceLifetime lifetime) : base(sType, factory, lifetime) +// { +// Name = name; +// } +// } diff --git a/src/LocalPost/DependencyInjection/ServiceCollectionTools.cs b/src/LocalPost/DependencyInjection/ServiceCollectionTools.cs index d2fb785..466a823 100644 --- a/src/LocalPost/DependencyInjection/ServiceCollectionTools.cs +++ b/src/LocalPost/DependencyInjection/ServiceCollectionTools.cs @@ -80,7 +80,7 @@ public static IServiceCollection AddConcurrentHostedServices(this IServiceCollec public static bool TryAddNamedSingleton(this IServiceCollection services, string name, Func factory) where TService : class, INamedService => - services.TryAdd(NamedServiceDescriptor.Singleton(name, factory)); + services.TryAdd(ServiceDescriptor.KeyedSingleton(name, factory)); public static bool TryAddSingleton(this IServiceCollection services) where TService : class => services.TryAdd(ServiceDescriptor.Singleton()); @@ -104,8 +104,8 @@ public static bool TryAdd(this IServiceCollection services, ServiceDescriptor de static bool IsEqual(ServiceDescriptor a, ServiceDescriptor b) { var equal = a.ServiceType == b.ServiceType; // && a.Lifetime == b.Lifetime; - if (equal && a is NamedServiceDescriptor namedA && b is NamedServiceDescriptor namedB) - return namedA.Name == 
namedB.Name; + if (equal && a is { IsKeyedService: true } && b is { IsKeyedService: true }) + return a.ServiceKey == b.ServiceKey; return equal; } From 9fff5f8f920c678fe7b8ee74c2d20bf1c0c7cf7b Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Fri, 28 Jun 2024 08:19:38 +0000 Subject: [PATCH 18/33] chore: deps cleanup (after .NET 6 upgrade) --- .../LocalPost.KafkaConsumer.csproj | 2 +- src/LocalPost.Polly/LocalPost.Polly.csproj | 2 +- .../LocalPost.SqsConsumer.csproj | 2 +- .../AsyncEnumerable/AsyncEnumerableMerger.cs | 2 +- .../AsyncEnumerable/AsyncEnumeratorEx.cs | 2 -- src/LocalPost/AsyncEnumerable/BatchBuilder.cs | 12 ------- .../AsyncEnumerable/ConcurrentSet.cs | 20 ++---------- src/LocalPost/ConcurrentHostedServices.cs | 3 -- src/LocalPost/LocalPost.csproj | 3 +- src/LocalPost/Polyfills.cs | 31 ------------------- .../BatchingAsyncEnumerableTests.cs | 2 -- 11 files changed, 7 insertions(+), 74 deletions(-) delete mode 100644 src/LocalPost/Polyfills.cs diff --git a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj index 835d1f1..82fc57c 100644 --- a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj +++ b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj @@ -1,7 +1,7 @@ - netstandard2.0;net6.0;net8.0 + net6.0;net8.0 true false diff --git a/src/LocalPost.Polly/LocalPost.Polly.csproj b/src/LocalPost.Polly/LocalPost.Polly.csproj index ffc50c5..6e182a0 100644 --- a/src/LocalPost.Polly/LocalPost.Polly.csproj +++ b/src/LocalPost.Polly/LocalPost.Polly.csproj @@ -1,7 +1,7 @@ - netstandard2.0;net6.0;net8.0 + net6.0;net8.0 true false diff --git a/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj b/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj index 4e4da7e..a6d6685 100644 --- a/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj +++ b/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj @@ -1,7 +1,7 @@ - netstandard2.0;net6.0;net8.0 + net6.0;net8.0 true false diff 
--git a/src/LocalPost/AsyncEnumerable/AsyncEnumerableMerger.cs b/src/LocalPost/AsyncEnumerable/AsyncEnumerableMerger.cs index 4c7ef21..a4c0747 100644 --- a/src/LocalPost/AsyncEnumerable/AsyncEnumerableMerger.cs +++ b/src/LocalPost/AsyncEnumerable/AsyncEnumerableMerger.cs @@ -41,7 +41,7 @@ public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = defau while (waits.Length > 0) { - var modificationTrigger = _sources.Modification; + var modificationTrigger = Task.Delay(Timeout.Infinite, _sources.ModificationToken); var waitTrigger = Task.WhenAny(waits); await Task.WhenAny(waitTrigger, modificationTrigger); diff --git a/src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs b/src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs index 5b65a00..dcc820b 100644 --- a/src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs +++ b/src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs @@ -1,5 +1,3 @@ -using Nito.AsyncEx; - namespace LocalPost.AsyncEnumerable; internal static class AsyncEnumeratorEx diff --git a/src/LocalPost/AsyncEnumerable/BatchBuilder.cs b/src/LocalPost/AsyncEnumerable/BatchBuilder.cs index 4547274..52e4036 100644 --- a/src/LocalPost/AsyncEnumerable/BatchBuilder.cs +++ b/src/LocalPost/AsyncEnumerable/BatchBuilder.cs @@ -1,6 +1,3 @@ -using System.Collections.Immutable; -using Nito.AsyncEx; - namespace LocalPost.AsyncEnumerable; internal delegate IBatchBuilder BatchBuilderFactory(); @@ -8,7 +5,6 @@ namespace LocalPost.AsyncEnumerable; internal interface IBatchBuilder : IDisposable { CancellationToken TimeWindow { get; } - Task TimeWindowTrigger { get; } bool IsEmpty { get; } bool Full { get; } @@ -25,7 +21,6 @@ internal abstract class BatchBuilderBase : IBatchBuilder private readonly TimeSpan _timeWindowDuration; private CancellationTokenSource _timeWindow; - private CancellationTokenTaskSource? 
_timeWindowTrigger; protected BatchBuilderBase(TimeSpan timeWindowDuration) { @@ -34,9 +29,6 @@ protected BatchBuilderBase(TimeSpan timeWindowDuration) } public CancellationToken TimeWindow => _timeWindow.Token; - public bool TimeWindowClosed => TimeWindow.IsCancellationRequested; - public Task TimeWindowTrigger => - (_timeWindowTrigger ??= new CancellationTokenTaskSource(_timeWindow.Token)).Task; public abstract bool IsEmpty { get; } public abstract bool Full { get; } @@ -53,9 +45,6 @@ public virtual void Reset() _timeWindow.Cancel(); _timeWindow.Dispose(); _timeWindow = StartTimeWindow(); - - _timeWindowTrigger?.Dispose(); - _timeWindowTrigger = null; } public TBatch Flush() @@ -68,7 +57,6 @@ public TBatch Flush() public virtual void Dispose() { _timeWindow.Dispose(); - _timeWindowTrigger?.Dispose(); } } diff --git a/src/LocalPost/AsyncEnumerable/ConcurrentSet.cs b/src/LocalPost/AsyncEnumerable/ConcurrentSet.cs index 59bf0c0..d6bd370 100644 --- a/src/LocalPost/AsyncEnumerable/ConcurrentSet.cs +++ b/src/LocalPost/AsyncEnumerable/ConcurrentSet.cs @@ -1,33 +1,22 @@ using System.Collections; using System.Collections.Immutable; -using Nito.AsyncEx; namespace LocalPost.AsyncEnumerable; -internal sealed class ConcurrentSet : IEnumerable, IDisposable +internal sealed class ConcurrentSet(IEnumerable sources) : IEnumerable, IDisposable { private readonly object _modificationLock = new(); - private ImmutableHashSet? _elements; + private ImmutableHashSet? _elements = sources.ToImmutableHashSet(); private CancellationTokenSource _modificationTriggerSource = new(); - private CancellationTokenTaskSource _modificationTriggerTaskSource; - - public ConcurrentSet(IEnumerable sources) - { - _modificationTriggerTaskSource = new CancellationTokenTaskSource(_modificationTriggerSource.Token); - _elements = sources.ToImmutableHashSet(); - } public ImmutableHashSet Elements => _elements ?? 
throw new ObjectDisposedException(nameof(ConcurrentSet)); public CancellationToken ModificationToken => _modificationTriggerSource.Token; - public Task Modification => _modificationTriggerTaskSource.Task; - private ImmutableHashSet ChangeSources(Func, ImmutableHashSet> change) { ImmutableHashSet changedSources; CancellationTokenSource trigger; - CancellationTokenTaskSource triggerTask; lock (_modificationLock) { changedSources = change(Elements); @@ -36,14 +25,10 @@ private ImmutableHashSet ChangeSources(Func, ImmutableHas _elements = changedSources; trigger = _modificationTriggerSource; - triggerTask = _modificationTriggerTaskSource; _modificationTriggerSource = new CancellationTokenSource(); - _modificationTriggerTaskSource = new CancellationTokenTaskSource(_modificationTriggerSource.Token); } trigger.Cancel(); // Notify about the modification - - triggerTask.Dispose(); trigger.Dispose(); return changedSources; @@ -60,7 +45,6 @@ private ImmutableHashSet ChangeSources(Func, ImmutableHas public void Dispose() { _modificationTriggerSource.Dispose(); - _modificationTriggerTaskSource.Dispose(); _elements = null; } } diff --git a/src/LocalPost/ConcurrentHostedServices.cs b/src/LocalPost/ConcurrentHostedServices.cs index a5d4ea3..b62e157 100644 --- a/src/LocalPost/ConcurrentHostedServices.cs +++ b/src/LocalPost/ConcurrentHostedServices.cs @@ -1,10 +1,7 @@ -using System.Collections; using System.Collections.Immutable; using System.Diagnostics.CodeAnalysis; -using LocalPost.DependencyInjection; using Microsoft.Extensions.Diagnostics.HealthChecks; using Microsoft.Extensions.Hosting; -using Nito.AsyncEx; using static Microsoft.Extensions.Diagnostics.HealthChecks.HealthCheckResult; namespace LocalPost; diff --git a/src/LocalPost/LocalPost.csproj b/src/LocalPost/LocalPost.csproj index 6b2a1c4..a4240b9 100644 --- a/src/LocalPost/LocalPost.csproj +++ b/src/LocalPost/LocalPost.csproj @@ -1,7 +1,7 @@ - netstandard2.0;net6.0;net8.0 + net6.0;net8.0 true LocalPost @@ -51,7 +51,6 @@ 
- diff --git a/src/LocalPost/Polyfills.cs b/src/LocalPost/Polyfills.cs deleted file mode 100644 index f4dfccb..0000000 --- a/src/LocalPost/Polyfills.cs +++ /dev/null @@ -1,31 +0,0 @@ -using System.Diagnostics.CodeAnalysis; -using System.Runtime.CompilerServices; -using System.Threading.Channels; - -namespace LocalPost; - -internal static class ChannelReaderEx -{ - // netstandard2.0 does not contain this overload, it's available only from netstandard2.1 (.NET Core 3.0+) - public static async IAsyncEnumerable ReadAllAsync(this ChannelReader reader, - [EnumeratorCancellation] CancellationToken cancellationToken = default) - { - while (await reader.WaitToReadAsync(cancellationToken).ConfigureAwait(false)) - while (reader.TryRead(out var item)) - yield return item; - } -} - -internal static class EnumerableEx -{ - // Can be removed on .NET 6+, see https://stackoverflow.com/a/6362642/322079 - [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] - public static IEnumerable> Chunk(this IEnumerable source, ushort size) - { - while (source.Any()) - { - yield return source.Take(size); - source = source.Skip(size); - } - } -} diff --git a/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs b/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs index a78b3ff..f8cc65a 100644 --- a/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs +++ b/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs @@ -1,7 +1,5 @@ using System.Threading.Channels; -using FluentAssertions; using LocalPost.AsyncEnumerable; -using Nito.AsyncEx; namespace LocalPost.Tests.AsyncEnumerable; From 386c64c4e073e4571b53d92bb1eddcace2987491 Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Mon, 16 Sep 2024 20:11:28 +0000 Subject: [PATCH 19/33] WIP --- .github/workflows/publish.yaml | 4 +-- .github/workflows/qa.yml | 12 ++++---- LocalPost.sln | 9 +++++- README.md | 30 ++++++++++++++++--- .../BackgroundQueueApp.csproj | 2 +- 
samples/BackgroundQueueApp/Program.cs | 2 +- .../HandlerStackEx.cs | 2 +- .../LocalPost.Resilience.csproj} | 2 +- .../README.md | 0 9 files changed, 46 insertions(+), 17 deletions(-) rename src/{LocalPost.Polly => LocalPost.Resilience}/HandlerStackEx.cs (95%) rename src/{LocalPost.Polly/LocalPost.Polly.csproj => LocalPost.Resilience/LocalPost.Resilience.csproj} (98%) rename src/{LocalPost.Polly => LocalPost.Resilience}/README.md (100%) diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml index 674b0ba..b59b95d 100644 --- a/.github/workflows/publish.yaml +++ b/.github/workflows/publish.yaml @@ -13,7 +13,7 @@ jobs: with: # Required for GitVersion fetch-depth: 0 - - uses: actions/setup-dotnet@v3 + - uses: actions/setup-dotnet@v4 with: dotnet-version: | 6.0.x @@ -33,7 +33,7 @@ jobs: fetch-depth: 0 - uses: actions/setup-dotnet@v3 with: - dotnet-version: 7.0.x + dotnet-version: 8.0.x - run: dotnet pack -c Release - name: Publish run: | diff --git a/.github/workflows/qa.yml b/.github/workflows/qa.yml index 1dc50b6..fac316e 100644 --- a/.github/workflows/qa.yml +++ b/.github/workflows/qa.yml @@ -9,11 +9,11 @@ jobs: lint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: # Full history is needed to get a proper list of changed files fetch-depth: 0 - - uses: github/super-linter@v4 + - uses: github/super-linter@v7 env: DEFAULT_BRANCH: main GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -29,15 +29,15 @@ jobs: env: DOTNET_NOLOGO: true steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: # Disabling shallow clone is recommended by SonarCloud for improving relevancy of reporting fetch-depth: 0 - - uses: actions/setup-java@v3 + - uses: actions/setup-java@v4 with: distribution: temurin - java-version: 17 - - uses: actions/setup-dotnet@v3 + java-version: 21 + - uses: actions/setup-dotnet@v4 with: dotnet-version: | 6.0.x diff --git a/LocalPost.sln b/LocalPost.sln index 7cc06f2..325365c 100644 --- 
a/LocalPost.sln +++ b/LocalPost.sln @@ -28,7 +28,7 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "KafkaConsumerApp", "samples EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SqsConsumerApp", "samples\SqsConsumerApp\SqsConsumerApp.csproj", "{2778AEBD-0345-4F79-9E93-73AFAB6C7BCD}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.Polly", "src\LocalPost.Polly\LocalPost.Polly.csproj", "{EA69FF51-BEF7-415C-836A-BB5432206F7E}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.Resilience", "src\LocalPost.Resilience\LocalPost.Resilience.csproj", "{EA69FF51-BEF7-415C-836A-BB5432206F7E}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "RabbitMqConsumerApp", "samples\RabbitMqConsumerApp\RabbitMqConsumerApp.csproj", "{F9BD092A-DC16-43A8-B605-46FDFAEFBEDA}" EndProject @@ -38,6 +38,8 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.RabbitMqConsumer. EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.NatsConsumer", "src\LocalPost.NatsConsumer\LocalPost.NatsConsumer.csproj", "{05A771C9-0987-484A-8A7F-B6B1180F55F9}" EndProject +Project("{F2A71F9B-5D33-465A-A702-920D77279786}") = "BackgroundQueueApp.FSharp", "samples\BackgroundQueueApp.FSharp\BackgroundQueueApp.FSharp.fsproj", "{79CF7EFF-860D-464F-B59A-55E48D25D70C}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -116,6 +118,10 @@ Global {05A771C9-0987-484A-8A7F-B6B1180F55F9}.Debug|Any CPU.Build.0 = Debug|Any CPU {05A771C9-0987-484A-8A7F-B6B1180F55F9}.Release|Any CPU.ActiveCfg = Release|Any CPU {05A771C9-0987-484A-8A7F-B6B1180F55F9}.Release|Any CPU.Build.0 = Release|Any CPU + {79CF7EFF-860D-464F-B59A-55E48D25D70C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {79CF7EFF-860D-464F-B59A-55E48D25D70C}.Debug|Any CPU.Build.0 = Debug|Any CPU + {79CF7EFF-860D-464F-B59A-55E48D25D70C}.Release|Any CPU.ActiveCfg = Release|Any CPU + 
{79CF7EFF-860D-464F-B59A-55E48D25D70C}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(NestedProjects) = preSolution {46FC61E6-D0FB-4D7D-A81B-20EF8D8D1F4E} = {405721DC-F290-4191-B638-9907D5EB042B} @@ -123,5 +129,6 @@ Global {C310487A-B976-4D3E-80AF-4ADBE1C63139} = {405721DC-F290-4191-B638-9907D5EB042B} {2778AEBD-0345-4F79-9E93-73AFAB6C7BCD} = {405721DC-F290-4191-B638-9907D5EB042B} {F9BD092A-DC16-43A8-B605-46FDFAEFBEDA} = {405721DC-F290-4191-B638-9907D5EB042B} + {79CF7EFF-860D-464F-B59A-55E48D25D70C} = {405721DC-F290-4191-B638-9907D5EB042B} EndGlobalSection EndGlobal diff --git a/README.md b/README.md index 04482eb..e21f553 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,18 @@ There are multiple ways to run background tasks in .NET. The most common are: ### Installation +For the core library: + +```shell +dotnet add package LocalPost +``` + +AWS SQS, Kafka and other integrations are provided as separate packages: + +```shell +dotnet add package LocalPost.KafkaConsumer +``` + ### .NET 8 asynchronous background services handling Before version 8 .NET runtime handled start/stop of the services only synchronously, but now it is possible to enable @@ -20,8 +32,18 @@ See for details: - https://github.com/dotnet/runtime/blob/v8.0.0/src/libraries/Microsoft.Extensions.Hosting/src/Internal/Host.cs - https://github.com/dotnet/runtime/blob/main/src/libraries/Microsoft.Extensions.Hosting/src/HostOptions.cs -## Similar projects / Inspiration +## Similar projects + +- [Coravel queue](https://docs.coravel.net/Queuing/) — a simple job queue + +More complex jobs management / scheduling: +- [Hangfire](https://www.hangfire.io/) — background job scheduler. Supports advanced scheduling, persistence and jobs distribution across multiple workers. 
+ +Service bus (for bigger solutions): +- [JustSaying](https://github.com/justeattakeaway/JustSaying) +- [NServiceBus](https://docs.particular.net/nservicebus/) +- [MassTransit](https://masstransit.io/) + +## Inspiration -- [FastStream](https://github.com/airtai/faststream) — Python framework with almost the same concept -- [Coravel queue](https://docs.coravel.net/Queuing/)/event broadcasting — only invocable queueing, event broadcasting is different from consuming a queue -- [Hangfire](https://www.hangfire.io/) — for persistent queues (means payload serialisation), LocalPost is completely about in-memory ones +- [FastStream](https://github.com/airtai/faststream) — Python framework with similar goals diff --git a/samples/BackgroundQueueApp/BackgroundQueueApp.csproj b/samples/BackgroundQueueApp/BackgroundQueueApp.csproj index 48c18d7..4440646 100644 --- a/samples/BackgroundQueueApp/BackgroundQueueApp.csproj +++ b/samples/BackgroundQueueApp/BackgroundQueueApp.csproj @@ -14,7 +14,7 @@ - + diff --git a/samples/BackgroundQueueApp/Program.cs b/samples/BackgroundQueueApp/Program.cs index f65c3a6..14044f4 100644 --- a/samples/BackgroundQueueApp/Program.cs +++ b/samples/BackgroundQueueApp/Program.cs @@ -2,7 +2,7 @@ using LocalPost; using LocalPost.BackgroundQueue; using LocalPost.BackgroundQueue.DependencyInjection; -using LocalPost.Polly; +using LocalPost.Resilience; using Polly; using Polly.Retry; diff --git a/src/LocalPost.Polly/HandlerStackEx.cs b/src/LocalPost.Resilience/HandlerStackEx.cs similarity index 95% rename from src/LocalPost.Polly/HandlerStackEx.cs rename to src/LocalPost.Resilience/HandlerStackEx.cs index 9c5fa91..cc91907 100644 --- a/src/LocalPost.Polly/HandlerStackEx.cs +++ b/src/LocalPost.Resilience/HandlerStackEx.cs @@ -1,7 +1,7 @@ using JetBrains.Annotations; using Polly; -namespace LocalPost.Polly; +namespace LocalPost.Resilience; [PublicAPI] public static class HandlerStackEx diff --git a/src/LocalPost.Polly/LocalPost.Polly.csproj 
b/src/LocalPost.Resilience/LocalPost.Resilience.csproj similarity index 98% rename from src/LocalPost.Polly/LocalPost.Polly.csproj rename to src/LocalPost.Resilience/LocalPost.Resilience.csproj index 6e182a0..68ee3b2 100644 --- a/src/LocalPost.Polly/LocalPost.Polly.csproj +++ b/src/LocalPost.Resilience/LocalPost.Resilience.csproj @@ -6,7 +6,7 @@ false - LocalPost.Polly + LocalPost.Resilience Alexey Shokov Polly integration for LocalPost https://github.com/alexeyshockov/LocalPost/v$(Version) diff --git a/src/LocalPost.Polly/README.md b/src/LocalPost.Resilience/README.md similarity index 100% rename from src/LocalPost.Polly/README.md rename to src/LocalPost.Resilience/README.md From a13f84def82da271ed604398baed4736a96cb8bd Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Mon, 30 Dec 2024 12:24:02 +0000 Subject: [PATCH 20/33] WIP --- samples/KafkaConsumerApp/Program.cs | 28 ----- samples/SqsConsumerApp/Program.cs | 46 ++++---- .../DependencyInjection/KafkaBuilder.cs | 3 +- .../DependencyInjection/SqsBuilder.cs | 3 +- .../AsyncEnumerable/AsyncEnumerableEx.cs | 10 +- .../AsyncEnumerable/AsyncEnumeratorEx.cs | 24 ---- src/LocalPost/AsyncEnumerable/BatchBuilder.cs | 105 ------------------ .../BatchingAsyncEnumerable.cs | 51 --------- .../AsyncEnumerable/ConcurrentBuffer.cs | 37 ------ .../BackgroundQueuesBuilder.cs | 3 +- src/LocalPost/HandlerStack.cs | 7 +- src/LocalPost/LocalPost.csproj | 7 +- .../AsyncEnumerableMergerTests.cs | 10 +- .../BatchingAsyncEnumerableTests.cs | 10 +- 14 files changed, 52 insertions(+), 292 deletions(-) delete mode 100644 src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs delete mode 100644 src/LocalPost/AsyncEnumerable/BatchBuilder.cs delete mode 100644 src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs delete mode 100644 src/LocalPost/AsyncEnumerable/ConcurrentBuffer.cs diff --git a/samples/KafkaConsumerApp/Program.cs b/samples/KafkaConsumerApp/Program.cs index 810868d..8f1af4c 100644 --- a/samples/KafkaConsumerApp/Program.cs +++ 
b/samples/KafkaConsumerApp/Program.cs @@ -31,34 +31,6 @@ // TODO Health + Supervisor var host = builder.Build(); -// using (var producer = new ProducerBuilder(new ProducerConfig -// { -// BootstrapServers = "127.0.0.1:19092" -// }).Build()) -// { -// // Redpanda: by default, topic is created automatically on the first message -// await producer.ProduceAsync("weather-forecasts", new Message -// { -// Key = "London", -// Value = JsonSerializer.Serialize(new WeatherForecast(25, 77, "Sunny")) -// }); -// await producer.ProduceAsync("weather-forecasts", new Message -// { -// Key = "Paris", -// Value = JsonSerializer.Serialize(new WeatherForecast(18, 64, "Rainy")) -// }); -// await producer.ProduceAsync("weather-forecasts", new Message -// { -// Key = "Toronto", -// Value = JsonSerializer.Serialize(new WeatherForecast(22, 72, "Cloudy")) -// }); -// await producer.ProduceAsync("weather-forecasts", new Message -// { -// Key = "Berlin", -// Value = JsonSerializer.Serialize(new WeatherForecast(20, 68, "Sunny")) -// }); -// } - await host.RunAsync(); diff --git a/samples/SqsConsumerApp/Program.cs b/samples/SqsConsumerApp/Program.cs index 51222c9..e90e508 100644 --- a/samples/SqsConsumerApp/Program.cs +++ b/samples/SqsConsumerApp/Program.cs @@ -19,29 +19,29 @@ .AddScoped() .AddSqsConsumers(sqs => { - // sqs.Defaults.Configure(options => options.MaxConcurrency = 100); - // sqs.AddConsumer("weather-forecasts", - // HandlerStack.From() - // .UseSqsPayload() - // .DeserializeJson() - // .Acknowledge() - // .Scoped() - // .LogFingersCrossed() - // .Trace()); - // sqs.Defaults.Configure(options => options.MaxConcurrency = 100); - // sqs.AddConsumer("weather-forecasts", - // Pipeline.Create( - // HandlerStack.From() - // .UseSqsPayload() - // .DeserializeJson() - // .Acknowledge() - // .Scoped() - // .LogFingersCrossed() - // .Trace(), - // maxConcurrency: 100, - // breakOnException: false - // ).Buffer(100) - // ); + sqs.Defaults.Configure(options => options.MaxConcurrency = 100); + 
sqs.AddConsumer("weather-forecasts", + HandlerStack.From() + .UseSqsPayload() + .DeserializeJson() + .Acknowledge() + .Scoped() + .LogFingersCrossed() + .Trace()); + sqs.Defaults.Configure(options => options.MaxConcurrency = 100); + sqs.AddConsumer("weather-forecasts", + Pipeline.Create( + HandlerStack.From() + .UseSqsPayload() + .DeserializeJson() + .Acknowledge() + .Scoped() + .LogFingersCrossed() + .Trace(), + maxConcurrency: 100, + breakOnException: false + ).Buffer(100) + ); }); // TODO Health + Supervisor diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs index 2bb981b..bcfc19e 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs @@ -1,3 +1,4 @@ +using System.Collections.Immutable; using Confluent.Kafka; using JetBrains.Annotations; using LocalPost.DependencyInjection; @@ -18,7 +19,7 @@ public sealed class KafkaBuilder(IServiceCollection services) /// Handler factory. /// Pipeline options builder. public OptionsBuilder AddBatchConsumer(string name, - HandlerFactory>> hf) + HandlerFactory>> hf) { var defaultPipeline = Pipeline .Create(hf, provider => provider.GetOptions(name)) diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs index bcb7e3c..0994e15 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs @@ -1,3 +1,4 @@ +using System.Collections.Immutable; using JetBrains.Annotations; using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; @@ -17,7 +18,7 @@ public sealed class SqsBuilder(IServiceCollection services) /// Handler factory. /// Pipeline options builder. 
public OptionsBuilder AddBatchConsumer(string name, - HandlerFactory>> hf) + HandlerFactory>> hf) { var defaultPipeline = Pipeline .Create(hf, provider => provider.GetOptions(name)) diff --git a/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs b/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs index ac630f0..889f1b5 100644 --- a/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs +++ b/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs @@ -1,12 +1,14 @@ +using System.Collections.Immutable; + namespace LocalPost.AsyncEnumerable; internal static class AsyncEnumerableEx { - // public static ConcurrentBuffer ToConcurrentBuffer(this IAsyncEnumerable source, int maxSize = 1) => - // new(source, maxSize); + public static IAsyncEnumerable> Batch(this IAsyncEnumerable source, + int maxSize, TimeSpan timeWindow) => new BatchingAsyncEnumerable(source, maxSize, timeWindow); - public static IAsyncEnumerable Batch(this IAsyncEnumerable source, - BatchBuilderFactory factory) => new BatchingAsyncEnumerable(source, factory); + public static IAsyncEnumerable> Batch(this IAsyncEnumerable source, + int maxSize, int timeWindowMs) => Batch(source, maxSize, TimeSpan.FromMilliseconds(timeWindowMs)); public static IAsyncEnumerable Merge(this IEnumerable> sources) => new AsyncEnumerableMerger(sources); diff --git a/src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs b/src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs deleted file mode 100644 index dcc820b..0000000 --- a/src/LocalPost/AsyncEnumerable/AsyncEnumeratorEx.cs +++ /dev/null @@ -1,24 +0,0 @@ -namespace LocalPost.AsyncEnumerable; - -internal static class AsyncEnumeratorEx -{ - public static async ValueTask Consume(this IAsyncEnumerator source, CancellationToken ct = default) - { - var waitTrigger = source.MoveNextAsync(); - var completed = waitTrigger.IsCompleted switch - { - true => await waitTrigger, - // TODO WaitAsync() from .NET 6+ - _ => await waitTrigger.AsTask().WaitAsync(ct) - }; - - if (completed) - // Ideally there 
should be a better way to communicate the completion... - // But because it is usually used for long-running enumerators, fine - throw new EndOfEnumeratorException("Source is empty"); - - return source.Current; - } -} - -internal sealed class EndOfEnumeratorException(string message) : Exception(message); diff --git a/src/LocalPost/AsyncEnumerable/BatchBuilder.cs b/src/LocalPost/AsyncEnumerable/BatchBuilder.cs deleted file mode 100644 index 52e4036..0000000 --- a/src/LocalPost/AsyncEnumerable/BatchBuilder.cs +++ /dev/null @@ -1,105 +0,0 @@ -namespace LocalPost.AsyncEnumerable; - -internal delegate IBatchBuilder BatchBuilderFactory(); - -internal interface IBatchBuilder : IDisposable -{ - CancellationToken TimeWindow { get; } - - bool IsEmpty { get; } - bool Full { get; } - - bool TryAdd(T entry); - - TBatch Build(); - void Reset(); - TBatch Flush(); -} - -internal abstract class BatchBuilderBase : IBatchBuilder -{ - private readonly TimeSpan _timeWindowDuration; - - private CancellationTokenSource _timeWindow; - - protected BatchBuilderBase(TimeSpan timeWindowDuration) - { - _timeWindowDuration = timeWindowDuration; - _timeWindow = StartTimeWindow(); - } - - public CancellationToken TimeWindow => _timeWindow.Token; - - public abstract bool IsEmpty { get; } - public abstract bool Full { get; } - - public abstract bool TryAdd(T entry); - - public abstract TBatch Build(); - - private CancellationTokenSource StartTimeWindow() => new(_timeWindowDuration); - - // Should be overwritten in derived classes, to reset their state also - public virtual void Reset() - { - _timeWindow.Cancel(); - _timeWindow.Dispose(); - _timeWindow = StartTimeWindow(); - } - - public TBatch Flush() - { - var batch = Build(); - Reset(); - return batch; - } - - public virtual void Dispose() - { - _timeWindow.Dispose(); - } -} - -internal abstract class BoundedBatchBuilderBase : BatchBuilderBase -{ - private readonly int _batchMaxSize; - protected List Batch; - - protected 
BoundedBatchBuilderBase(MaxSize batchMaxSize, TimeSpan timeWindowDuration) : - base(timeWindowDuration) - { - _batchMaxSize = batchMaxSize; - Batch = new List(_batchMaxSize); - } - - public override bool IsEmpty => Batch.Count == 0; - - public override bool Full => Batch.Count >= _batchMaxSize; - - public override bool TryAdd(T entry) - { - if (Full) - return false; - - Batch.Add(entry); - - return true; - } - - public override void Reset() - { - base.Reset(); - Batch = new List(_batchMaxSize); - } -} - -internal sealed class BoundedBatchBuilder(MaxSize batchMaxSize, TimeSpan timeWindowDuration) - : BoundedBatchBuilderBase>(batchMaxSize, timeWindowDuration) -{ - public BoundedBatchBuilder(MaxSize batchMaxSize, int timeWindowDuration) - : this(batchMaxSize, TimeSpan.FromMilliseconds(timeWindowDuration)) - { - } - - public override IReadOnlyCollection Build() => Batch; // ImmutableArray or something?.. -} diff --git a/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs b/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs deleted file mode 100644 index 8a07045..0000000 --- a/src/LocalPost/AsyncEnumerable/BatchingAsyncEnumerable.cs +++ /dev/null @@ -1,51 +0,0 @@ -namespace LocalPost.AsyncEnumerable; - -internal sealed class BatchingAsyncEnumerable( - IAsyncEnumerable reader, BatchBuilderFactory factory) : IAsyncEnumerable -{ - public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) - { - await using var source = reader.GetAsyncEnumerator(ct); - using var batchBuilder = factory(); - while (!ct.IsCancellationRequested) - { - TOut completedBatch; - try - { - var consumeResult = await source.Consume(batchBuilder.TimeWindow); - var added = batchBuilder.TryAdd(consumeResult); - if (!added) - { - completedBatch = batchBuilder.Flush(); - batchBuilder.TryAdd(consumeResult); // TODO If a message does not fit in an empty batch... 
- } - else - { - if (batchBuilder.Full) - completedBatch = batchBuilder.Flush(); - else - continue; - } - } - catch (EndOfEnumeratorException) - { - break; - } - // Batch time window has closed, or enumerator's cancellation has been triggered - catch (OperationCanceledException) - { - if (batchBuilder.IsEmpty) - continue; - - completedBatch = batchBuilder.Flush(); - } - - yield return completedBatch; - } - - if (!batchBuilder.IsEmpty) - yield return batchBuilder.Flush(); - - ct.ThrowIfCancellationRequested(); - } -} diff --git a/src/LocalPost/AsyncEnumerable/ConcurrentBuffer.cs b/src/LocalPost/AsyncEnumerable/ConcurrentBuffer.cs deleted file mode 100644 index e1f3cc0..0000000 --- a/src/LocalPost/AsyncEnumerable/ConcurrentBuffer.cs +++ /dev/null @@ -1,37 +0,0 @@ -using System.Threading.Channels; - -namespace LocalPost.AsyncEnumerable; - -// internal sealed class ConcurrentBuffer(IAsyncEnumerable source, MaxSize bufferMaxSize) -// : IAsyncEnumerable -// { -// private readonly Channel _buffer = Channel.CreateBounded(new BoundedChannelOptions(bufferMaxSize) -// { -// SingleReader = false, -// SingleWriter = true, -// FullMode = BoundedChannelFullMode.Wait, -// }); -// -// public async Task Run(CancellationToken ct) -// { -// var buffer = _buffer.Writer; -// try -// { -// await foreach (var item in source.WithCancellation(ct)) -// await buffer.WriteAsync(item, ct); -// } -// finally -// { -// buffer.Complete(); -// } -// } -// -// public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) -// { -// var buffer = _buffer.Reader; -// // Like ReadAllAsync() from netstandard2.1/.NET Core 3.0+ -// while (await buffer.WaitToReadAsync(ct).ConfigureAwait(false)) -// while (buffer.TryRead(out var item)) -// yield return item; -// } -// } diff --git a/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs b/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs index 5d0c4d1..17443d7 100644 --- 
a/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs +++ b/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs @@ -1,3 +1,4 @@ +using System.Collections.Immutable; using JetBrains.Annotations; using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; @@ -23,7 +24,7 @@ public OptionsBuilder> AddJobQueue() } public OptionsBuilder> AddBatchedQueue( - HandlerFactory>> hf) + HandlerFactory>> hf) { var defaultPipeline = Pipeline .Create(hf, provider => provider.GetOptions>()) diff --git a/src/LocalPost/HandlerStack.cs b/src/LocalPost/HandlerStack.cs index c966701..74e0251 100644 --- a/src/LocalPost/HandlerStack.cs +++ b/src/LocalPost/HandlerStack.cs @@ -1,3 +1,4 @@ +using System.Collections.Immutable; using System.Runtime.CompilerServices; using System.Threading.Channels; using JetBrains.Annotations; @@ -260,15 +261,15 @@ StreamProcessor BufferWriter(IServiceProvider provider) // } // } - public static PipelineRegistration Batch(this PipelineRegistration> next, + public static PipelineRegistration Batch(this PipelineRegistration> next, ushort batchMaxSize = 10, int timeWindowDuration = 1_000) => next.Batch(_ => new BatchOptions(batchMaxSize, timeWindowDuration)); - public static PipelineRegistration Batch(this PipelineRegistration> next, + public static PipelineRegistration Batch(this PipelineRegistration> next, Func config) => (context, source) => next(context, provider => { var stream = source(provider); var (batchMaxSize, timeWindowDuration) = config(provider); - return stream.Batch(() => new BoundedBatchBuilder(batchMaxSize, timeWindowDuration)); + return stream.Batch(batchMaxSize, timeWindowDuration); }); } diff --git a/src/LocalPost/LocalPost.csproj b/src/LocalPost/LocalPost.csproj index a4240b9..8ea7f14 100644 --- a/src/LocalPost/LocalPost.csproj +++ b/src/LocalPost/LocalPost.csproj @@ -53,13 +53,10 @@ - - - - - + + diff --git 
a/tests/LocalPost.Tests/AsyncEnumerable/AsyncEnumerableMergerTests.cs b/tests/LocalPost.Tests/AsyncEnumerable/AsyncEnumerableMergerTests.cs index ee46c60..394827f 100644 --- a/tests/LocalPost.Tests/AsyncEnumerable/AsyncEnumerableMergerTests.cs +++ b/tests/LocalPost.Tests/AsyncEnumerable/AsyncEnumerableMergerTests.cs @@ -24,6 +24,9 @@ internal async Task aggregates_multiple_channels() source1.Reader.ReadAllAsync(), source2.Reader.ReadAllAsync() }); + await Task.WhenAll(Produce(), Consume()); + return; + async Task Produce() { await source1.Writer.WriteAsync(1); @@ -55,8 +58,6 @@ async Task Consume() expect.Should().BeEmpty(); } - - await Task.WhenAll(Produce(), Consume()); } [Fact] @@ -74,6 +75,9 @@ internal async Task aggregates_multiple_channels_over_time() }); var results = new AsyncEnumerableMerger(true); + await Task.WhenAll(Produce(), Consume()); + return; + async Task Produce() { await source1.Writer.WriteAsync(1); @@ -121,8 +125,6 @@ async Task Consume() cts.IsCancellationRequested.Should().BeTrue(); expect.Should().BeEmpty(); } - - await Task.WhenAll(Produce(), Consume()); } [Fact] diff --git a/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs b/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs index f8cc65a..3080bea 100644 --- a/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs +++ b/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs @@ -6,15 +6,17 @@ namespace LocalPost.Tests.AsyncEnumerable; public class BatchingAsyncEnumerableTests { [Fact] - internal async Task batches() + internal async Task collects_in_batches() { var source = Channel.CreateUnbounded(new UnboundedChannelOptions { SingleReader = true, SingleWriter = false }); - var results = source.Reader.ReadAllAsync().Batch( - () => new BoundedBatchBuilder(10, TimeSpan.FromSeconds(2))); + var results = source.Reader.ReadAllAsync().Batch(10, TimeSpan.FromSeconds(2)); + + await Task.WhenAll(Produce(), Consume()); + return; 
async Task Produce() { @@ -42,7 +44,5 @@ async Task Consume() expect.Should().BeEmpty(); } - - await Task.WhenAll(Produce(), Consume()); } } From ad1dfbbbc80ef45a861f1c8374e6ad2502262190 Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Sat, 4 Jan 2025 11:37:10 +0000 Subject: [PATCH 21/33] Rework for simplicity --- .editorconfig | 4 +- .github/workflows/publish.yaml | 9 +- .github/workflows/qa.yml | 1 + Directory.Build.props | 2 +- LocalPost.sln | 6 + samples/SqsConsumerApp/Program.cs | 2 +- src/LocalPost.KafkaConsumer/ClientFactory.cs | 94 ++++++ src/LocalPost.KafkaConsumer/ConsumeContext.cs | 93 ++---- src/LocalPost.KafkaConsumer/Consumer.cs | 117 +++++++ .../HealthChecksBuilderEx.cs | 22 +- .../DependencyInjection/KafkaBuilder.cs | 124 ++----- .../ServiceCollectionEx.cs | 10 - src/LocalPost.KafkaConsumer/HandlerStackEx.cs | 70 +--- .../KafkaTopicClient.cs | 97 ------ .../LocalPost.KafkaConsumer.csproj | 2 +- src/LocalPost.KafkaConsumer/MessageSource.cs | 123 ------- src/LocalPost.KafkaConsumer/OffsetManager.cs | 58 ---- src/LocalPost.KafkaConsumer/Options.cs | 145 +------- src/LocalPost.KafkaConsumer/Tracing.cs | 48 ++- src/LocalPost.KafkaConsumer/globalusings.cs | 3 + .../LocalPost.Resilience.csproj | 58 ---- src/LocalPost.Resilience/README.md | 1 - src/LocalPost.SqsConsumer/ConsumeContext.cs | 58 +--- src/LocalPost.SqsConsumer/Consumer.cs | 118 +++++++ .../HealthChecksBuilderEx.cs | 22 +- .../ServiceCollectionEx.cs | 6 - .../DependencyInjection/SqsBuilder.cs | 131 ++------ src/LocalPost.SqsConsumer/HandlerStackEx.cs | 74 +---- .../LocalPost.SqsConsumer.csproj | 2 +- src/LocalPost.SqsConsumer/MessageSource.cs | 99 ------ src/LocalPost.SqsConsumer/Middlewares.cs | 35 -- src/LocalPost.SqsConsumer/Options.cs | 59 +--- src/LocalPost.SqsConsumer/QueueClient.cs | 110 +++---- src/LocalPost.SqsConsumer/Tracing.cs | 30 +- src/LocalPost.SqsConsumer/globalusings.cs | 3 + src/LocalPost/ActivityEx.cs | 8 +- src/LocalPost/AppHealthSupervisor.cs | 16 +- 
.../AsyncEnumerable/AsyncEnumerableEx.cs | 15 - .../AsyncEnumerable/AsyncEnumerableMerger.cs | 87 ----- .../AsyncEnumerable/ConcurrentSet.cs | 50 --- src/LocalPost/BackgroundQueue.cs | 16 +- .../BackgroundQueue/BackgroundJobQueue.cs | 7 +- .../BackgroundQueue/BackgroundQueue.cs | 209 ++++++------ .../BackgroundQueuesBuilder.cs | 111 ++----- .../DependencyInjection/HealthChecks.cs | 18 - .../HealthChecksBuilderEx.cs | 27 ++ .../ServiceCollectionEx.cs | 1 - .../BackgroundQueue/HandlerStackEx.cs | 8 - src/LocalPost/BackgroundQueue/Options.cs | 106 +----- src/LocalPost/BackgroundQueueConsumer.cs | 146 --------- src/LocalPost/ConcurrentHostedServices.cs | 309 ------------------ .../DependencyInjection/HealthChecks.cs | 104 +----- .../DependencyInjection/IAssistantService.cs | 28 -- .../DependencyInjection/INamedService.cs | 34 -- .../ServiceCollectionEx.cs | 67 ---- .../ServiceCollectionTools.cs | 113 ++----- .../ServiceProviderLookups.cs | 66 ---- src/LocalPost/Handler.cs | 26 -- src/LocalPost/HandlerStack.cs | 257 --------------- .../{HandlerStackEx.cs => HandlerStackOps.cs} | 48 +-- src/LocalPost/LocalPost.csproj | 10 +- src/LocalPost/Middlewares.cs | 100 +++--- src/LocalPost/Options.cs | 3 - src/LocalPost/QueuePublisher.cs | 8 - src/LocalPost/Reflection.cs | 16 +- .../Resilience}/HandlerStackEx.cs | 1 - src/LocalPost/globalusings.cs | 3 + 67 files changed, 870 insertions(+), 2984 deletions(-) create mode 100644 src/LocalPost.KafkaConsumer/ClientFactory.cs create mode 100644 src/LocalPost.KafkaConsumer/Consumer.cs delete mode 100644 src/LocalPost.KafkaConsumer/KafkaTopicClient.cs delete mode 100644 src/LocalPost.KafkaConsumer/MessageSource.cs delete mode 100644 src/LocalPost.KafkaConsumer/OffsetManager.cs create mode 100644 src/LocalPost.KafkaConsumer/globalusings.cs delete mode 100644 src/LocalPost.Resilience/LocalPost.Resilience.csproj delete mode 100644 src/LocalPost.Resilience/README.md create mode 100644 src/LocalPost.SqsConsumer/Consumer.cs delete mode 100644 
src/LocalPost.SqsConsumer/MessageSource.cs delete mode 100644 src/LocalPost.SqsConsumer/Middlewares.cs create mode 100644 src/LocalPost.SqsConsumer/globalusings.cs delete mode 100644 src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs delete mode 100644 src/LocalPost/AsyncEnumerable/AsyncEnumerableMerger.cs delete mode 100644 src/LocalPost/AsyncEnumerable/ConcurrentSet.cs delete mode 100644 src/LocalPost/BackgroundQueue/DependencyInjection/HealthChecks.cs create mode 100644 src/LocalPost/BackgroundQueue/DependencyInjection/HealthChecksBuilderEx.cs delete mode 100644 src/LocalPost/BackgroundQueueConsumer.cs delete mode 100644 src/LocalPost/ConcurrentHostedServices.cs delete mode 100644 src/LocalPost/DependencyInjection/IAssistantService.cs delete mode 100644 src/LocalPost/DependencyInjection/INamedService.cs delete mode 100644 src/LocalPost/DependencyInjection/ServiceCollectionEx.cs rename src/LocalPost/{HandlerStackEx.cs => HandlerStackOps.cs} (65%) delete mode 100644 src/LocalPost/Options.cs delete mode 100644 src/LocalPost/QueuePublisher.cs rename src/{LocalPost.Resilience => LocalPost/Resilience}/HandlerStackEx.cs (95%) create mode 100644 src/LocalPost/globalusings.cs diff --git a/.editorconfig b/.editorconfig index f48bc7e..6201ef8 100644 --- a/.editorconfig +++ b/.editorconfig @@ -35,8 +35,10 @@ trim_trailing_whitespace = true insert_final_newline = true # See https://github.com/dotnet/aspnetcore/blob/main/.editorconfig -[*.{cs,vb}] +[src/**/*.{cs,vb}] +# See https://www.jetbrains.com/help/resharper/ConfigureAwait_Analysis.html +configure_await_analysis_mode = library # CA2007: Consider calling ConfigureAwait on the awaited task #dotnet_diagnostic.CA2007.severity = warning diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml index b59b95d..fb42371 100644 --- a/.github/workflows/publish.yaml +++ b/.github/workflows/publish.yaml @@ -9,7 +9,7 @@ jobs: env: DOTNET_NOLOGO: true steps: - - uses: actions/checkout@v3 + - uses: 
actions/checkout@v4 with: # Required for GitVersion fetch-depth: 0 @@ -18,6 +18,7 @@ jobs: dotnet-version: | 6.0.x 8.0.x + 9.0.x - run: dotnet restore - run: dotnet build -c Release --no-restore - run: dotnet test -c Release --no-build --verbosity=minimal @@ -27,13 +28,13 @@ jobs: env: DOTNET_NOLOGO: true steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: # Required for GitVersion fetch-depth: 0 - - uses: actions/setup-dotnet@v3 + - uses: actions/setup-dotnet@v4 with: - dotnet-version: 8.0.x + dotnet-version: 9.0.x - run: dotnet pack -c Release - name: Publish run: | diff --git a/.github/workflows/qa.yml b/.github/workflows/qa.yml index fac316e..05fbdd6 100644 --- a/.github/workflows/qa.yml +++ b/.github/workflows/qa.yml @@ -42,6 +42,7 @@ jobs: dotnet-version: | 6.0.x 8.0.x + 9.0.x - run: dotnet tool restore - run: dotnet gitversion /output buildserver - run: ./sonar-scan.sh diff --git a/Directory.Build.props b/Directory.Build.props index f39cb87..becbac8 100644 --- a/Directory.Build.props +++ b/Directory.Build.props @@ -1,7 +1,7 @@ - 12 + 13 enable enable true diff --git a/LocalPost.sln b/LocalPost.sln index 325365c..9d2db71 100644 --- a/LocalPost.sln +++ b/LocalPost.sln @@ -40,6 +40,8 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.NatsConsumer", "s EndProject Project("{F2A71F9B-5D33-465A-A702-920D77279786}") = "BackgroundQueueApp.FSharp", "samples\BackgroundQueueApp.FSharp\BackgroundQueueApp.FSharp.fsproj", "{79CF7EFF-860D-464F-B59A-55E48D25D70C}" EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.Flow", "src\LocalPost.Flow\LocalPost.Flow.csproj", "{F726A4D7-C35A-417C-8E54-2B6D58FA2747}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -122,6 +124,10 @@ Global {79CF7EFF-860D-464F-B59A-55E48D25D70C}.Debug|Any CPU.Build.0 = Debug|Any CPU {79CF7EFF-860D-464F-B59A-55E48D25D70C}.Release|Any CPU.ActiveCfg = Release|Any CPU 
{79CF7EFF-860D-464F-B59A-55E48D25D70C}.Release|Any CPU.Build.0 = Release|Any CPU + {F726A4D7-C35A-417C-8E54-2B6D58FA2747}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {F726A4D7-C35A-417C-8E54-2B6D58FA2747}.Debug|Any CPU.Build.0 = Debug|Any CPU + {F726A4D7-C35A-417C-8E54-2B6D58FA2747}.Release|Any CPU.ActiveCfg = Release|Any CPU + {F726A4D7-C35A-417C-8E54-2B6D58FA2747}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(NestedProjects) = preSolution {46FC61E6-D0FB-4D7D-A81B-20EF8D8D1F4E} = {405721DC-F290-4191-B638-9907D5EB042B} diff --git a/samples/SqsConsumerApp/Program.cs b/samples/SqsConsumerApp/Program.cs index e90e508..fe78cc7 100644 --- a/samples/SqsConsumerApp/Program.cs +++ b/samples/SqsConsumerApp/Program.cs @@ -82,7 +82,7 @@ public static HandlerFactory LogFingersCrossed(this HandlerFactory hf) } catch (OperationCanceledException e) when (e.CancellationToken == ct) { - throw; // Not a real error + throw; // Do not treat cancellation as an error } catch (Exception) { diff --git a/src/LocalPost.KafkaConsumer/ClientFactory.cs b/src/LocalPost.KafkaConsumer/ClientFactory.cs new file mode 100644 index 0000000..3127447 --- /dev/null +++ b/src/LocalPost.KafkaConsumer/ClientFactory.cs @@ -0,0 +1,94 @@ +using Confluent.Kafka; + +namespace LocalPost.KafkaConsumer; + +internal sealed class Client +{ + private readonly ILogger _logger; + + public Client(ILogger logger, IConsumer consumer, ConsumerConfig config) + { + _logger = logger; + Consumer = consumer; + Config = config; + var server = config.BootstrapServers.Split(',')[0].Split(':'); + ServerAddress = server[0]; + if (server.Length > 1) + ServerPort = int.Parse(server[1]); + } + + public ConsumeResult Consume(CancellationToken ct) + { + while (true) + { + try + { + var result = Consumer.Consume(ct); + + if (result is not null && result.IsPartitionEOF) + _logger.LogInformation("End of {Partition} on {Topic}", + result.TopicPartition.Partition, result.TopicPartition.Topic); + else if 
(result?.Message is not null) + return result; + else + _logger.LogWarning("Kafka consumer empty receive"); + } + // catch (ConsumeException e) + catch (KafkaException e) when (!e.Error.IsFatal) + { + _logger.LogCritical(e, "Kafka consumer (retryable) error: {Reason}", e.Error.Reason); + // Just continue receiving + + // "generally, the producer should recover from all errors, except where marked fatal" as per + // https://github.com/confluentinc/confluent-kafka-dotnet/issues/1213#issuecomment-599772818, so + // just continue polling + } + } + } + + public IConsumer Consumer { get; } + public ConsumerConfig Config { get; } + public string ServerAddress { get; } + public int ServerPort { get; } = 9092; +} + +internal sealed class ClientFactory(ILogger logger, ILogger clientLogger) : IDisposable +{ + private List _clients = []; + + public Client Create(ConsumerConfig config, IEnumerable topics) + { + var consumer = new ConsumerBuilder(config) + .SetErrorHandler((_, e) => clientLogger.LogError("{Error}", e)) + .SetLogHandler((_, m) => clientLogger.LogDebug(m.Message)) + .Build(); + consumer.Subscribe(topics); + var client = new Client(clientLogger, consumer, config); + _clients.Add(client); + return client; + } + + private void Close(IConsumer consumer) + { + try + { + consumer.Close(); + } + catch (Exception e) + { + logger.LogError(e, "Error closing Kafka consumer"); + } + finally + { + consumer.Dispose(); + } + } + + public void Dispose() + { + // TODO Run in parallel?.. 
+ foreach (var client in _clients) + Close(client.Consumer); + _clients = []; + } +} diff --git a/src/LocalPost.KafkaConsumer/ConsumeContext.cs b/src/LocalPost.KafkaConsumer/ConsumeContext.cs index 82fab2d..8aac243 100644 --- a/src/LocalPost.KafkaConsumer/ConsumeContext.cs +++ b/src/LocalPost.KafkaConsumer/ConsumeContext.cs @@ -1,34 +1,24 @@ using Confluent.Kafka; -using JetBrains.Annotations; -using LocalPost.AsyncEnumerable; namespace LocalPost.KafkaConsumer; -// internal static class ConsumeContext -// { -// public static BatchBuilderFactory, BatchConsumeContext> BatchBuilder( -// MaxSize batchMaxSize, TimeSpan timeWindow) => ct => -// new BatchConsumeContext.Builder(batchMaxSize, timeWindow, ct); -// } - [PublicAPI] public readonly record struct ConsumeContext { - internal readonly KafkaTopicClient Client; // librdkafka docs: - // When consumer restarts this is where it will start consuming from. - // The committed offset should be last_message_offset+1. + // When consumer restarts this is where it will start consuming from. + // The committed offset should be last_message_offset+1. 
// See https://github.com/confluentinc/librdkafka/wiki/Consumer-offset-management#terminology - internal readonly TopicPartitionOffset NextOffset; - internal readonly Message Message; +// internal readonly TopicPartitionOffset NextOffset; + + internal readonly Client Client; + internal readonly ConsumeResult ConsumeResult; public readonly T Payload; - internal ConsumeContext(KafkaTopicClient client, TopicPartitionOffset nextOffset, Message message, - T payload) + internal ConsumeContext(Client client, ConsumeResult consumeResult, T payload) { Client = client; - NextOffset = nextOffset; - Message = message; + ConsumeResult = consumeResult; Payload = payload; } @@ -38,11 +28,15 @@ public void Deconstruct(out T payload, out IReadOnlyList headers) headers = Headers; } - public string Topic => Client.Topic; + public Offset NextOffset => ConsumeResult.Offset + 1; + + public Message Message => ConsumeResult.Message; + + public string Topic => ConsumeResult.Topic; public IReadOnlyList Headers => Message.Headers.BackingList; - public ConsumeContext Transform(TOut payload) => new(Client, NextOffset, Message, payload); + public ConsumeContext Transform(TOut payload) => new(Client, ConsumeResult, payload); public ConsumeContext Transform(Func, TOut> transform) => Transform(transform(this)); @@ -50,58 +44,9 @@ public async Task> Transform(Func, Transform(await transform(this)); public static implicit operator T(ConsumeContext context) => context.Payload; -} -// [PublicAPI] -// public readonly record struct BatchConsumeContext -// { -// internal sealed class Builder(MaxSize batchMaxSize, TimeSpan timeWindowDuration, CancellationToken ct = default) -// : BoundedBatchBuilderBase, BatchConsumeContext>(batchMaxSize, timeWindowDuration, ct) -// { -// public override BatchConsumeContext Build() -// { -// // #if NET6_0_OR_GREATER -// // ReadOnlySpan s = CollectionsMarshal.AsSpan(Batch) -// // var ia = s.ToImmutableArray(); -// // return new BatchConsumeContext(Batch); -// // #else -// 
// return new BatchConsumeContext(Batch.ToImmutableArray()); -// // #endif -// return new BatchConsumeContext(Batch); -// } -// } -// -// // TODO ImmutableArray -// public readonly IReadOnlyList> Messages; -// -// internal BatchConsumeContext(IReadOnlyList> messages) -// { -// if (messages.Count == 0) -// throw new ArgumentException("Batch must contain at least one message", nameof(messages)); -// -// Messages = messages; -// } -// -// public BatchConsumeContext Transform(ConsumeContext[] payload) => new(payload); -// -// public BatchConsumeContext Transform(IEnumerable> payload) => -// Transform(payload.ToArray()); -// -// public BatchConsumeContext Transform(IEnumerable batchPayload) => -// Transform(Messages.Zip(batchPayload, (message, payload) => message.Transform(payload))); -// -// public BatchConsumeContext Transform(Func, TOut> transform) -// { -// // TODO Parallel LINQ -// var messages = Messages.Select(transform); -// return Transform(messages); -// } -// -// public async Task> Transform(Func, Task> transform) -// { -// var messages = await Task.WhenAll(Messages.Select(transform)); -// return Transform(messages); -// } -// -// internal KafkaTopicClient Client => Messages[^1].Client; -// } + public void StoreOffset() => Client.Consumer.StoreOffset(ConsumeResult); + + // To be consistent across different message brokers + public void Acknowledge() => StoreOffset(); +} diff --git a/src/LocalPost.KafkaConsumer/Consumer.cs b/src/LocalPost.KafkaConsumer/Consumer.cs new file mode 100644 index 0000000..e47a8dc --- /dev/null +++ b/src/LocalPost.KafkaConsumer/Consumer.cs @@ -0,0 +1,117 @@ +using Confluent.Kafka; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; +using Microsoft.Extensions.Hosting; + +namespace LocalPost.KafkaConsumer; + +internal sealed class Consumer(string name, ILogger logger, + ClientFactory clientFactory, ConsumerOptions settings, Handler> handler) + : IHostedService, IHealthAwareService, IDisposable 
+{ + private sealed class ReadinessHealthCheck(Consumer consumer) : IHealthCheck + { + public Task CheckHealthAsync(HealthCheckContext context, CancellationToken ct = default) => + Task.FromResult(consumer.Ready); + } + + private CancellationTokenSource? _execTokenSource; + private Task? _execution; + private Exception? _execException; + private string? _execExceptionDescription; + + public string Name { get; } = name; + + private HealthCheckResult Ready => (_execTokenSource, _execution, _execException) switch + { + (null, _, _) => HealthCheckResult.Unhealthy("Not started"), + (_, { IsCompleted: true }, _) => HealthCheckResult.Unhealthy("Stopped"), + (not null, null, _) => HealthCheckResult.Degraded("Starting"), + (not null, not null, null) => HealthCheckResult.Healthy("Running"), + (_, _, not null) => HealthCheckResult.Unhealthy(_execExceptionDescription, _execException), + }; + + public IHealthCheck ReadinessCheck => new ReadinessHealthCheck(this); + + private async Task RunConsumerAsync(Client client, CancellationToken execToken) + { + // (Optionally) wait for app start + + try + { + while (!execToken.IsCancellationRequested) + { + var result = client.Consume(execToken); + await handler(new ConsumeContext(client, result, result.Message.Value), CancellationToken.None); + } + } + catch (OperationCanceledException e) when (e.CancellationToken == execToken) + { + // logger.LogInformation("Kafka consumer shutdown"); + } + catch (KafkaException e) + { + logger.LogCritical(e, "Kafka consumer error: {Reason} (see {HelpLink})", e.Error.Reason, e.HelpLink); + (_execException, _execExceptionDescription) = (e, "Kafka consumer failed"); + } + catch (Exception e) + { + logger.LogCritical(e, "Kafka message handler error"); + // TODO Include headers or the partition key in check result's data + (_execException, _execExceptionDescription) = (e, "Message handler failed"); + } + finally + { + CancelExecution(); // Stop other consumers too + } + } + + public async Task 
StartAsync(CancellationToken ct) + { + if (_execTokenSource is not null) + throw new InvalidOperationException("Service is already started"); + + var execTokenSource = _execTokenSource = new CancellationTokenSource(); + var execution = settings.Consumers switch + { + 1 => await StartConsumerAsync(), + _ => Task.WhenAll( + await Task.WhenAll(Enumerable.Range(0, settings.Consumers).Select(_ => StartConsumerAsync()))) + }; + _execution = ObserveExecution(); + return; + + async Task StartConsumerAsync() + { + var kafkaClient = await Task.Run(() => clientFactory.Create(settings.ClientConfig, settings.Topics), ct); + + return Task.Run(() => RunConsumerAsync(kafkaClient, execTokenSource.Token), ct); + } + + async Task ObserveExecution() + { + await execution; + // Can happen before the service shutdown, in case of an error + logger.LogInformation("Kafka consumer stopped"); + } + } + + // await _execTokenSource.CancelAsync(); // .NET 8+ + private void CancelExecution() => _execTokenSource?.Cancel(); + + public async Task StopAsync(CancellationToken forceShutdownToken) + { + if (_execTokenSource is null) + throw new InvalidOperationException("Service has not been started"); + + logger.LogInformation("Shutting down Kafka consumer"); + CancelExecution(); + if (_execution is not null) + await _execution.ConfigureAwait(false); + } + + public void Dispose() + { + _execTokenSource?.Dispose(); + } +} diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecksBuilderEx.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecksBuilderEx.cs index c0b2245..a4fb1c3 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecksBuilderEx.cs +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/HealthChecksBuilderEx.cs @@ -1,4 +1,3 @@ -using JetBrains.Annotations; using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Diagnostics.HealthChecks; @@ -8,17 +7,16 @@ namespace 
LocalPost.KafkaConsumer.DependencyInjection; [PublicAPI] public static class HealthChecksBuilderEx { - // TODO AddKafkaConsumersLivenessCheck() — simply for all the registered consumers + public static IHealthChecksBuilder AddKafkaConsumer(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = null, IEnumerable? tags = null) => + builder.Add(HealthChecks.Readiness(name, failureStatus, tags)); - // Check if the same check is added twice?.. + public static IHealthChecksBuilder AddKafkaConsumers(this IHealthChecksBuilder builder, + HealthStatus? failureStatus = null, IEnumerable? tags = null) + { + foreach (var name in builder.Services.GetKeysFor().OfType()) + AddKafkaConsumer(builder, name, failureStatus, tags); - public static IHealthChecksBuilder AddKafkaConsumerLivenessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder - .Add(HealthChecks.LivenessCheck(name, failureStatus, tags)) - .AddPipelineLivenessCheck(name); - - // public static IHealthChecksBuilder AddKafkaBatchConsumerLivenessCheck(this IHealthChecksBuilder builder, - // string name, HealthStatus? failureStatus = default, IEnumerable? 
tags = default) => builder - // .Add(HealthChecks.LivenessCheckForNamed(name, failureStatus, tags)) - // .AddNamedConsumerLivenessCheck>(name); + return builder; + } } diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs index bcfc19e..8188bca 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs @@ -1,6 +1,4 @@ -using System.Collections.Immutable; using Confluent.Kafka; -using JetBrains.Annotations; using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Options; @@ -13,114 +11,42 @@ public sealed class KafkaBuilder(IServiceCollection services) public OptionsBuilder Defaults { get; } = services.AddOptions(); /// - /// Default batch consumer pipeline. + /// Add a Kafka consumer with (should be registered separately) as a message handler. /// - /// Consumer name (also the default queue name). Should be unique in the application. - /// Handler factory. - /// Pipeline options builder. - public OptionsBuilder AddBatchConsumer(string name, - HandlerFactory>> hf) - { - var defaultPipeline = Pipeline - .Create(hf, provider => provider.GetOptions(name)) - .Batch(provider => provider.GetOptions(name)); - - Add(name, defaultPipeline) - .Configure>((options, pipelineOptions) => - options.UpdateFrom(pipelineOptions.Get(name).Consume)); - - return BatchedConsumerFor(name).Configure>((options, clientConfig) => - { - options.Consume.EnrichFrom(clientConfig.Value); - options.Consume.Topic = name; - }); - } + /// Consumer name (should be unique in the application). + /// Message handler type. + /// Consumer options builder. + public OptionsBuilder AddConsumer(string name) + where THandler : IHandler> + => AddConsumer(name, provider => provider.GetRequiredService().InvokeAsync); /// - /// Default consumer pipeline. 
+ /// Add a Kafka consumer with a custom message handler. /// - /// Consumer name (also the default queue name). Should be unique in the application. - /// Handler factory. - /// Pipeline options builder. - public OptionsBuilder AddConsumer(string name, HandlerFactory> hf) - { - var defaultPipeline = Pipeline - .Create(hf, provider => provider.GetOptions(name)); - - Add(name, defaultPipeline) - .Configure>((options, pipelineOptions) => - options.UpdateFrom(pipelineOptions.Get(name).Consume)); - - return ConsumerFor(name).Configure>((options, clientConfig) => - { - options.Consume.EnrichFrom(clientConfig.Value); - options.Consume.Topic = name; - }); - } - - internal OptionsBuilder Add(string name, PipelineRegistration> pr) + /// Consumer name (should be unique in the application). + /// Message handler factory. + /// Consumer options builder. + public OptionsBuilder AddConsumer(string name, HandlerFactory> hf) { if (string.IsNullOrEmpty(name)) // TODO Just default (empty?) name... throw new ArgumentException("A proper (non empty) name is required", nameof(name)); - if (!services.TryAddKafkaClient(name)) - throw new ArgumentException("Kafka consumer is already registered", nameof(name)); - - services.TryAddNamedSingleton(name, provider => - new MessageSource(provider.GetRequiredService(name))); - services.AddBackgroundService(name); - - pr(services.RegistrationContextFor(name), provider => - provider.GetRequiredService(name)); - - return PipelineFor(name).Configure>((options, clientConfig) => - { - options.EnrichFrom(clientConfig.Value); - options.Topic = name; - }); - } - - public OptionsBuilder PipelineFor(string name) => services.AddOptions(name); + services.TryAddSingleton(); - public OptionsBuilder ConsumerFor(string name) => - services.AddOptions(name); + var added = services.TryAddKeyedSingleton(name, (provider, _) => new Consumer(name, + provider.GetLoggerFor(), + provider.GetRequiredService(), + provider.GetOptions(name), + hf(provider) + )); - public 
OptionsBuilder BatchedConsumerFor(string name) => - services.AddOptions(name); - - // TODO Health checks + if (!added) + throw new ArgumentException("Consumer is already registered", nameof(name)); + services.AddHostedService(provider => provider.GetRequiredKeyedService(name)); + return OptionsFor(name).Configure>((co, defaults) => co.EnrichFrom(defaults.Value)); + } - // public OptionsBuilder AddBatchConsumer(string name, - // HandlerFactory> configure) - // { - // if (string.IsNullOrEmpty(name)) - // throw new ArgumentException("A proper (non empty) name is required", nameof(name)); - // - // if (!services.TryAddKafkaClient(name)) - // throw new InvalidOperationException("Kafka consumer is already registered"); - // - // services.TryAddNamedSingleton(name, provider => - // { - // var options = provider.GetOptions(name); - // - // return new BatchMessageSource(provider.GetRequiredService(name), - // ConsumeContext.BatchBuilder( - // options.BatchMaxSize, TimeSpan.FromMilliseconds(options.BatchTimeWindowMilliseconds))); - // }); - // services.AddBackgroundService(name); - // - // services.TryAddBackgroundConsumer, BatchMessageSource>(name, configure, provider => - // { - // var options = provider.GetOptions(name); - // return new ConsumerOptions(1, options.BreakOnException); - // }); - // - // return services.AddOptions(name).Configure>((options, commonConfig) => - // { - // options.EnrichFrom(commonConfig.Value); - // options.Topic = name; - // }); - // } + public OptionsBuilder OptionsFor(string name) => services.AddOptions(name); } diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionEx.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionEx.cs index fa02ba6..67d65f4 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionEx.cs +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/ServiceCollectionEx.cs @@ -1,5 +1,3 @@ -using JetBrains.Annotations; -using LocalPost.DependencyInjection; using 
Microsoft.Extensions.DependencyInjection; namespace LocalPost.KafkaConsumer.DependencyInjection; @@ -13,12 +11,4 @@ public static IServiceCollection AddKafkaConsumers(this IServiceCollection servi return services; } - - internal static bool TryAddKafkaClient(this IServiceCollection services, string name) => - services.TryAddNamedSingleton(name, provider => - { - var options = provider.GetOptions(name); - - return new KafkaTopicClient(provider.GetLoggerFor(), options, options.Topic, name); - }); } diff --git a/src/LocalPost.KafkaConsumer/HandlerStackEx.cs b/src/LocalPost.KafkaConsumer/HandlerStackEx.cs index df297e1..aabc18a 100644 --- a/src/LocalPost.KafkaConsumer/HandlerStackEx.cs +++ b/src/LocalPost.KafkaConsumer/HandlerStackEx.cs @@ -1,28 +1,14 @@ using System.Text.Json; using Confluent.Kafka; -using JetBrains.Annotations; namespace LocalPost.KafkaConsumer; -// [PublicAPI] -// public static class PipelineOps -// { -// public static PipelineRegistration Batch(this PipelineRegistration> next, -// ushort batchMaxSize = 10, int timeWindowDuration = 1_000) => next.Map>((stream, _) => -// stream.Batch(() => new BoundedBatchBuilder(batchMaxSize, timeWindowDuration))); -// } - [PublicAPI] public static class HandlerStackEx { public static HandlerFactory> UseKafkaPayload(this HandlerFactory hf) => hf.Map, T>(next => async (context, ct) => await next(context.Payload, ct)); - public static HandlerFactory>> UseKafkaPayload( - this HandlerFactory> hf) => - hf.Map>, IEnumerable>(next => - async (batch, ct) => await next(batch.Select(context => context.Payload), ct)); - public static HandlerFactory> Trace(this HandlerFactory> hf) => hf.Map, ConsumeContext>(next => async (context, ct) => @@ -40,43 +26,12 @@ public static HandlerFactory> Trace(this HandlerFactory>> Trace(this HandlerFactory>> hf) => - hf.Map>, IEnumerable>>(next => - async (batch, ct) => - { - var context = batch.ToList(); // TODO Optimize - using var activity = Tracing.StartProcessing(context); - try - { - 
await next(context, ct); - activity?.Success(); - } - catch (Exception e) - { - activity?.Error(e); - throw; - } - }); - public static HandlerFactory> Acknowledge(this HandlerFactory> hf) => hf.Map, ConsumeContext>(next => async (context, ct) => { await next(context, ct); - context.Client.StoreOffset(context.NextOffset); - }); - - public static HandlerFactory>> Acknowledge( - this HandlerFactory>> hf) => - hf.Map>, IEnumerable>>(next => - async (batch, ct) => - { - var context = batch.ToList(); // TODO Optimize - await next(context, ct); - // Store all the offsets, as it can be a batch of messages from different partitions - // (even different topics, if subscribed using a regex) - foreach (var message in context) - message.Client.StoreOffset(message.NextOffset); + context.Acknowledge(); }); #region Deserialize() @@ -86,22 +41,11 @@ public static HandlerFactory> Deserialize( hf.Map, ConsumeContext>(next => async (context, ct) => await next(context.Transform(deserialize), ct)); - public static HandlerFactory>> Deserialize( - this HandlerFactory>> hf, Func, T> deserialize) => - hf.Map>, IEnumerable>>(next => - async (batch, ct) => await next(batch.Select(context => context.Transform(deserialize)), ct)); - public static HandlerFactory> Deserialize( this HandlerFactory> hf, Func, Task> deserialize) => hf.Map, ConsumeContext>(next => async (context, ct) => await next(await context.Transform(deserialize), ct)); - public static HandlerFactory>> Deserialize( - this HandlerFactory>> hf, Func, Task> deserialize) => - hf.Map>, IEnumerable>>(next => - async (batch, ct) => - await next(await Task.WhenAll(batch.Select(context => context.Transform(deserialize))), ct)); - private static Func, Task> AsyncDeserializer(IAsyncDeserializer deserializer) => context => deserializer.DeserializeAsync(context.Payload, false, new SerializationContext( MessageComponentType.Value, context.Topic, context.Message.Headers)); @@ -110,10 +54,6 @@ public static HandlerFactory> Deserialize( this 
HandlerFactory> hf, IAsyncDeserializer deserializer) => hf.Deserialize(AsyncDeserializer(deserializer)); - public static HandlerFactory>> Deserialize( - this HandlerFactory>> hf, IAsyncDeserializer deserializer) => - hf.Deserialize(AsyncDeserializer(deserializer)); - private static Func, T> Deserializer(IDeserializer deserializer) => context => deserializer.Deserialize(context.Payload, false, new SerializationContext( MessageComponentType.Value, context.Topic, context.Message.Headers)); @@ -122,17 +62,9 @@ public static HandlerFactory> Deserialize( this HandlerFactory> hf, IDeserializer deserializer) => hf.Deserialize(Deserializer(deserializer)); - public static HandlerFactory>> Deserialize( - this HandlerFactory>> hf, IDeserializer deserializer) => - hf.Deserialize(Deserializer(deserializer)); - #endregion public static HandlerFactory> DeserializeJson( this HandlerFactory> hf, JsonSerializerOptions? options = null) => hf.Deserialize(context => JsonSerializer.Deserialize(context.Payload, options)!); - - public static HandlerFactory>> DeserializeJson( - this HandlerFactory>> hf, JsonSerializerOptions? 
options = null) => - hf.Deserialize(context => JsonSerializer.Deserialize(context.Payload, options)!); } diff --git a/src/LocalPost.KafkaConsumer/KafkaTopicClient.cs b/src/LocalPost.KafkaConsumer/KafkaTopicClient.cs deleted file mode 100644 index d1649eb..0000000 --- a/src/LocalPost.KafkaConsumer/KafkaTopicClient.cs +++ /dev/null @@ -1,97 +0,0 @@ -using Confluent.Kafka; -using LocalPost.DependencyInjection; -using Microsoft.Extensions.Logging; - -namespace LocalPost.KafkaConsumer; - -internal static class KafkaLogging -{ - public static void LogKafkaMessage(this ILogger logger, string topic, LogMessage log) - { - var level = log.Level switch - { - SyslogLevel.Emergency => LogLevel.Critical, - SyslogLevel.Alert => LogLevel.Critical, - SyslogLevel.Critical => LogLevel.Critical, - SyslogLevel.Error => LogLevel.Error, - SyslogLevel.Warning => LogLevel.Warning, - SyslogLevel.Notice => LogLevel.Information, - SyslogLevel.Info => LogLevel.Information, - SyslogLevel.Debug => LogLevel.Debug, - _ => LogLevel.Information - }; - - logger.Log(level, "{Topic} (via librdkafka): {Message}", topic, log.Message); - } -} - -internal sealed class KafkaTopicClient : INamedService, IDisposable -{ - private readonly ILogger _logger; - private readonly IConsumer _client; - - public KafkaTopicClient(ILogger logger, ConsumerConfig config, string topic, string name) - { - _logger = logger; - - _client = new ConsumerBuilder(config) - .SetLogHandler((_, log) => _logger.LogKafkaMessage(topic, log)) - .Build(); - - Topic = topic; - GroupId = config.GroupId; - Name = name; - } - - public string Topic { get; } - - public string GroupId { get; } - - public string Name { get; } - - public void Subscribe() => _client.Subscribe(Topic); - - public void Close() - { - _logger.LogInformation("Stopping Kafka {Topic} consumer...", Topic); - - _client.Close(); // No need for additional .Dispose() call - } - - public void StoreOffset(TopicPartitionOffset topicPartitionOffset) => - 
_client.StoreOffset(topicPartitionOffset); - - public ConsumeContext Read(CancellationToken ct = default) - { - while (true) - { - try - { - var result = _client.Consume(ct); - - // Log an empty receive?.. - if (result is null || result.IsPartitionEOF || result.Message is null) - continue; // Continue waiting for a message - - return new ConsumeContext(this, - new TopicPartitionOffset(result.Topic, result.Partition, result.Offset + 1, result.LeaderEpoch), - result.Message, - result.Message.Value); - } - catch (ConsumeException e) when (!e.Error.IsFatal) - { - _logger.LogError(e, "Kafka {Topic} consumer error, more details: {HelpLink}", - Topic, e.HelpLink); - - // "generally, the producer should recover from all errors, except where marked fatal" as per - // https://github.com/confluentinc/confluent-kafka-dotnet/issues/1213#issuecomment-599772818, so - // just continue polling - } - } - } - - public void Dispose() - { - _client.Dispose(); - } -} diff --git a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj index 82fc57c..0e9a064 100644 --- a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj +++ b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj @@ -44,7 +44,7 @@ - + diff --git a/src/LocalPost.KafkaConsumer/MessageSource.cs b/src/LocalPost.KafkaConsumer/MessageSource.cs deleted file mode 100644 index 76a114f..0000000 --- a/src/LocalPost.KafkaConsumer/MessageSource.cs +++ /dev/null @@ -1,123 +0,0 @@ -using LocalPost.DependencyInjection; - -namespace LocalPost.KafkaConsumer; - -internal sealed class MessageSource(KafkaTopicClient client) - : IBackgroundService, INamedService, IAsyncEnumerable> -{ - private bool _stopped; - - public string Name => client.Name; - - public async IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) - { - // TODO Transaction activity... 
- - // Give the control back in the beginning, just before blocking in the Kafka's consumer call - await Task.Yield(); - foreach (var result in Consume(ct)) - yield return result; - } - - // Run on a separate thread, as Confluent Kafka API is blocking - public Task StartAsync(CancellationToken ct) => Task.Run(client.Subscribe, ct); - - // Run this (possibly) blocking & long-running task in a separate thread?.. - public Task ExecuteAsync(CancellationToken ct) => Task.CompletedTask; - - // Run on a separate thread, as Confluent Kafka API is blocking - public Task StopAsync(CancellationToken ct) => Task.Run(() => - { - _stopped = true; - - // TODO Wait for all the pipelines to finish... - client.Close(); - }, ct); - - private IEnumerable> Consume(CancellationToken ct) - { - // TODO Transaction activity... - - while (!ct.IsCancellationRequested && !_stopped) - yield return client.Read(ct); - - ct.ThrowIfCancellationRequested(); - } -} - - - -// internal sealed class MessageSource : IAsyncEnumerable> -// { -// private readonly ConcurrentBuffer> _source; -// -// public MessageSource(KafkaTopicClient client) : base(client) -// { -// _source = ConsumeAsync().ToConcurrentBuffer(); -// } -// -// // Run this (possibly) blocking & long-running task in a separate thread?.. 
-// public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); -// -// public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => -// _source.GetAsyncEnumerator(ct); -// } -// -// internal sealed class BatchMessageSource : MessageSourceBase, IAsyncEnumerable> -// { -// private readonly ConcurrentBuffer> _source; -// -// public BatchMessageSource(KafkaTopicClient client, -// BatchBuilderFactory, BatchConsumeContext> factory) : base(client) -// { -// _source = ConsumeAsync().Batch(factory).ToConcurrentBuffer(); -// } -// -// public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); -// -// public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => -// _source.GetAsyncEnumerator(ct); -// } -// -// internal abstract class MessageSourceBase(KafkaTopicClient client) : IBackgroundService, INamedService -// { -// private bool _stopped; -// -// // Some additional reading: https://devblogs.microsoft.com/premier-developer/the-danger-of-taskcompletionsourcet-class/ -// // private readonly TaskCompletionSource _executionTcs = new(TaskCreationOptions.RunContinuationsAsynchronously); -// -// public string Name => client.Name; -// -// // Run on a separate thread, as Confluent Kafka API is blocking -// public Task StartAsync(CancellationToken ct) => Task.Run(client.Subscribe, ct); -// -// public abstract Task ExecuteAsync(CancellationToken ct); -// -// protected async IAsyncEnumerable> ConsumeAsync( -// [EnumeratorCancellation] CancellationToken ct = default) -// { -// // TODO Transaction activity... -// -// // Give the control back in the beginning, just before blocking in the Kafka's consumer call -// await Task.Yield(); -// foreach (var result in Consume(ct)) -// yield return result; -// } -// -// private IEnumerable> Consume(CancellationToken ct) -// { -// // TODO Transaction activity... 
-// -// while (!ct.IsCancellationRequested && !_stopped) -// yield return client.Read(ct); -// -// ct.ThrowIfCancellationRequested(); -// } -// -// // Run on a separate thread, as Confluent Kafka API is blocking -// public Task StopAsync(CancellationToken ct) => Task.Run(() => -// { -// _stopped = true; -// client.Close(); -// }, ct); -// } diff --git a/src/LocalPost.KafkaConsumer/OffsetManager.cs b/src/LocalPost.KafkaConsumer/OffsetManager.cs deleted file mode 100644 index 1e0d06c..0000000 --- a/src/LocalPost.KafkaConsumer/OffsetManager.cs +++ /dev/null @@ -1,58 +0,0 @@ -using Confluent.Kafka; - -namespace LocalPost.KafkaConsumer; - -internal class OffsetManager -{ - // partition -> next offset - private readonly Dictionary _offsets = new(); - - public void Register(TopicPartitionOffset topicPartitionOffset) - { - lock (_offsets) - { - if (!_offsets.ContainsKey(topicPartitionOffset.TopicPartition)) - _offsets[topicPartitionOffset.TopicPartition] = NextOffset.From(topicPartitionOffset); - } - } - - public async ValueTask WaitToStore(TopicPartitionOffset topicPartitionOffset) - { - var offset = topicPartitionOffset.Offset; - var topicPartition = topicPartitionOffset.TopicPartition; -// if (!_offsets.ContainsKey(topicPartition)) -// throw new ArgumentOutOfRangeException(nameof(topicPartitionOffset), "Unknown topic partition"); - - var completed = false; - while (!completed) - { - NextOffset nextOffset; - lock (_offsets) - { - nextOffset = _offsets[topicPartition]; - completed = nextOffset.Offset >= offset; - if (completed) - _offsets[topicPartition] = nextOffset.Next(); - } - - if (completed) - nextOffset.Complete(); - else - await nextOffset.Completed; - } - } -} - -internal readonly record struct NextOffset(Offset Offset) -{ - // See https://devblogs.microsoft.com/premier-developer/the-danger-of-taskcompletionsourcet-class/#conclusion - private readonly TaskCompletionSource _completionSource = new(TaskCreationOptions.RunContinuationsAsynchronously); - - public Task 
Completed => _completionSource.Task; - - public NextOffset Next() => new(Offset + 1); - - public void Complete() => _completionSource.SetResult(true); - - public static NextOffset From(TopicPartitionOffset topicPartitionOffset) => new(topicPartitionOffset.Offset); -} diff --git a/src/LocalPost.KafkaConsumer/Options.cs b/src/LocalPost.KafkaConsumer/Options.cs index 7bab681..9168e7a 100644 --- a/src/LocalPost.KafkaConsumer/Options.cs +++ b/src/LocalPost.KafkaConsumer/Options.cs @@ -1,146 +1,25 @@ using System.ComponentModel.DataAnnotations; using Confluent.Kafka; -using JetBrains.Annotations; -using Microsoft.Extensions.Options; namespace LocalPost.KafkaConsumer; -[PublicAPI] -public sealed class ConsumerOptions : ConsumerConfig +public sealed record ConsumerOptions { - public ConsumerOptions() - { - EnableAutoOffsetStore = false; // We will store offsets manually, see Acknowledge middleware - } - - [Required] - public string Topic { get; set; } = null!; - - internal void EnrichFrom(Config config) - { - foreach (var kv in config) - Set(kv.Key, kv.Value); - } - - internal void UpdateFrom(ConsumerOptions other) - { - EnrichFrom(other); - Topic = other.Topic; - } -} + public ConsumerConfig ClientConfig { get; set; } = new(); + // public required ConsumerConfig ClientConfig { get; init; } = new() + // { + // EnableAutoOffsetStore = false // We will store offsets manually, see Acknowledge middleware + // }; -[PublicAPI] -public sealed class DefaultPipelineOptions -{ - public void Deconstruct(out ConsumerOptions consumer, out DefaultPipelineOptions pipeline) - { - consumer = Consume; - pipeline = this; - } - - public ConsumerOptions Consume { get; } = new(); + [MinLength(1)] + public ISet Topics { get; set; } = new HashSet(); [Range(1, ushort.MaxValue)] - public ushort MaxConcurrency { get; set; } = 10; - - // [Range(1, ushort.MaxValue)] - // public ushort Prefetch { get; set; } = 10; - - // /// - // /// Stop the consumer in case of an exception in the handler, or just log 
it and continue the processing loop. - // /// Default is true. - // /// - // public bool BreakOnException { get; set; } = true; + public ushort Consumers { get; set; } = 1; - public static implicit operator Pipeline.ConsumerOptions(DefaultPipelineOptions options) => new() - { - MaxConcurrency = options.MaxConcurrency, - BreakOnException = false, - }; -} - -[PublicAPI] -public sealed record DefaultBatchPipelineOptions -{ - public void Deconstruct(out ConsumerOptions consumer, out DefaultBatchPipelineOptions pipeline) + internal void EnrichFrom(Config config) { - consumer = Consume; - pipeline = this; + foreach (var kv in config) + ClientConfig.Set(kv.Key, kv.Value); } - - public ConsumerOptions Consume { get; } = new(); - - [Range(1, ushort.MaxValue)] - public ushort MaxConcurrency { get; set; } = 10; - - // [Range(1, ushort.MaxValue)] - // public ushort Prefetch { get; set; } = 10; - - // /// - // /// Stop the consumer in case of an exception in the handler, or just log it and continue the processing loop. - // /// Default is true. 
- // /// - // public bool BreakOnException { get; set; } = true; - - [Range(1, ushort.MaxValue)] - public ushort BatchMaxSize { get; set; } = 10; - - [Range(1, ushort.MaxValue)] - public int TimeWindowMs { get; set; } = 1_000; - - public static implicit operator Pipeline.ConsumerOptions(DefaultBatchPipelineOptions options) => new() - { - MaxConcurrency = options.MaxConcurrency, - BreakOnException = false, - }; - - public static implicit operator BatchOptions(DefaultBatchPipelineOptions options) => new() - { - MaxSize = options.BatchMaxSize, - TimeWindowDuration = options.TimeWindowMs, - }; -} - -[PublicAPI] -public static class OptionsBuilderEx -{ - public static OptionsBuilder Configure( - this OptionsBuilder builder, - Action configure) => - builder.Configure(options => - { - var (consumer, pipeline) = options; - - configure(consumer, pipeline); - }); - - public static OptionsBuilder ConfigureConsumer( - this OptionsBuilder builder, - Action configure) => - builder.Configure(options => - { - var (consumer, _) = options; - - configure(consumer); - }); - - public static OptionsBuilder Configure( - this OptionsBuilder builder, - Action configure) => - builder.Configure(options => - { - var (consumer, pipeline) = options; - - configure(consumer, pipeline); - }); - - public static OptionsBuilder ConfigureConsumer( - this OptionsBuilder builder, - Action configure) => - builder.Configure(options => - { - var (consumer, _) = options; - - configure(consumer); - }); } diff --git a/src/LocalPost.KafkaConsumer/Tracing.cs b/src/LocalPost.KafkaConsumer/Tracing.cs index 5eccf5a..8f2847a 100644 --- a/src/LocalPost.KafkaConsumer/Tracing.cs +++ b/src/LocalPost.KafkaConsumer/Tracing.cs @@ -10,7 +10,7 @@ internal static class MessageUtils public static void ExtractTraceFieldFromHeaders(object? carrier, string fieldName, out string? fieldValue, out IEnumerable? 
fieldValues) { - fieldValues = default; + fieldValues = null; fieldValue = null; if (carrier is not IEnumerable message) return; @@ -21,6 +21,7 @@ public static void ExtractTraceFieldFromHeaders(object? carrier, string fieldNam } } +// See https://opentelemetry.io/docs/specs/semconv/messaging/kafka/ internal static class KafkaActivityExtensions { public static void AcceptDistributedTracingFrom(this Activity activity, Message message) @@ -42,34 +43,43 @@ public static void AcceptDistributedTracingFrom(this Activity acti activity.AddBaggage(baggageItem.Key, baggageItem.Value); } - public static Activity? SetDefaultTags(this Activity? activity, KafkaTopicClient client) + public static Activity? SetDefaultTags(this Activity? activity, Client client) { // See https://opentelemetry.io/docs/specs/semconv/messaging/messaging-spans/#messaging-attributes activity?.SetTag("messaging.system", "kafka"); - activity?.SetTag("messaging.destination.name", client.Topic); - activity?.SetTag("messaging.kafka.consumer.group", client.GroupId); + // activity?.SetTag("messaging.kafka.consumer.group", context.ClientConfig.GroupId); + activity?.SetTag("messaging.consumer.group.name", client.Config.GroupId); - // activity?.SetTag("messaging.client_id", "service_name"); - // activity?.SetTag("server.address", client.QueueUrl.Host); - // activity?.SetTag("server.port", client.QueueUrl.Port); + // activity?.SetTag("messaging.client.id", "service_name"); + // activity?.SetTag("server.address", context.ClientConfig.BootstrapServers); + // activity?.SetTag("server.port", context.ClientConfig.BootstrapServers); return activity; } public static Activity? SetTagsFor(this Activity? 
activity, ConsumeContext context) { + // See https://github.com/open-telemetry/opentelemetry-specification/issues/2971#issuecomment-1324621326 // activity?.SetTag("messaging.message.id", context.MessageId); - activity?.SetTag("messaging.kafka.message.offset", context.NextOffset.Offset.Value); + activity?.SetTag("messaging.destination.name", context.Topic); + activity?.SetTag("messaging.destination.partition.id", context.ConsumeResult.Partition.Value); + activity?.SetTag("messaging.kafka.message.offset", (long)context.ConsumeResult.Offset); + + activity?.SetTag("messaging.message.body.size", context.Message.Value.Length); + + // TODO messaging.operation.type // Skip, as we always ignore the key on consumption // activity.SetTag("messaging.kafka.message.key", context.Message.Key); + // TODO error.type + return activity; } - public static Activity? SetTagsFor(this Activity? activity, IReadOnlyCollection> batch) => - activity?.SetTag("messaging.batch.message_count", batch.Count); + // public static Activity? SetTagsFor(this Activity? activity, IReadOnlyCollection> batch) => + // activity?.SetTag("messaging.batch.message_count", batch.Count); } // Npgsql as an inspiration: @@ -92,7 +102,7 @@ static Tracing() public static Activity? StartProcessing(ConsumeContext context) { - var activity = Source.CreateActivity($"{context.Client.Topic} process", ActivityKind.Consumer); + var activity = Source.CreateActivity($"{context.Topic} process", ActivityKind.Consumer); if (activity is { IsAllDataRequested: true }) { activity.SetDefaultTags(context.Client); @@ -104,20 +114,4 @@ static Tracing() return activity; } - - public static Activity? StartProcessing(IReadOnlyCollection> batch) - { - var client = batch.First().Client; - // It is actually can be multiple topics, it is possible to subscribe using a pattern... 
- var activity = Source.StartActivity($"{client.Topic} process", ActivityKind.Consumer); - if (activity is not { IsAllDataRequested: true }) - return activity; - - activity.SetDefaultTags(client); - activity.SetTagsFor(batch); - - // TODO Accept distributed tracing headers, per each message... - - return activity; - } } diff --git a/src/LocalPost.KafkaConsumer/globalusings.cs b/src/LocalPost.KafkaConsumer/globalusings.cs new file mode 100644 index 0000000..2f865c2 --- /dev/null +++ b/src/LocalPost.KafkaConsumer/globalusings.cs @@ -0,0 +1,3 @@ +global using JetBrains.Annotations; +global using System.Diagnostics.CodeAnalysis; +global using Microsoft.Extensions.Logging; diff --git a/src/LocalPost.Resilience/LocalPost.Resilience.csproj b/src/LocalPost.Resilience/LocalPost.Resilience.csproj deleted file mode 100644 index 68ee3b2..0000000 --- a/src/LocalPost.Resilience/LocalPost.Resilience.csproj +++ /dev/null @@ -1,58 +0,0 @@ - - - - net6.0;net8.0 - true - - false - - LocalPost.Resilience - Alexey Shokov - Polly integration for LocalPost - https://github.com/alexeyshockov/LocalPost/v$(Version) - background;task;queue;retry;timeout - - README.md - MIT - https://github.com/alexeyshockov/LocalPost - git - true - - - - - - - - - - true - - - - true - true - true - true - snupkg - - - true - - - - - - - - - - - - - - - - - - diff --git a/src/LocalPost.Resilience/README.md b/src/LocalPost.Resilience/README.md deleted file mode 100644 index ef80096..0000000 --- a/src/LocalPost.Resilience/README.md +++ /dev/null @@ -1 +0,0 @@ -# LocalPost Polly integration diff --git a/src/LocalPost.SqsConsumer/ConsumeContext.cs b/src/LocalPost.SqsConsumer/ConsumeContext.cs index 3f2ca50..a2b082b 100644 --- a/src/LocalPost.SqsConsumer/ConsumeContext.cs +++ b/src/LocalPost.SqsConsumer/ConsumeContext.cs @@ -1,17 +1,7 @@ -using System.Collections.Immutable; using Amazon.SQS.Model; -using JetBrains.Annotations; -using LocalPost.AsyncEnumerable; namespace LocalPost.SqsConsumer; -// internal static 
class ConsumeContext -// { -// public static BatchBuilderFactory, BatchConsumeContext> BatchBuilder( -// MaxSize batchMaxSizeSize, TimeSpan timeWindow) => ct => -// new BatchConsumeContext.Builder(batchMaxSizeSize, timeWindow, ct); -// } - [PublicAPI] public readonly record struct ConsumeContext { @@ -57,45 +47,11 @@ public async Task> Transform(Func, Transform(await transform(this)); public static implicit operator T(ConsumeContext context) => context.Payload; -} -// [PublicAPI] -// public readonly record struct BatchConsumeContext -// { -// internal sealed class Builder(MaxSize batchMaxSize, TimeSpan timeWindowDuration) -// : BoundedBatchBuilderBase, BatchConsumeContext>(batchMaxSize, timeWindowDuration) -// { -// // TODO Batch.DrainToImmutable() -// public override BatchConsumeContext Build() => new(Batch.DrainToImmutable()); -// } -// -// public readonly ImmutableArray> Messages; -// -// public int Count => Messages.Length; -// -// internal BatchConsumeContext(ImmutableArray> messages) -// { -// if (messages.Length == 0) -// throw new ArgumentException("Batch must contain at least one message", nameof(messages)); -// -// Messages = messages; -// } -// -// public BatchConsumeContext Transform(IEnumerable> payload) => new(payload.ToImmutableArray()); -// -// public BatchConsumeContext Transform(IEnumerable batchPayload) => -// Transform(Messages.Zip(batchPayload, (message, payload) => message.Transform(payload))); -// -// public BatchConsumeContext Transform(Func, TOut> transform) => -// // Parallel LINQ?.. 
-// Transform(Messages.Select(transform)); -// -// public async Task> Transform(Func, Task> transform) -// { -// // TODO AsSpan, to immutable array without allocations -// var messages = await Task.WhenAll(Messages.Select(transform)); -// return Transform(messages); -// } -// -// internal QueueClient Client => Messages[0].Client; -// } + public Task DeleteMessage(CancellationToken ct = default) => Client.DeleteMessage(this, ct); + + public Task Acknowledge(CancellationToken ct = default) => DeleteMessage(ct); + + // public Task ChangeMessageVisibility(TimeSpan visibilityTimeout, CancellationToken ct = default) => + // Client.ChangeMessageVisibility(this, visibilityTimeout, ct); +} diff --git a/src/LocalPost.SqsConsumer/Consumer.cs b/src/LocalPost.SqsConsumer/Consumer.cs new file mode 100644 index 0000000..15044e8 --- /dev/null +++ b/src/LocalPost.SqsConsumer/Consumer.cs @@ -0,0 +1,118 @@ +using Amazon.Runtime; +using Amazon.SQS; +using LocalPost.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; +using Microsoft.Extensions.Hosting; + +namespace LocalPost.SqsConsumer; + +internal sealed class Consumer(string name, ILogger logger, IAmazonSQS sqs, + ConsumerOptions settings, Handler> handler) + : IHostedService, IHealthAwareService, IDisposable +{ + private sealed class ReadinessHealthCheck(Consumer consumer) : IHealthCheck + { + public Task CheckHealthAsync(HealthCheckContext context, CancellationToken ct = default) => + Task.FromResult(consumer.Ready); + } + + private CancellationTokenSource? _execTokenSource; + private Task? _execution; + private Exception? _execException; + private string? 
_execExceptionDescription; + + public string Name { get; } = name; + + private HealthCheckResult Ready => (_execTokenSource, _execution, _execException) switch + { + (null, _, _) => HealthCheckResult.Unhealthy("Not started"), + (_, { IsCompleted: true }, _) => HealthCheckResult.Unhealthy("Stopped"), + (not null, null, _) => HealthCheckResult.Degraded("Starting"), + (not null, not null, null) => HealthCheckResult.Healthy("Running"), + (_, _, not null) => HealthCheckResult.Unhealthy(_execExceptionDescription, _execException), + }; + + public IHealthCheck ReadinessCheck => new ReadinessHealthCheck(this); + + private async Task RunConsumerAsync(QueueClient client, CancellationToken execToken) + { + // (Optionally) wait for app start + + try + { + while (!execToken.IsCancellationRequested) + { + var messages = await client.PullMessages(execToken).ConfigureAwait(false); + await Task.WhenAll(messages + .Select(message => new ConsumeContext(client, message, message.Body)) + .Select(context => handler(context, CancellationToken.None).AsTask())) + .ConfigureAwait(false); + } + } + catch (OperationCanceledException e) when (e.CancellationToken == execToken) + { + // logger.LogInformation("SQS consumer shutdown"); + } + catch (AmazonServiceException e) + { + logger.LogCritical(e, "SQS consumer error: {ErrorCode} (see {HelpLink})", e.ErrorCode, e.HelpLink); + (_execException, _execExceptionDescription) = (e, "SQS consumer failed"); + } + catch (Exception e) + { + logger.LogCritical(e, "SQS message handler error"); + (_execException, _execExceptionDescription) = (e, "Message handler failed"); + } + finally + { + CancelExecution(); // Stop other consumers too + } + } + + public async Task StartAsync(CancellationToken ct) + { + if (_execTokenSource is not null) + throw new InvalidOperationException("Service is already started"); + + var execTokenSource = _execTokenSource = new CancellationTokenSource(); + + var client = new QueueClient(logger, sqs, settings); + await 
client.Connect(ct).ConfigureAwait(false); + + _execution = ObserveExecution(); + return; + + async Task ObserveExecution() + { + var execution = settings.Consumers switch + { + 1 => RunConsumerAsync(client, execTokenSource.Token), + _ => Task.WhenAll(Enumerable + .Range(0, settings.Consumers) + .Select(_ => RunConsumerAsync(client, execTokenSource.Token))) + }; + await execution.ConfigureAwait(false); + // Can happen before the service shutdown, in case of an error + logger.LogInformation("SQS consumer stopped"); + } + } + + // await _execTokenSource.CancelAsync(); // .NET 8+ + private void CancelExecution() => _execTokenSource?.Cancel(); + + public async Task StopAsync(CancellationToken forceShutdownToken) + { + if (_execTokenSource is null) + throw new InvalidOperationException("Service has not been started"); + + logger.LogInformation("Shutting down SQS consumer"); + CancelExecution(); + if (_execution is not null) + await _execution.ConfigureAwait(false); + } + + public void Dispose() + { + _execTokenSource?.Dispose(); + } +} diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecksBuilderEx.cs b/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecksBuilderEx.cs index a2b76a2..591cd0e 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecksBuilderEx.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/HealthChecksBuilderEx.cs @@ -4,19 +4,19 @@ namespace LocalPost.SqsConsumer.DependencyInjection; +[PublicAPI] public static class HealthChecksBuilderEx { - // TODO AddSqsConsumersLivenessCheck() — simply for all the registered consumers + public static IHealthChecksBuilder AddSqsConsumer(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = null, IEnumerable? tags = null) => + builder.Add(HealthChecks.Readiness(name, failureStatus, tags)); - // Check if the same check is added twice?.. + public static IHealthChecksBuilder AddSqsConsumers(this IHealthChecksBuilder builder, + HealthStatus? 
failureStatus = null, IEnumerable? tags = null) + { + foreach (var name in builder.Services.GetKeysFor().OfType()) + AddSqsConsumer(builder, name, failureStatus, tags); - public static IHealthChecksBuilder AddSqsConsumerLivenessCheck(this IHealthChecksBuilder builder, - string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder - .Add(HealthChecks.LivenessCheck(name, failureStatus, tags)) - .AddPipelineLivenessCheck(name); - - // public static IHealthChecksBuilder AddSqsBatchConsumerLivenessCheck(this IHealthChecksBuilder builder, - // string name, HealthStatus? failureStatus = default, IEnumerable? tags = default) => builder - // .Add(HealthChecks.LivenessCheckForNamed(name, failureStatus, tags)) - // .AddNamedConsumerLivenessCheck>(name); + return builder; + } } diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceCollectionEx.cs b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceCollectionEx.cs index 8846e39..8fa53ec 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/ServiceCollectionEx.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/ServiceCollectionEx.cs @@ -1,5 +1,3 @@ -using JetBrains.Annotations; -using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; namespace LocalPost.SqsConsumer.DependencyInjection; @@ -13,8 +11,4 @@ public static IServiceCollection AddSqsConsumers(this IServiceCollection service return services; } - - internal static bool TryAddQueueClient(this IServiceCollection services, string name) => - services.TryAddNamedSingleton(name, provider => - ActivatorUtilities.CreateInstance(provider, name)); } diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs index 0994e15..3efdef2 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs @@ -1,5 +1,4 @@ -using System.Collections.Immutable; -using 
JetBrains.Annotations; +using Amazon.SQS; using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Options; @@ -12,126 +11,34 @@ public sealed class SqsBuilder(IServiceCollection services) public OptionsBuilder Defaults { get; } = services.AddOptions(); /// - /// Default batch consumer pipeline. + /// Add an SQS consumer with a custom message handler. /// - /// Consumer name (also the default queue name). Should be unique in the application. - /// Handler factory. - /// Pipeline options builder. - public OptionsBuilder AddBatchConsumer(string name, - HandlerFactory>> hf) - { - var defaultPipeline = Pipeline - .Create(hf, provider => provider.GetOptions(name)) - .Batch(provider => provider.GetOptions(name)) - .Buffer(provider => provider.GetOptions(name).Prefetch); - - Add(name, defaultPipeline) - .Configure>((options, pipelineOptions) => - options.UpdateFrom(pipelineOptions.Get(name).Consume)); - - return BatchedConsumerFor(name).Configure>((options, globalOptions) => - { - options.Consume.UpdateFrom(globalOptions.Value); - options.Consume.QueueName = name; - }); - } - - /// - /// Default consumer pipeline. - /// - /// Consumer name (also the default queue name). Should be unique in the application. - /// Handler factory. - /// Pipeline options builder. - public OptionsBuilder AddConsumer(string name, HandlerFactory> hf) - { - var defaultPipeline = Pipeline - .Create(hf, provider => provider.GetOptions(name)) - .Buffer(provider => provider.GetOptions(name).Prefetch); - - Add(name, defaultPipeline) - .Configure>((options, pipelineOptions) => - options.UpdateFrom(pipelineOptions.Get(name).Consume)); - - return ConsumerFor(name).Configure>((options, globalOptions) => - { - options.Consume.UpdateFrom(globalOptions.Value); - options.Consume.QueueName = name; - }); - } - - /// - /// Custom consumer pipeline. - /// - /// Consumer name (also the default queue name). Should be unique in the application. 
- /// Pipeline registration. + /// Consumer name (should be unique in the application). Also, the default queue name. + /// Message handler factory. /// Consumer options builder. - internal OptionsBuilder Add(string name, PipelineRegistration> pr) + public OptionsBuilder AddConsumer(string name, HandlerFactory> hf) { - if (string.IsNullOrEmpty(name)) + if (string.IsNullOrEmpty(name)) // TODO Just default (empty?) name... throw new ArgumentException("A proper (non empty) name is required", nameof(name)); - if (!services.TryAddQueueClient(name)) - // return ob; // Already added, don't register twice - throw new InvalidOperationException($"SQS consumer {name} is already registered"); - - services.TryAddNamedSingleton(name, provider => new MessageSource( - provider.GetRequiredService(name) + var added = services.TryAddKeyedSingleton(name, (provider, _) => new Consumer(name, + provider.GetLoggerFor(), + provider.GetRequiredService(), + provider.GetOptions(name), + hf(provider) )); - services.AddBackgroundService(name); - var context = services.RegistrationContextFor(name); - pr(context, provider => provider.GetRequiredService(name)); + if (!added) + throw new ArgumentException("Consumer is already registered", nameof(name)); + + services.AddHostedService(provider => provider.GetRequiredKeyedService(name)); - return PipelineFor(name).Configure>((options, globalOptions) => + return OptionsFor(name).Configure>((co, defaults) => { - options.UpdateFrom(globalOptions.Value); - options.QueueName = name; + co.UpdateFrom(defaults.Value); + co.QueueName = name; }); } - public OptionsBuilder PipelineFor(string name) => services.AddOptions(name); - - public OptionsBuilder ConsumerFor(string name) => - services.AddOptions(name); - - public OptionsBuilder BatchedConsumerFor(string name) => - services.AddOptions(name); - - // TODO Health checks - - - - // public OptionsBuilder AddBatchConsumer(string name, HandlerFactory> hf) - // { - // if (string.IsNullOrEmpty(name)) - // throw new 
ArgumentException("A proper (non empty) name is required", nameof(name)); - // - // if (!services.TryAddQueueClient(name)) - // // return ob; // Already added, don't register twice - // throw new InvalidOperationException("SQS consumer is already registered"); - // - // services.TryAddNamedSingleton(name, provider => - // { - // var options = provider.GetOptions(name); - // - // return new BatchMessageSource(provider.GetRequiredService(name), - // ConsumeContext.BatchBuilder( - // options.BatchMaxSize, TimeSpan.FromMilliseconds(options.BatchTimeWindowMilliseconds))); - // }); - // services.AddBackgroundService(name); - // - // // services.TryAddConsumerGroup, BatchMessageSource>(name, hf, - // // ConsumerOptions.From(o => new ConsumerOptions(o.MaxConcurrency, o.BreakOnException))); - // services.TryAddBackgroundConsumer, BatchMessageSource>(name, hf, provider => - // { - // var options = provider.GetOptions(name); - // return new ConsumerOptions(options.MaxConcurrency, options.BreakOnException); - // }); - // - // return services.AddOptions(name).Configure>((options, commonConfig) => - // { - // options.UpdateFrom(commonConfig.Value); - // options.QueueName = name; - // }); - // } + public OptionsBuilder OptionsFor(string name) => services.AddOptions(name); } diff --git a/src/LocalPost.SqsConsumer/HandlerStackEx.cs b/src/LocalPost.SqsConsumer/HandlerStackEx.cs index c009ca3..504d03c 100644 --- a/src/LocalPost.SqsConsumer/HandlerStackEx.cs +++ b/src/LocalPost.SqsConsumer/HandlerStackEx.cs @@ -1,16 +1,7 @@ using System.Text.Json; -using JetBrains.Annotations; namespace LocalPost.SqsConsumer; -// [PublicAPI] -// public static class PipelineOps -// { -// public static PipelineRegistration Batch(this PipelineRegistration> next, -// ushort batchMaxSize = 10, int timeWindowDuration = 1_000) => next.Map>((stream, _) => -// stream.Batch(() => new BoundedBatchBuilder(batchMaxSize, timeWindowDuration))); -// } - [PublicAPI] public static class HandlerStackEx { @@ -38,69 
+29,24 @@ public static HandlerFactory> Trace(this HandlerFactory>> Trace( - this HandlerFactory>> hf) => - hf.Touch(next => async (batch, ct) => + public static HandlerFactory> Acknowledge(this HandlerFactory> hf) => + hf.Touch(next => async (context, ct) => { - var context = batch.ToList(); // TODO Optimize - // TODO Link distributed transactions from each message - using var activity = Tracing.StartProcessing(context); - try - { - await next(context, ct); - activity?.Success(); - } - catch (Exception e) - { - activity?.Error(e); - throw; - } + await next(context, ct); + await context.Client.DeleteMessage(context, ct); }); - public static HandlerFactory> Acknowledge(this HandlerFactory> hf) => - hf.Touch(next => - async (context, ct) => - { - await next(context, ct); - await context.Client.DeleteMessageAsync(context); // TODO Instrument - }); - - public static HandlerFactory>> Acknowledge( - this HandlerFactory>> hf) => - hf.Touch(next => - async (batch, ct) => - { - var context = batch.ToList(); // TODO Optimize - var client = context.First().Client; - await next(context, ct); - await client.DeleteMessagesAsync(context); - }); - public static HandlerFactory> Deserialize( - this HandlerFactory> handlerStack, Func, T> deserialize) => - handlerStack.Map, ConsumeContext>(next => + this HandlerFactory> hf, Func, T> deserialize) => + hf.Map, ConsumeContext>(next => async (context, ct) => await next(context.Transform(deserialize), ct)); - public static HandlerFactory>> Deserialize( - this HandlerFactory>> hf, Func, T> deserialize) => - hf.Map>, IEnumerable>>(next => - async (batch, ct) => await next(batch.Select(context => context.Transform(deserialize)), ct)); - - // public static HandlerFactory> Deserialize( - // this HandlerFactory> handlerStack, Func, Task> deserialize) => - // handlerStack.Map, ConsumeContext>(next => - // async (context, ct) => await next(await context.Transform(deserialize), ct)); - // - // public static HandlerFactory>> Deserialize( - // this 
HandlerFactory>> hf, Func, Task> deserialize) => - // hf.Map>, IEnumerable>>(next => - // async (context, ct) => await next(await context.Transform(deserialize), ct)); + public static HandlerFactory> Deserialize( + this HandlerFactory> hf, Func, Task> deserialize) => + hf.Map, ConsumeContext>(next => + async (context, ct) => await next(await context.Transform(deserialize), ct)); public static HandlerFactory> DeserializeJson( this HandlerFactory> hf, JsonSerializerOptions? options = null) => hf.Deserialize(context => JsonSerializer.Deserialize(context.Payload, options)!); - - public static HandlerFactory>> DeserializeJson( - this HandlerFactory>> hf, JsonSerializerOptions? options = null) => - hf.Deserialize(context => JsonSerializer.Deserialize(context.Payload, options)!); } diff --git a/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj b/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj index a6d6685..dfedf91 100644 --- a/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj +++ b/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj @@ -44,7 +44,7 @@ - + diff --git a/src/LocalPost.SqsConsumer/MessageSource.cs b/src/LocalPost.SqsConsumer/MessageSource.cs deleted file mode 100644 index cde0901..0000000 --- a/src/LocalPost.SqsConsumer/MessageSource.cs +++ /dev/null @@ -1,99 +0,0 @@ -using System.Runtime.CompilerServices; -using LocalPost.AsyncEnumerable; -using LocalPost.DependencyInjection; - -namespace LocalPost.SqsConsumer; - -internal sealed class MessageSource(QueueClient client) - : IBackgroundService, INamedService, IAsyncEnumerable> -{ - private bool _stopped; - - public string Name => client.Name; - - public async Task StartAsync(CancellationToken ct) => await client.ConnectAsync(ct); - - public Task ExecuteAsync(CancellationToken ct) => Task.CompletedTask; - - public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => - ConsumeAsync(ct).GetAsyncEnumerator(ct); - - public Task StopAsync(CancellationToken ct) - { - _stopped = true; - - 
return Task.CompletedTask; - } - - private async IAsyncEnumerable> ConsumeAsync( - [EnumeratorCancellation] CancellationToken ct = default) - { - while (!ct.IsCancellationRequested && !_stopped) - foreach (var message in await client.PullMessagesAsync(ct)) - yield return new ConsumeContext(client, message, message.Body); - - ct.ThrowIfCancellationRequested(); - } -} - - - -// internal sealed class MessageSource : MessageSourceBase, IAsyncEnumerable> -// { -// private readonly ConcurrentBuffer> _source; -// -// public MessageSource(QueueClient client, int prefetch) : base(client) -// { -// _source = ConsumeAsync().ToConcurrentBuffer(prefetch); -// } -// -// public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); -// -// public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => -// _source.GetAsyncEnumerator(ct); -// } -// -// internal sealed class BatchMessageSource : MessageSourceBase, IAsyncEnumerable> -// { -// private readonly ConcurrentBuffer> _source; -// -// // TODO Make a note that Prefetch does not play a role here, with batch processing... 
-// public BatchMessageSource(QueueClient client, -// BatchBuilderFactory, BatchConsumeContext> factory) : base(client) -// { -// _source = ConsumeAsync().Batch(factory).ToConcurrentBuffer(); -// } -// -// public override async Task ExecuteAsync(CancellationToken ct) => await _source.Run(ct); -// -// public IAsyncEnumerator> GetAsyncEnumerator(CancellationToken ct) => -// _source.GetAsyncEnumerator(ct); -// } -// -// internal abstract class MessageSourceBase(QueueClient client) : IBackgroundService, INamedService -// { -// private bool _stopped; -// -// public string Name => client.Name; -// -// public async Task StartAsync(CancellationToken ct) => await client.ConnectAsync(ct); -// -// public abstract Task ExecuteAsync(CancellationToken ct); -// -// protected async IAsyncEnumerable> ConsumeAsync( -// [EnumeratorCancellation] CancellationToken ct = default) -// { -// while (!ct.IsCancellationRequested && !_stopped) -// foreach (var message in await client.PullMessagesAsync(ct)) -// yield return new ConsumeContext(client, message, message.Body); -// -// ct.ThrowIfCancellationRequested(); -// } -// -// public Task StopAsync(CancellationToken ct) -// { -// _stopped = true; -// -// return Task.CompletedTask; -// } -// } diff --git a/src/LocalPost.SqsConsumer/Middlewares.cs b/src/LocalPost.SqsConsumer/Middlewares.cs deleted file mode 100644 index 4df5539..0000000 --- a/src/LocalPost.SqsConsumer/Middlewares.cs +++ /dev/null @@ -1,35 +0,0 @@ -using System.Collections.Immutable; -using Microsoft.Extensions.DependencyInjection; - -namespace LocalPost.SqsConsumer; - -// internal static class Middlewares -// { -// public static HandlerMiddleware, ConsumeContext> Acknowledge(IServiceProvider provider) => -// provider.GetRequiredService().Invoke; -// -// public static HandlerMiddleware, BatchConsumeContext> AcknowledgeBatch( -// IServiceProvider provider) => provider.GetRequiredService().Invoke; -// } -// -// internal sealed class AcknowledgeMiddleware -// { -// private 
readonly ImmutableDictionary _clients; -// -// public AcknowledgeMiddleware(IEnumerable clients) -// { -// _clients = clients.ToImmutableDictionary(client => client.Name, client => client); -// } -// -// public Handler> Invoke(Handler> next) => async (context, ct) => -// { -// await next(context, ct); -// await _clients[context.ClientName].DeleteMessageAsync(context); -// }; -// -// public Handler> Invoke(Handler> next) => async (context, ct) => -// { -// await next(context, ct); -// await _clients[context.ClientName].DeleteMessagesAsync(context); -// }; -// } diff --git a/src/LocalPost.SqsConsumer/Options.cs b/src/LocalPost.SqsConsumer/Options.cs index 9ce9e10..c106416 100644 --- a/src/LocalPost.SqsConsumer/Options.cs +++ b/src/LocalPost.SqsConsumer/Options.cs @@ -57,6 +57,10 @@ public sealed class EndpointOptions [PublicAPI] public sealed class ConsumerOptions { + [Range(1, ushort.MaxValue)] + // public ushort MaxConcurrency { get; set; } = 10; + public ushort Consumers { get; set; } = 1; + /// /// Time to wait for available messages in the queue. 0 is short pooling, where 1..20 activates long pooling. /// Default is 20. @@ -71,8 +75,11 @@ public sealed class ConsumerOptions public byte WaitTimeSeconds { get; set; } = 20; /// - /// The maximum number of messages to return. Amazon SQS never returns more messages than this value (however, - /// fewer messages might be returned). Valid values: 1 to 10. Default is 1. + /// The maximum number of messages to return. Valid values: 1 to 10. Default is 10. + /// + /// Amazon SQS never returns more messages than this value (however, fewer messages might be returned). + /// + /// All the returned messages will be processed concurrently. /// /// /// Amazon SQS short and long polling @@ -129,51 +136,3 @@ public string? 
QueueUrl } } } - -[PublicAPI] -public sealed class DefaultPipelineOptions -{ - public ConsumerOptions Consume { get; } = new(); - - [Range(1, ushort.MaxValue)] - public ushort MaxConcurrency { get; set; } = 10; - - [Range(1, ushort.MaxValue)] - public ushort Prefetch { get; set; } = 10; - - public static implicit operator Pipeline.ConsumerOptions(DefaultPipelineOptions options) => new() - { - MaxConcurrency = options.MaxConcurrency, - BreakOnException = false, - }; -} - -[PublicAPI] -public sealed record DefaultBatchPipelineOptions -{ - public ConsumerOptions Consume { get; } = new(); - - [Range(1, ushort.MaxValue)] - public ushort MaxConcurrency { get; set; } = 10; - - [Range(1, ushort.MaxValue)] - public ushort Prefetch { get; set; } = 10; - - [Range(1, ushort.MaxValue)] - public ushort BatchMaxSize { get; set; } = 10; - - [Range(1, ushort.MaxValue)] - public int TimeWindowMs { get; set; } = 1_000; - - public static implicit operator Pipeline.ConsumerOptions(DefaultBatchPipelineOptions options) => new() - { - MaxConcurrency = options.MaxConcurrency, - BreakOnException = false, - }; - - public static implicit operator BatchOptions(DefaultBatchPipelineOptions options) => new() - { - MaxSize = options.BatchMaxSize, - TimeWindowDuration = options.TimeWindowMs, - }; -} diff --git a/src/LocalPost.SqsConsumer/QueueClient.cs b/src/LocalPost.SqsConsumer/QueueClient.cs index f8dfb32..2b6fbfa 100644 --- a/src/LocalPost.SqsConsumer/QueueClient.cs +++ b/src/LocalPost.SqsConsumer/QueueClient.cs @@ -1,25 +1,33 @@ +using Amazon.Runtime; using Amazon.SQS; using Amazon.SQS.Model; -using LocalPost.DependencyInjection; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Options; +using Polly; +using Polly.Retry; namespace LocalPost.SqsConsumer; -internal sealed class QueueClient(ILogger logger, IAmazonSQS sqs, ConsumerOptions options, string name) - : INamedService +internal sealed class QueueClient(ILogger logger, IAmazonSQS sqs, ConsumerOptions options) { - public 
QueueClient(ILogger logger, IAmazonSQS sqs, IOptionsMonitor options, string name) : - this(logger, sqs, options.Get(name), name) - { - } - - public string Name { get; } = name; - public string QueueName => options.QueueName; private GetQueueAttributesResponse? _queueAttributes; + private readonly ResiliencePipeline _pipeline = new ResiliencePipelineBuilder() + .AddRetry(new RetryStrategyOptions + { + ShouldHandle = new PredicateBuilder() + .Handle(e => e.Retryable is not null), + + Delay = TimeSpan.FromSeconds(1), + MaxRetryAttempts = byte.MaxValue, + BackoffType = DelayBackoffType.Exponential, + + // Initially, aim for an exponential backoff, but after a certain number of retries, set a maximum delay + MaxDelay = TimeSpan.FromMinutes(1), + UseJitter = true + }) + .Build(); + // TODO Use public TimeSpan? MessageVisibilityTimeout => _queueAttributes?.VisibilityTimeout switch { @@ -30,16 +38,16 @@ public QueueClient(ILogger logger, IAmazonSQS sqs, IOptionsMonitor< private string? _queueUrl; private string QueueUrl => _queueUrl ?? throw new InvalidOperationException("SQS queue client is not connected"); - public async Task ConnectAsync(CancellationToken ct) + public async Task Connect(CancellationToken ct) { if (string.IsNullOrEmpty(options.QueueUrl)) // Checking for a possible error in the response would be also good... 
_queueUrl = (await sqs.GetQueueUrlAsync(options.QueueName, ct)).QueueUrl; - await FetchQueueAttributesAsync(ct); + await FetchQueueAttributes(ct); } - private async Task FetchQueueAttributesAsync(CancellationToken ct) + private async Task FetchQueueAttributes(CancellationToken ct) { try { @@ -56,55 +64,45 @@ private async Task FetchQueueAttributesAsync(CancellationToken ct) } } - public async Task> PullMessagesAsync(CancellationToken ct) + public async Task> PullMessages(CancellationToken ct) => + await _pipeline.ExecuteAsync(PullMessagesCore, ct); + + private async ValueTask> PullMessagesCore(CancellationToken ct) { using var activity = Tracing.StartReceiving(this); - // var attributeNames = EndpointOptions.AllAttributes; // Make configurable, later - // var messageAttributeNames = EndpointOptions.AllMessageAttributes; // Make configurable, later - - // AWS SDK handles network failures, see - // https://docs.aws.amazon.com/sdkref/latest/guide/feature-retry-behavior.html - var response = await sqs.ReceiveMessageAsync(new ReceiveMessageRequest + try { - QueueUrl = QueueUrl, - WaitTimeSeconds = options.WaitTimeSeconds, - MaxNumberOfMessages = options.MaxNumberOfMessages, - AttributeNames = options.AttributeNames, - MessageAttributeNames = options.MessageAttributeNames, - }, ct); - - activity?.SetTagsFor(response); - - return response.Messages; - - // TODO Log failures?.. - -// catch (OverLimitException) -// { -// // Log and try again?.. 
-// } + // AWS SDK handles network failures, see + // https://docs.aws.amazon.com/sdkref/latest/guide/feature-retry-behavior.html + var response = await sqs.ReceiveMessageAsync(new ReceiveMessageRequest + { + QueueUrl = QueueUrl, + WaitTimeSeconds = options.WaitTimeSeconds, + MaxNumberOfMessages = options.MaxNumberOfMessages, + AttributeNames = options.AttributeNames, + MessageAttributeNames = options.MessageAttributeNames, + }, ct); + + activity?.SetTagsFor(response); + + return response.Messages; + } + catch (OperationCanceledException e) when (e.CancellationToken == ct) + { + throw; + } + catch (Exception e) + { + activity?.Error(e); + throw; + } } - public async Task DeleteMessageAsync(ConsumeContext context) + public async Task DeleteMessage(ConsumeContext context, CancellationToken ct = default) { using var activity = Tracing.StartSettling(context); - await sqs.DeleteMessageAsync(QueueUrl, context.ReceiptHandle); - - // TODO Log failures?.. - } - - public async Task DeleteMessagesAsync(IReadOnlyCollection> messages) - { - using var activity = Tracing.StartSettling(messages); - - var requests = messages - .Select((message, i) => new DeleteMessageBatchRequestEntry(i.ToString(), message.ReceiptHandle)) - .Chunk(10) - .Select(entries => entries.ToList()); - - await Task.WhenAll(requests.Select(entries => - sqs.DeleteMessageBatchAsync(QueueUrl, entries))); + await sqs.DeleteMessageAsync(QueueUrl, context.ReceiptHandle, ct); // TODO Log failures?.. } diff --git a/src/LocalPost.SqsConsumer/Tracing.cs b/src/LocalPost.SqsConsumer/Tracing.cs index 6cdd2a2..3852e6f 100644 --- a/src/LocalPost.SqsConsumer/Tracing.cs +++ b/src/LocalPost.SqsConsumer/Tracing.cs @@ -9,7 +9,7 @@ internal static class MessageUtils public static void ExtractTraceField(object? carrier, string fieldName, out string? fieldValue, out IEnumerable? 
fieldValues) { - fieldValues = default; + fieldValues = null; fieldValue = null; if (carrier is not Message message) return; @@ -96,21 +96,6 @@ static Tracing() return activity; } - public static Activity? StartProcessing(IReadOnlyCollection> context) - { - var client = context.First().Client; - var activity = Source.StartActivity($"{client.QueueName} process", ActivityKind.Consumer); - if (activity is not { IsAllDataRequested: true }) - return activity; - - activity.SetDefaultTags(client); - activity.SetTagsFor(context); - - // TODO Accept distributed tracing headers, per each message... - - return activity; - } - public static Activity? StartSettling(ConsumeContext context) { var activity = Source.StartActivity($"{context.Client.QueueName} settle", ActivityKind.Consumer); @@ -123,19 +108,6 @@ static Tracing() return activity; } - public static Activity? StartSettling(IReadOnlyCollection> context) - { - var client = context.First().Client; - var activity = Source.StartActivity($"{client.QueueName} settle", ActivityKind.Consumer); - if (activity is not { IsAllDataRequested: true }) - return activity; - - activity.SetDefaultTags(client); - activity.SetTagsFor(context); - - return activity; - } - public static Activity? 
StartReceiving(QueueClient client) { var activity = Source.StartActivity($"{client.QueueName} receive", ActivityKind.Consumer); diff --git a/src/LocalPost.SqsConsumer/globalusings.cs b/src/LocalPost.SqsConsumer/globalusings.cs new file mode 100644 index 0000000..2f865c2 --- /dev/null +++ b/src/LocalPost.SqsConsumer/globalusings.cs @@ -0,0 +1,3 @@ +global using JetBrains.Annotations; +global using System.Diagnostics.CodeAnalysis; +global using Microsoft.Extensions.Logging; diff --git a/src/LocalPost/ActivityEx.cs b/src/LocalPost/ActivityEx.cs index 933ce2e..a59bba2 100644 --- a/src/LocalPost/ActivityEx.cs +++ b/src/LocalPost/ActivityEx.cs @@ -7,6 +7,10 @@ internal static class ActivityEx // See https://github.com/open-telemetry/opentelemetry-dotnet/blob/core-1.8.1/src/OpenTelemetry.Api/Trace/ActivityExtensions.cs#L81-L105 public static Activity? Error(this Activity? activity, Exception ex, bool escaped = true) { + activity?.SetTag("otel.status_code", "ERROR"); + // activity.SetTag("otel.status_description", ex is PostgresException pgEx ? pgEx.SqlState : ex.Message); + activity?.SetTag("otel.status_description", ex.Message); + var tags = new ActivityTagsCollection { { "exception.type", ex.GetType().FullName }, @@ -16,10 +20,6 @@ internal static class ActivityEx }; activity?.AddEvent(new ActivityEvent("exception", tags: tags)); - activity?.SetTag("otel.status_code", "ERROR"); - // activity.SetTag("otel.status_description", ex is PostgresException pgEx ? 
pgEx.SqlState : ex.Message); - activity?.SetTag("otel.status_description", ex.Message); - return activity; } diff --git a/src/LocalPost/AppHealthSupervisor.cs b/src/LocalPost/AppHealthSupervisor.cs index 49debf7..74c251a 100644 --- a/src/LocalPost/AppHealthSupervisor.cs +++ b/src/LocalPost/AppHealthSupervisor.cs @@ -1,22 +1,22 @@ using System.Collections.Immutable; -using JetBrains.Annotations; using Microsoft.Extensions.Diagnostics.HealthChecks; using Microsoft.Extensions.Hosting; -using Microsoft.Extensions.Logging; namespace LocalPost; [UsedImplicitly] internal sealed class AppHealthSupervisor(ILogger logger, - HealthCheckService healthChecker, IHostApplicationLifetime appLifetime) : IBackgroundService + HealthCheckService healthChecker, IHostApplicationLifetime appLifetime) : BackgroundService { public TimeSpan CheckInterval { get; init; } = TimeSpan.FromSeconds(1); public int ExitCode { get; init; } = 1; public IImmutableSet Tags { get; init; } = ImmutableHashSet.Empty; - public Task StartAsync(CancellationToken ct) => Task.CompletedTask; + private Task Check(CancellationToken ct = default) => Tags.Count == 0 + ? healthChecker.CheckHealthAsync(ct) + : healthChecker.CheckHealthAsync(hcr => Tags.IsSubsetOf(hcr.Tags), ct); - public async Task ExecuteAsync(CancellationToken ct) + protected override async Task ExecuteAsync(CancellationToken ct) { while (!ct.IsCancellationRequested) { @@ -32,10 +32,4 @@ public async Task ExecuteAsync(CancellationToken ct) await Task.Delay(CheckInterval, ct); } } - - private Task Check(CancellationToken ct = default) => Tags.Count == 0 - ? 
healthChecker.CheckHealthAsync(ct) - : healthChecker.CheckHealthAsync(hcr => Tags.IsSubsetOf(hcr.Tags), ct); - - public Task StopAsync(CancellationToken ct) => Task.CompletedTask; } diff --git a/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs b/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs deleted file mode 100644 index 889f1b5..0000000 --- a/src/LocalPost/AsyncEnumerable/AsyncEnumerableEx.cs +++ /dev/null @@ -1,15 +0,0 @@ -using System.Collections.Immutable; - -namespace LocalPost.AsyncEnumerable; - -internal static class AsyncEnumerableEx -{ - public static IAsyncEnumerable> Batch(this IAsyncEnumerable source, - int maxSize, TimeSpan timeWindow) => new BatchingAsyncEnumerable(source, maxSize, timeWindow); - - public static IAsyncEnumerable> Batch(this IAsyncEnumerable source, - int maxSize, int timeWindowMs) => Batch(source, maxSize, TimeSpan.FromMilliseconds(timeWindowMs)); - - public static IAsyncEnumerable Merge(this IEnumerable> sources) => - new AsyncEnumerableMerger(sources); -} diff --git a/src/LocalPost/AsyncEnumerable/AsyncEnumerableMerger.cs b/src/LocalPost/AsyncEnumerable/AsyncEnumerableMerger.cs deleted file mode 100644 index a4c0747..0000000 --- a/src/LocalPost/AsyncEnumerable/AsyncEnumerableMerger.cs +++ /dev/null @@ -1,87 +0,0 @@ -using System.Collections.Immutable; -using System.Diagnostics.CodeAnalysis; -using System.Threading.Channels; - -namespace LocalPost.AsyncEnumerable; - -internal sealed class AsyncEnumerableMerger : IAsyncEnumerable, IDisposable -{ - private readonly ConcurrentSet> _sources; - - public AsyncEnumerableMerger(bool permanent = false) : this(ImmutableArray>.Empty, permanent) - { - } - - public AsyncEnumerableMerger(IEnumerable> sources, bool permanent = false) - { - if (permanent) - // This one IAsyncEnumerable will be there forever, so the wait will be indefinite (even if all other - // sources are completed) - sources = sources.Prepend(Channel.CreateUnbounded(new UnboundedChannelOptions - { - SingleReader = false, - 
SingleWriter = true - }).Reader.ReadAllAsync()); - - _sources = new ConcurrentSet>(sources); - } - - public void Add(IAsyncEnumerable source) => _sources.Add(source); - - [SuppressMessage("ReSharper", "PossibleMultipleEnumeration")] - public async IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct = default) - { - async Task<(IAsyncEnumerable, IAsyncEnumerator, bool)> Wait(IAsyncEnumerable source, IAsyncEnumerator enumerator) => - (source, enumerator, !await enumerator.MoveNextAsync()); - - var sourcesSnapshot = _sources.Elements; - var waits = sourcesSnapshot - .Select(source => Wait(source, source.GetAsyncEnumerator(ct))) - .ToImmutableArray(); - - while (waits.Length > 0) - { - var modificationTrigger = Task.Delay(Timeout.Infinite, _sources.ModificationToken); - var waitTrigger = Task.WhenAny(waits); - - await Task.WhenAny(waitTrigger, modificationTrigger); - - if (waitTrigger.IsCompleted) // New element - { - var completedWait = await waitTrigger; - var (source, enumerator, completed) = await completedWait; - - if (!completed) - { - yield return enumerator.Current; - waits = waits.Replace(completedWait, Wait(source, enumerator)); - } - else - { - waits = waits.Remove(completedWait); - sourcesSnapshot = _sources.Remove(source); - } - } - - // Always check modification trigger explicitly, as both task can complete when the control is back - // (in this case we can miss the trigger completely) - // ReSharper disable once InvertIf - if (modificationTrigger.IsCompleted) - // Wait() _somehow_ can give the control away, so loop until we sure that all the changes are handled - while (sourcesSnapshot != _sources.Elements) - { - var previousSourcesSnapshot = sourcesSnapshot; - sourcesSnapshot = _sources.Elements; - - // ReSharper disable once ForeachCanBeConvertedToQueryUsingAnotherGetEnumerator - foreach (var newSource in sourcesSnapshot.Except(previousSourcesSnapshot)) - waits = waits.Add(Wait(newSource, newSource.GetAsyncEnumerator(ct))); - } - } - } - - public 
void Dispose() - { - _sources.Dispose(); - } -} diff --git a/src/LocalPost/AsyncEnumerable/ConcurrentSet.cs b/src/LocalPost/AsyncEnumerable/ConcurrentSet.cs deleted file mode 100644 index d6bd370..0000000 --- a/src/LocalPost/AsyncEnumerable/ConcurrentSet.cs +++ /dev/null @@ -1,50 +0,0 @@ -using System.Collections; -using System.Collections.Immutable; - -namespace LocalPost.AsyncEnumerable; - -internal sealed class ConcurrentSet(IEnumerable sources) : IEnumerable, IDisposable -{ - private readonly object _modificationLock = new(); - private ImmutableHashSet? _elements = sources.ToImmutableHashSet(); - private CancellationTokenSource _modificationTriggerSource = new(); - - public ImmutableHashSet Elements => _elements ?? throw new ObjectDisposedException(nameof(ConcurrentSet)); - - public CancellationToken ModificationToken => _modificationTriggerSource.Token; - - private ImmutableHashSet ChangeSources(Func, ImmutableHashSet> change) - { - ImmutableHashSet changedSources; - CancellationTokenSource trigger; - lock (_modificationLock) - { - changedSources = change(Elements); - if (changedSources == _elements) - return _elements; // Nothing has changed - - _elements = changedSources; - trigger = _modificationTriggerSource; - _modificationTriggerSource = new CancellationTokenSource(); - } - - trigger.Cancel(); // Notify about the modification - trigger.Dispose(); - - return changedSources; - } - - public ImmutableHashSet Add(T source) => ChangeSources(sources => sources.Add(source)); - - public ImmutableHashSet Remove(T source) => ChangeSources(sources => sources.Remove(source)); - - public IEnumerator GetEnumerator() => Elements.GetEnumerator(); - - IEnumerator IEnumerable.GetEnumerator() => GetEnumerator(); - - public void Dispose() - { - _modificationTriggerSource.Dispose(); - _elements = null; - } -} diff --git a/src/LocalPost/BackgroundQueue.cs b/src/LocalPost/BackgroundQueue.cs index 80ae0b7..4900a82 100644 --- a/src/LocalPost/BackgroundQueue.cs +++ 
b/src/LocalPost/BackgroundQueue.cs @@ -1,20 +1,22 @@ +using System.Threading.Channels; +using LocalPost.BackgroundQueue; + namespace LocalPost; /// -/// Entrypoint for the background queue, inject it where you need to enqueue items. +/// Background queue publisher. /// -/// -public interface IBackgroundQueue +/// Payload type. +public interface IBackgroundQueue { - // TODO Custom exception when closed?.. Or just return true/false?.. ValueTask Enqueue(T payload, CancellationToken ct = default); -} - + ChannelWriter> Writer { get; } +} public delegate Task BackgroundJob(CancellationToken ct); /// -/// Just a convenient alias for . +/// Just a convenient alias for queue. /// public interface IBackgroundJobQueue : IBackgroundQueue; diff --git a/src/LocalPost/BackgroundQueue/BackgroundJobQueue.cs b/src/LocalPost/BackgroundQueue/BackgroundJobQueue.cs index 1f48f2d..a9520f9 100644 --- a/src/LocalPost/BackgroundQueue/BackgroundJobQueue.cs +++ b/src/LocalPost/BackgroundQueue/BackgroundJobQueue.cs @@ -1,11 +1,12 @@ -using JetBrains.Annotations; +using System.Threading.Channels; namespace LocalPost.BackgroundQueue; // Just a proxy to the actual queue, needed to expose IBackgroundJobQueue [UsedImplicitly] -internal sealed class BackgroundJobQueue(BackgroundQueue queue) - : IBackgroundJobQueue +internal sealed class BackgroundJobQueue(IBackgroundQueue queue) : IBackgroundJobQueue { public ValueTask Enqueue(BackgroundJob payload, CancellationToken ct = default) => queue.Enqueue(payload, ct); + + public ChannelWriter> Writer => queue.Writer; } diff --git a/src/LocalPost/BackgroundQueue/BackgroundQueue.cs b/src/LocalPost/BackgroundQueue/BackgroundQueue.cs index 9e52afa..6bbbe43 100644 --- a/src/LocalPost/BackgroundQueue/BackgroundQueue.cs +++ b/src/LocalPost/BackgroundQueue/BackgroundQueue.cs @@ -1,130 +1,125 @@ using System.Threading.Channels; -using LocalPost.AsyncEnumerable; using LocalPost.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; +using 
Microsoft.Extensions.Hosting; namespace LocalPost.BackgroundQueue; -internal static class BackgroundQueue +internal sealed class BackgroundQueue(ILogger> logger, QueueOptions settings, + Handler> handler) : IBackgroundQueue, IHostedService, IHealthAwareService, IDisposable { - // public static BackgroundQueue> Create(Options options) => - // Create>(options, reader => reader.ReadAllAsync()); - - // public static BackgroundQueue>> CreateBatched( - // BatchedOptions options) => - // Create>>(options, - // reader => reader - // .ReadAllAsync() - // .Batch(ct => - // new BoundedBatchBuilder>(options.BatchMaxSize, options.BatchTimeWindow, ct)), - // true); - - public static BackgroundQueue Create(IServiceProvider provider) => - Create(provider.GetOptions>()); - - // To make the pipeline linear (single consumer), just add .ToConcurrent() to the end - public static BackgroundQueue Create(QueueOptions options) + private sealed class ReadinessHealthCheck(BackgroundQueue queue) : IHealthCheck { - // var channel = options.MaxSize switch - // { - // not null => Channel.CreateBounded>(new BoundedChannelOptions(options.MaxSize.Value) - // { - // SingleReader = options.MaxConcurrency == 1, // TODO Communicate SingleReader hint somehow... - // SingleWriter = false, // Accept it in options? Generally, we do not know how the queue will be used - // FullMode = options.FullMode, - // }), - // _ => Channel.CreateUnbounded>(new UnboundedChannelOptions - // { - // SingleReader = options.MaxConcurrency == 1, - // SingleWriter = false, // We do not know how it will be used - // }) - // }; - var channel = options.Channel switch + public Task CheckHealthAsync(HealthCheckContext context, CancellationToken ct = default) => + Task.FromResult(queue.Ready); + } + + private CancellationTokenSource? _execTokenSource; + private Task? _execution; + private Exception? _execException; + private string? 
_execExceptionDescription; + + private HealthCheckResult Ready => (_execTokenSource, _execution, _execException) switch + { + (null, _, _) => HealthCheckResult.Unhealthy("Not started"), + (_, { IsCompleted: true }, _) => HealthCheckResult.Unhealthy("Stopped"), + (not null, null, _) => HealthCheckResult.Degraded("Starting"), + (not null, not null, null) => HealthCheckResult.Healthy("Running"), + (_, _, not null) => HealthCheckResult.Unhealthy(_execExceptionDescription, _execException), + }; + + public IHealthCheck ReadinessCheck => new ReadinessHealthCheck(this); + + private readonly Channel> _queue = settings.BufferSize switch + { + null => Channel.CreateUnbounded>(new UnboundedChannelOptions { - BoundedChannelOptions channelOpt => Channel.CreateBounded>(channelOpt), - UnboundedChannelOptions channelOpt => Channel.CreateUnbounded>(channelOpt), - _ => throw new InvalidOperationException("Unknown channel options") - }; + SingleReader = settings.MaxConcurrency == 1, + SingleWriter = settings.SingleProducer, + }), + _ => Channel.CreateBounded>(new BoundedChannelOptions(settings.BufferSize.Value) + { + FullMode = settings.FullMode, + SingleReader = settings.MaxConcurrency == 1, + SingleWriter = settings.SingleProducer, + }) + }; - return new BackgroundQueue(channel, TimeSpan.FromMilliseconds(options.CompletionDelay)); - } -} + public ValueTask Enqueue(T payload, CancellationToken ct = default) => _queue.Writer.WriteAsync(payload, ct); -internal sealed class BackgroundQueue(Channel> channel, TimeSpan completionDelay) - : IBackgroundQueue, IAsyncEnumerable>, IBackgroundService -{ - public async IAsyncEnumerator> GetAsyncEnumerator(CancellationToken cancellationToken) + public ChannelWriter> Writer => _queue.Writer; + + private async Task RunAsync(CancellationToken execToken) { - var reader = channel.Reader; - while (await reader.WaitToReadAsync(cancellationToken).ConfigureAwait(false)) - while (reader.TryRead(out var item)) - yield return item; + // (Optionally) wait for 
app start + + try + { + await foreach (var message in _queue.Reader.ReadAllAsync(execToken)) + await handler(message, CancellationToken.None); + } + catch (OperationCanceledException e) when (e.CancellationToken == execToken) + { + // logger.LogInformation("Background queue consumer shutdown"); + } + catch (Exception e) + { + logger.LogCritical(e, "Background queue message handler error"); + (_execException, _execExceptionDescription) = (e, "Message handler failed"); + } + finally + { + CloseChannel(); // Stop other consumers too + } } - // Track full or not later - public ValueTask Enqueue(T item, CancellationToken ct = default) => - channel.Writer.WriteAsync(new ConsumeContext(item), ct); + private void CloseChannel() => _queue.Writer.Complete(); - public bool IsClosed { get; private set; } + private void CancelExecution() => _execTokenSource?.Cancel(); - private async ValueTask CompleteAsync(CancellationToken ct = default) + public async Task StartAsync(CancellationToken ct) { - if (IsClosed) - return; + if (_execTokenSource is not null) + throw new InvalidOperationException("Service is already started"); - if (completionDelay.TotalMilliseconds > 0) - await Task.Delay(completionDelay, ct); + var execTokenSource = _execTokenSource = new CancellationTokenSource(); + _execution = ObserveExecution(); + await Task.Yield(); + return; - channel.Writer.Complete(); - IsClosed = true; + async Task ObserveExecution() + { + var execution = settings.MaxConcurrency switch + { + 1 => RunAsync(execTokenSource.Token), + _ => Task.WhenAll(Enumerable.Range(0, settings.MaxConcurrency).Select(_ => RunAsync(execTokenSource.Token))) + }; + await execution.ConfigureAwait(false); + // Can happen before the service shutdown, in case of an error + logger.LogInformation("Background queue stopped"); + } } - public Task StartAsync(CancellationToken ct) => Task.CompletedTask; + public async Task StopAsync(CancellationToken forceShutdownToken) + { + if (_execTokenSource is null) + throw new 
InvalidOperationException("Service has not been started"); - public Task ExecuteAsync(CancellationToken ct) => Task.CompletedTask; + logger.LogInformation("Shutting down background queue"); + try + { + await settings.CompletionTrigger(forceShutdownToken).ConfigureAwait(false); + } + finally + { + CloseChannel(); + } + if (_execution is not null) + await _execution.ConfigureAwait(false); + } - public async Task StopAsync(CancellationToken forceExitToken) => await CompleteAsync(forceExitToken); + public void Dispose() + { + _execTokenSource?.Dispose(); + } } - - - -// internal static partial class BackgroundQueue -// { -// public static readonly string Name = "BackgroundQueue/" + Reflection.FriendlyNameOf(); -// } -// -// internal sealed class BackgroundQueue( -// ChannelWriter> input, -// IAsyncEnumerable pipeline, -// TimeSpan completionDelay) -// : IAsyncEnumerable, IBackgroundService, IBackgroundQueue -// { -// public IAsyncEnumerator GetAsyncEnumerator(CancellationToken ct) => pipeline.GetAsyncEnumerator(ct); -// -// // Track full or not later -// public ValueTask Enqueue(T item, CancellationToken ct = default) => -// input.WriteAsync(new ConsumeContext(item), ct); -// -// public bool IsClosed { get; private set; } -// -// private async ValueTask CompleteAsync(CancellationToken ct = default) -// { -// if (IsClosed) -// return; -// -// if (completionDelay.TotalMilliseconds > 0) -// await Task.Delay(completionDelay, ct); -// -// input.Complete(); -// IsClosed = true; -// } -// -// public Task StartAsync(CancellationToken ct) => Task.CompletedTask; -// -// public Task ExecuteAsync(CancellationToken ct) => pipeline switch -// { -// ConcurrentBuffer concurrent => concurrent.Run(ct), -// _ => Task.CompletedTask -// }; -// -// public async Task StopAsync(CancellationToken forceExitToken) => await CompleteAsync(forceExitToken); -// } diff --git a/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs 
b/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs index 17443d7..8a5861c 100644 --- a/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs +++ b/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs @@ -1,5 +1,3 @@ -using System.Collections.Immutable; -using JetBrains.Annotations; using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Options; @@ -9,102 +7,49 @@ namespace LocalPost.BackgroundQueue.DependencyInjection; [PublicAPI] public class BackgroundQueuesBuilder(IServiceCollection services) { - public OptionsBuilder> AddJobQueue() + public OptionsBuilder> AddDefaultJobQueue() => AddJobQueue( + HandlerStack.For(async (job, ct) => await job(ct).ConfigureAwait(false)) + .Scoped() + .UsePayload() + .Trace() + .LogExceptions() + ); + + // TODO Open later + internal OptionsBuilder> AddJobQueue(HandlerFactory> hf) { services.TryAddSingleton(); services.TryAddSingletonAlias(); - // TODO Allow to configure the handler somehow - return AddQueue( - HandlerStack.For(async (job, ct) => await job(ct)) - .Scoped() - .UsePayload() - .Trace() - ); + return AddQueue(hf); } - public OptionsBuilder> AddBatchedQueue( - HandlerFactory>> hf) + private OptionsBuilder> AddQueue(HandlerFactory> hf) { - var defaultPipeline = Pipeline - .Create(hf, provider => provider.GetOptions>()) - .Buffer(1) // To avoid buffering for each concurrent IAsyncEnumerable consumer - .Batch(provider => provider.GetOptions>()); + // TODO Check if a non-keyed service should be added - Add(defaultPipeline) - .Configure>((options, pipelineOptions) => - options.UpdateFrom(pipelineOptions.Queue)); - - return BatchedQueueFor(); - } - - // // THandler has to be registered by the user - // public OptionsBuilder> AddQueue() where THandler : IHandler => AddQueue( - // // A way to configure the handler?.. 
- // HandlerStack.From() - // .Scoped() - // .UsePayload() - // .Trace() - // ); - - public OptionsBuilder> AddQueue(HandlerFactory> hf) - { - var defaultPipeline = Pipeline - .Create(hf, provider => provider.GetOptions>()); - - Add(defaultPipeline) - .Configure>((options, pipelineOptions) => - options.UpdateFrom(pipelineOptions.Queue)); - - return QueueFor(); + return AddQueue(Options.DefaultName, hf); } - internal OptionsBuilder> Add(PipelineRegistration> pr) + private OptionsBuilder> AddQueue(string name, HandlerFactory> hf) { - if (!services.TryAddSingletonAlias, BackgroundQueue>()) - // return ob; // Already added, don't register twice + if (!services.TryAddSingletonAlias, BackgroundQueue>(name)) throw new InvalidOperationException( - $"{Reflection.FriendlyNameOf>()}> is already registered."); - - services.TryAddSingleton(BackgroundQueue.Create); - services.AddBackgroundService>(); + $"{Reflection.FriendlyNameOf>(name)}> is already registered."); - var context = services.RegistrationContextFor>(); - pr(context, provider => provider.GetRequiredService>()); - // services.TryAddBackgroundConsumer, BackgroundQueue>>(hf, provider => - // { - // var options = provider.GetOptions>(); - // return new ConsumerOptions(options.MaxConcurrency, false); - // }); + services.TryAddKeyedSingleton(name, (provider, _) => new BackgroundQueue( + provider.GetLoggerFor>(), + provider.GetOptions>(name), + hf(provider) + )); + services.AddHostedService(provider => provider.GetRequiredKeyedService>(name)); - return PipelineFor(); + return QueueFor(name); } - public OptionsBuilder> PipelineFor() => services.AddOptions>(); - - public OptionsBuilder> QueueFor() => - services.AddOptions>(); - - public OptionsBuilder> BatchedQueueFor() => - services.AddOptions>(); - - + public OptionsBuilder> QueueFor() => + services.AddOptions>(); - // public OptionsBuilder> AddQueue(HandlerFactory> hf) - // { - // if (!services.TryAddSingletonAlias, BackgroundQueue>>()) - // // return ob; // Already 
added, don't register twice - // throw new InvalidOperationException($"BackgroundQueue<{Reflection.FriendlyNameOf()}> is already registered."); - // - // services.TryAddSingleton(provider => BackgroundQueue.Create(provider.GetOptions>())); - // services.AddBackgroundService>>(); - // - // services.TryAddBackgroundConsumer, BackgroundQueue>>(hf, provider => - // { - // var options = provider.GetOptions>(); - // return new ConsumerOptions(options.MaxConcurrency, false); - // }); - // - // return services.AddOptions>(); - // } + public OptionsBuilder> QueueFor(string name) => + services.AddOptions>(name); } diff --git a/src/LocalPost/BackgroundQueue/DependencyInjection/HealthChecks.cs b/src/LocalPost/BackgroundQueue/DependencyInjection/HealthChecks.cs deleted file mode 100644 index 8ee2993..0000000 --- a/src/LocalPost/BackgroundQueue/DependencyInjection/HealthChecks.cs +++ /dev/null @@ -1,18 +0,0 @@ -using JetBrains.Annotations; -using LocalPost.DependencyInjection; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Diagnostics.HealthChecks; - -namespace LocalPost.BackgroundQueue.DependencyInjection; - - -[PublicAPI] -public static class HealthChecksBuilderEx -{ - // Not needed, as there is no complex logic inside. It's either working, or dead. -// public static IHealthChecksBuilder AddBackgroundQueueReadinessCheck(... - - public static IHealthChecksBuilder AddBackgroundQueueLivenessCheck(this IHealthChecksBuilder builder, - HealthStatus? failureStatus = default, IEnumerable? 
tags = default) => builder - .AddPipelineLivenessCheck>(); -} diff --git a/src/LocalPost/BackgroundQueue/DependencyInjection/HealthChecksBuilderEx.cs b/src/LocalPost/BackgroundQueue/DependencyInjection/HealthChecksBuilderEx.cs new file mode 100644 index 0000000..97fd455 --- /dev/null +++ b/src/LocalPost/BackgroundQueue/DependencyInjection/HealthChecksBuilderEx.cs @@ -0,0 +1,27 @@ +using LocalPost.DependencyInjection; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Diagnostics.HealthChecks; + +namespace LocalPost.BackgroundQueue.DependencyInjection; + +[PublicAPI] +public static class HealthChecksBuilderEx +{ + public static IHealthChecksBuilder AddBackgroundQueue(this IHealthChecksBuilder builder, + string name, HealthStatus? failureStatus = null, IEnumerable? tags = null) => + builder.Add(HealthChecks.Readiness>(name, failureStatus, tags)); + + public static IHealthChecksBuilder AddKafkaConsumers(this IHealthChecksBuilder builder, + HealthStatus? failureStatus = null, IEnumerable? 
tags = null) + { + var services = builder.Services + .Where(service => service is { IsKeyedService: true, ServiceType.IsGenericType: true } && + service.ServiceType.GetGenericTypeDefinition() == typeof(BackgroundQueue<>)) + .Select(service => (service.ServiceType, (string)service.ServiceKey!)); + + foreach (var (bqService, name) in services) + builder.Add(HealthChecks.Readiness(bqService, name, failureStatus, tags)); + + return builder; + } +} diff --git a/src/LocalPost/BackgroundQueue/DependencyInjection/ServiceCollectionEx.cs b/src/LocalPost/BackgroundQueue/DependencyInjection/ServiceCollectionEx.cs index 15c0280..ac7c2af 100644 --- a/src/LocalPost/BackgroundQueue/DependencyInjection/ServiceCollectionEx.cs +++ b/src/LocalPost/BackgroundQueue/DependencyInjection/ServiceCollectionEx.cs @@ -1,4 +1,3 @@ -using JetBrains.Annotations; using Microsoft.Extensions.DependencyInjection; namespace LocalPost.BackgroundQueue.DependencyInjection; diff --git a/src/LocalPost/BackgroundQueue/HandlerStackEx.cs b/src/LocalPost/BackgroundQueue/HandlerStackEx.cs index dd8c562..3d862df 100644 --- a/src/LocalPost/BackgroundQueue/HandlerStackEx.cs +++ b/src/LocalPost/BackgroundQueue/HandlerStackEx.cs @@ -1,15 +1,7 @@ using System.Diagnostics; -using JetBrains.Annotations; namespace LocalPost.BackgroundQueue; -// public static class PipelineOps -// { -// public static PipelineRegistration Batch(this PipelineRegistration> next, -// ushort batchMaxSize = 10, int timeWindowDuration = 1_000) => next.Map>((stream, _) => -// stream.Batch(() => new BoundedBatchBuilder(batchMaxSize, timeWindowDuration))); -// }w - [PublicAPI] public static class HandlerStackEx { diff --git a/src/LocalPost/BackgroundQueue/Options.cs b/src/LocalPost/BackgroundQueue/Options.cs index 5357fa4..a05d8b8 100644 --- a/src/LocalPost/BackgroundQueue/Options.cs +++ b/src/LocalPost/BackgroundQueue/Options.cs @@ -3,101 +3,29 @@ namespace LocalPost.BackgroundQueue; -// // For the DI container, to distinguish between different 
queues -// public sealed record QueueOptions : QueueOptions; - -// // For the DI container, to distinguish between different queues -// public sealed record BatchedOptions : BatchedOptions; -// -// public record BatchedOptions : Options -// { -// [Range(1, ushort.MaxValue)] public ushort BatchMaxSize { get; set; } = 10; -// -// // TODO Rename to BatchTimeWindowMs -// [Range(1, ushort.MaxValue)] public int BatchTimeWindowMilliseconds { get; set; } = 1_000; -// -// internal TimeSpan BatchTimeWindow => TimeSpan.FromMilliseconds(BatchTimeWindowMilliseconds); -// } - -/// -/// Background queue configuration. -/// -public sealed class QueueOptions +public sealed record QueueOptions { - // /// - // /// How to handle new messages when the queue (channel) is full. Default is to drop the oldest message (to not - // /// block the producer). - // /// - // public BoundedChannelFullMode FullMode { get; set; } = BoundedChannelFullMode.DropOldest; - // - // /// - // /// Maximum queue (channel) length, after which writes are blocked (see ). - // /// Default is unlimited. - // /// - // [Range(1, ushort.MaxValue)] - // public ushort? MaxSize { get; set; } = null; - - public ChannelOptions Channel { get; set; } = new UnboundedChannelOptions(); - /// - /// How long to wait before closing the queue (channel) on app shutdown. Default is 1 second. + /// How many messages to process concurrently. Default is 10. /// - public ushort CompletionDelay { get; set; } = 1_000; // Milliseconds - - // /// - // /// How many messages to process concurrently. Default is 10. 
- // /// - // [Required] - // [Range(1, ushort.MaxValue)] - // public ushort MaxConcurrency { get; set; } = 10; - - internal void UpdateFrom(QueueOptions options) - { - // FullMode = options.FullMode; - // MaxSize = options.MaxSize; - Channel = options.Channel; - CompletionDelay = options.CompletionDelay; - // MaxConcurrency = options.MaxConcurrency; - } -} - -public sealed class DefaultPipelineOptions -{ - public QueueOptions Queue { get; } = new(); - - [Range(1, ushort.MaxValue)] - public ushort MaxConcurrency { get; set; } = 10; - - public static implicit operator Pipeline.ConsumerOptions(DefaultPipelineOptions options) => new() - { - MaxConcurrency = options.MaxConcurrency, - BreakOnException = false, - }; -} - -public sealed class DefaultBatchPipelineOptions -{ - // public QueueOptions> Queue { get; } = new(); - public QueueOptions Queue { get; } = new(); - + [Required] [Range(1, ushort.MaxValue)] - public ushort MaxConcurrency { get; set; } = 10; + public ushort MaxConcurrency { get; set; } = 50; - [Range(1, ushort.MaxValue)] - public ushort BatchMaxSize { get; set; } = 10; + [Range(1, int.MaxValue)] + public int? BufferSize { get; set; } = 1000; - [Range(1, ushort.MaxValue)] - public int TimeWindowMs { get; set; } = 1_000; + /// + /// How to handle new messages when the underlying channel is full. Default is to drop the oldest message + /// (to not block the producer). + /// + public BoundedChannelFullMode FullMode { get; set; } = BoundedChannelFullMode.DropOldest; - public static implicit operator Pipeline.ConsumerOptions(DefaultBatchPipelineOptions options) => new() - { - MaxConcurrency = options.MaxConcurrency, - BreakOnException = false, - }; + public bool SingleProducer { get; set; } = false; - public static implicit operator BatchOptions(DefaultBatchPipelineOptions options) => new() - { - MaxSize = options.BatchMaxSize, - TimeWindowDuration = options.TimeWindowMs, - }; + /// + /// How long to wait before closing the queue (channel) on app shutdown. 
Default is 1 second. + /// + public Func CompletionTrigger { get; set; } = ct => Task.Delay(1000, ct); + // public ushort CompletionDelay { get; set; } = 1_000; // Milliseconds } diff --git a/src/LocalPost/BackgroundQueueConsumer.cs b/src/LocalPost/BackgroundQueueConsumer.cs deleted file mode 100644 index 87ce7de..0000000 --- a/src/LocalPost/BackgroundQueueConsumer.cs +++ /dev/null @@ -1,146 +0,0 @@ -using LocalPost.DependencyInjection; - -namespace LocalPost; - -internal interface IStreamRunner : IBackgroundService, IAssistantService; - -internal sealed class StreamRunner(IAsyncEnumerable stream, StreamProcessor consume) : IStreamRunner -{ - public required AssistedService Target { get; init; } - - private Task? _exec; - private CancellationTokenSource? _execCts; - - public Task StartAsync(CancellationToken ct) => Task.CompletedTask; - - public Task ExecuteAsync(CancellationToken ct) - { - if (_exec is not null) - return _exec; - - var execCts = _execCts = new CancellationTokenSource(); - return _exec = consume(stream, execCts.Token); - } - - // Process the rest (leftovers). Common cases: - // - SQS: message source (fetcher) has been stopped, so we just need to process leftovers from the channel - // - Kafka: message source (consumer) has been stopped, so we just need to process leftovers from the channel - // - Background (job) queue: hope that the producers are stopped, so no new messages should appear, so we - // just need to process leftovers from the queue - public Task StopAsync(CancellationToken ct) - { - if (_exec is null) - return Task.CompletedTask; - - ct.Register(() => _execCts?.Cancel()); - return _exec; - - // Cleanup the state?.. 
- } -} - - - -// internal sealed record ConsumerOptions(ushort MaxConcurrency, bool BreakOnException); -// -// internal static class Queue -// { -// internal interface IConsumer : IBackgroundService, IServiceFor; -// -// internal sealed class Consumer( -// ILogger> logger, -// IAsyncEnumerable queue, -// Handler handler, -// ushort maxConcurrency) -// : IConsumer //, IDisposable -// { -// public required string Target { get; init; } -// -// public bool BreakOnException { get; init; } = false; -// // private bool _broken = false; -// -// private Task? _exec; -// private CancellationTokenSource? _execCts; -// -// private async Task Execute(CancellationToken execCt) -// { -// // using var loopCts = new CancellationTokenSource(); -// using var loopCts = CancellationTokenSource.CreateLinkedTokenSource(execCt); -// // using var cts = CancellationTokenSource.CreateLinkedTokenSource(execCt, loopCts.Token); -// var loopCt = loopCts.Token; -// -// await Task.WhenAll(Enumerable.Range(1, maxConcurrency) -// .Select(_ => Loop())); -// -// return; -// -// async Task Loop() -// { -// try -// { -// await foreach (var message in queue.WithCancellation(loopCt)) -// await Handle(message); -// } -// catch (OperationCanceledException) when (loopCt.IsCancellationRequested) -// { -// // It is either: -// // - app shutdown timeout (force shutdown) -// // - handler exception (when BreakOnException is set) -// // Just break the loop -// } -// } -// -// async Task Handle(T message) -// { -// try -// { -// await handler(message, execCt); -// } -// catch (OperationCanceledException) when (execCt.IsCancellationRequested) -// { -// throw; // App shutdown timeout (force shutdown) -// } -// catch (Exception e) -// { -// if (BreakOnException) -// { -// // Break the loop (all the concurrent executions of it) -// // ReSharper disable once AccessToDisposedClosure -// loopCts.Cancel(); -// // Push it up, so the service is marked as unhealthy -// throw; -// } -// -// logger.LogError(e, "Failed to handle 
a message"); -// } -// } -// } -// -// public Task StartAsync(CancellationToken ct) => Task.CompletedTask; -// -// public Task ExecuteAsync(CancellationToken ct) -// { -// if (_exec is not null) -// return _exec; -// -// var execCts = _execCts = new CancellationTokenSource(); -// return _exec = Execute(execCts.Token); -// } -// -// // Process the rest (leftovers). Common cases: -// // - SQS: message source (fetcher) has been stopped, so we just need to process leftovers from the channel -// // - Kafka: message source (consumer) has been stopped, so we just need to process leftovers from the channel -// // - Background (job) queue: hope that the producers are stopped, so no new messages should appear, so we -// // just need to process leftovers from the queue -// public Task StopAsync(CancellationToken ct) -// { -// if (_exec is null) -// return Task.CompletedTask; -// -// ct.Register(() => _execCts?.Cancel()); -// return _exec; -// -// // Cleanup the state?.. -// } -// } -// } diff --git a/src/LocalPost/ConcurrentHostedServices.cs b/src/LocalPost/ConcurrentHostedServices.cs deleted file mode 100644 index b62e157..0000000 --- a/src/LocalPost/ConcurrentHostedServices.cs +++ /dev/null @@ -1,309 +0,0 @@ -using System.Collections.Immutable; -using System.Diagnostics.CodeAnalysis; -using Microsoft.Extensions.Diagnostics.HealthChecks; -using Microsoft.Extensions.Hosting; -using static Microsoft.Extensions.Diagnostics.HealthChecks.HealthCheckResult; - -namespace LocalPost; - -internal interface IBackgroundService -{ - Task StartAsync(CancellationToken ct); - - Task ExecuteAsync(CancellationToken ct); - - Task StopAsync(CancellationToken ct); -} - -internal sealed class BackgroundServicesMonitor(IReadOnlyCollection services) - : IBackgroundServiceMonitor -{ - public bool Started => services.All(s => s.Started); - public bool Running => services.All(s => s.Running); - public Task Stopped => Task.WhenAll(services.Select(s => s.Stopped)); - - public bool Crashed => 
services.Any(s => s.Crashed); - - public Exception? Exception => services.Select(s => s.Exception).FirstOrDefault(); -} - -internal interface IBackgroundServiceMonitor -{ - public sealed class LivenessCheck : IHealthCheck - { - public required IBackgroundServiceMonitor Service { get; init; } - - public Task CheckHealthAsync(HealthCheckContext context, - CancellationToken cancellationToken = default) => Task.FromResult(CheckHealth(context)); - - private HealthCheckResult CheckHealth(HealthCheckContext _) => Service switch - { - { Crashed: true } => Unhealthy("Crashed", Service.Exception), -// { Running: false } => Degraded("Not (yet) running"), - // Started and running - _ => Healthy("Alive") - }; - } - - // Readiness like "ready to handle requests" is the same a liveness check here. At least at the moment. - public sealed class ReadinessCheck : IHealthCheck - { - public required IBackgroundServiceMonitor Service { get; init; } - - public Task CheckHealthAsync(HealthCheckContext context, - CancellationToken cancellationToken = default) => Task.FromResult(CheckHealth(context)); - - private HealthCheckResult CheckHealth(HealthCheckContext _) => Service switch - { - { Running: true } => Healthy("Running"), - _ => Unhealthy("Not (yet) running") - }; - } - - public bool Started { get; } - - public bool Running { get; } - - public Task Stopped { get; } - - [MemberNotNullWhen(true, nameof(Exception))] - public bool Crashed { get; } - - public Exception? 
Exception { get; } -} - -internal sealed class BackgroundServices : IConcurrentHostedService, IDisposable -{ - public readonly IReadOnlyCollection Runners; - - public BackgroundServices(IEnumerable services, IHostApplicationLifetime appLifetime) - { - Runners = services.Select(s => new BackgroundServiceRunner(s, appLifetime)).ToArray(); - } - - public Task StartAsync(CancellationToken cancellationToken) => - Task.WhenAll(Runners.Select(s => s.StartAsync(cancellationToken))); - - public Task StopAsync(CancellationToken cancellationToken) => - Task.WhenAll(Runners.Select(s => s.StopAsync(cancellationToken))); - - public void Dispose() - { - foreach (var service in Runners) - service.Dispose(); - } -} - -internal sealed class BackgroundServiceRunner(IBackgroundService service, IHostApplicationLifetime appLifetime) - : IConcurrentHostedService, IBackgroundServiceMonitor, IDisposable -{ - private Task? _start; - private CancellationTokenSource? _executionCts; - private Task? _execution; - private Task? _executionWrapper; - - private readonly TaskCompletionSource _stopped = new(); - - public IBackgroundService Service => service; - - public bool Starting => _start is not null && !_start.IsCompleted; - - // StartedSuccessfully?.. - public bool Started => _start is not null && _start.Status == TaskStatus.RanToCompletion; - - public bool Running => _execution is not null && !_execution.IsCompleted; - - public Task Stopped => _stopped.Task; - - public bool StartCrashed => _start is not null && _start.Status == TaskStatus.Faulted; - public bool RunCrashed => _execution is not null && _execution.Status == TaskStatus.Faulted; - public bool Crashed => StartCrashed || RunCrashed; - - // TODO Test - public Exception? Exception => (StartCrashed ? 
_start?.Exception : _execution?.Exception)?.InnerException; - - private async Task WaitAppStartAsync(CancellationToken ct) - { - try - { - // Wait until all other services have started - await Task.Delay(Timeout.Infinite, appLifetime.ApplicationStarted).WaitAsync(ct); - } - catch (OperationCanceledException e) when (e.CancellationToken == appLifetime.ApplicationStarted) - { - // Startup completed, continue - } - } - - public async Task StartAsync(CancellationToken ct) - { - // All the services are started from the same (main) thread, so there are no races - if (_start is not null) - throw new InvalidOperationException("Service is already started"); - - await (_start = service.StartAsync(ct)); - - // Start execution in the background... - _executionCts = new CancellationTokenSource(); - _executionWrapper = ExecuteAsync(_executionCts.Token); - } - - private async Task ExecuteAsync(CancellationToken ct) - { - try - { - await WaitAppStartAsync(ct); - await (_execution = service.ExecuteAsync(ct)); - } - catch (OperationCanceledException) when (ct.IsCancellationRequested) - { - // Normal case, we trigger this token ourselves when stopping the service - } - catch (Exception) - { - // Otherwise it's an error, but swallow it silently (this method is called in "fire and forget" mode, not - // awaited, so any unhandled exception will arrive in TaskScheduler.UnobservedTaskException, which is not - // what we want). - // See also: https://stackoverflow.com/a/59300076/322079. - } - } - - public async Task StopAsync(CancellationToken forceExitToken) - { - if (_executionCts is null) - // Or simply ignore and return?.. 
- throw new InvalidOperationException("Service has not been started"); - - try - { - if (!_executionCts.IsCancellationRequested) - _executionCts.Cancel(); // Signal cancellation to the service - - if (_executionWrapper is not null) - // Wait until the execution completes or the app is forced to exit - await _executionWrapper.WaitAsync(forceExitToken); - - await service.StopAsync(forceExitToken); - } - finally - { - _stopped.TrySetResult(true); - } - } - - public void Dispose() - { - _executionCts?.Dispose(); - } -} - -// internal sealed class BackgroundServiceRunner(T service, IHostApplicationLifetime appLifetime) -// : IServiceFor, IConcurrentHostedService, IBackgroundServiceMonitor, IDisposable -// where T : class, IBackgroundService -// { -// private Task? _start; -// private CancellationTokenSource? _executionCts; -// private Task? _execution; -// private Task? _executionWrapper; -// -// public string Target => service switch -// { -// INamedService namedService => namedService.Name, -// IServiceFor serviceForNamed => serviceForNamed.Target, -// _ => Reflection.FriendlyNameOf() -// }; -// -// public bool Starting => _start is not null && !_start.IsCompleted; -// -// // StartedSuccessfully?.. -// public bool Started => _start is not null && _start.Status == TaskStatus.RanToCompletion; -// -// public bool Running => _execution is not null && !_execution.IsCompleted; -// -// public bool StartCrashed => _start is not null && _start.Status == TaskStatus.Faulted; -// public bool RunCrashed => _execution is not null && _execution.Status == TaskStatus.Faulted; -// public bool Crashed => StartCrashed || RunCrashed; -// -// // TODO Test -// public Exception? Exception => (StartCrashed ? 
_start?.Exception : _execution?.Exception)?.InnerException; -// -// private async Task WaitAppStartAsync(CancellationToken ct) -// { -// try -// { -// // Wait until all other services have started -// await Task.Delay(Timeout.Infinite, appLifetime.ApplicationStarted).WaitAsync(ct); -// } -// catch (OperationCanceledException e) when (e.CancellationToken == appLifetime.ApplicationStarted) -// { -// // Startup completed, continue -// } -// } -// -// public async Task StartAsync(CancellationToken ct) -// { -// // All the services are started from the same (main) thread, so there are no races -// if (_start is not null) -// throw new InvalidOperationException("Service is already started"); -// -// await (_start = service.StartAsync(ct)); -// -// // Start execution in the background... -// _executionCts = new CancellationTokenSource(); -// _executionWrapper = ExecuteAsync(_executionCts.Token); -// } -// -// private async Task ExecuteAsync(CancellationToken ct) -// { -// try -// { -// await WaitAppStartAsync(ct); -// await (_execution = service.ExecuteAsync(ct)); -// } -// catch (OperationCanceledException) when (ct.IsCancellationRequested) -// { -// // Normal case, we trigger this token ourselves when stopping the service -// } -// catch (Exception) -// { -// // Otherwise it's an error, but swallow it silently (this method is called in "fire and forget" mode, not -// // awaited, so any unhandled exception will arrive in TaskScheduler.UnobservedTaskException, which is not -// // what we want). -// // See also: https://stackoverflow.com/a/59300076/322079. -// } -// } -// -// public async Task StopAsync(CancellationToken forceExitToken) -// { -// if (_executionCts is null) -// // Or simply ignore and return?.. 
-// throw new InvalidOperationException("Service has not been started"); -// -// if (!_executionCts.IsCancellationRequested) -// _executionCts.Cancel(); // Signal cancellation to the service -// -// if (_executionWrapper is not null) -// // Wait until the execution completes or the app is forced to exit -// await _executionWrapper.WaitAsync(forceExitToken); -// -// await service.StopAsync(forceExitToken); -// } -// -// public void Dispose() -// { -// _executionCts?.Dispose(); -// } -// } - -internal interface IConcurrentHostedService : IHostedService; - -internal sealed class ConcurrentHostedServices(IEnumerable services) : IHostedService -{ - private readonly ImmutableArray _services = services.ToImmutableArray(); - - public Task StartAsync(CancellationToken cancellationToken) => - Task.WhenAll(_services.Select(c => c.StartAsync(cancellationToken))); - - public Task StopAsync(CancellationToken cancellationToken) => - Task.WhenAll(_services.Select(c => c.StopAsync(cancellationToken))); -} diff --git a/src/LocalPost/DependencyInjection/HealthChecks.cs b/src/LocalPost/DependencyInjection/HealthChecks.cs index 841ce30..0e4c80b 100644 --- a/src/LocalPost/DependencyInjection/HealthChecks.cs +++ b/src/LocalPost/DependencyInjection/HealthChecks.cs @@ -1,11 +1,15 @@ using System.Collections.Immutable; -using JetBrains.Annotations; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Diagnostics.HealthChecks; using Microsoft.Extensions.Hosting; namespace LocalPost.DependencyInjection; +internal interface IHealthAwareService +{ + IHealthCheck ReadinessCheck { get; } +} + [PublicAPI] public static partial class ServiceCollectionEx { @@ -20,105 +24,23 @@ public static IServiceCollection AddAppHealthSupervisor(this IServiceCollection Tags = tags?.ToImmutableHashSet() ?? 
ImmutableHashSet.Empty }); - services.AddBackgroundService(); + services.AddHostedService(); return services; } } -internal static class HealthChecksBuilderEx -{ - internal static IHealthChecksBuilder AddPipelineLivenessCheck(this IHealthChecksBuilder builder, - HealthStatus? failureStatus = default, IEnumerable? tags = default) - { - var check = HealthChecks.PipelineLivenessCheckFor(failureStatus, tags); - // if (name is not null) - // check.Name = name; - - return builder.Add(check); - } - - internal static IHealthChecksBuilder AddPipelineLivenessCheck(this IHealthChecksBuilder builder, string name, - HealthStatus? failureStatus = default, IEnumerable? tags = default) - where T : INamedService - { - var check = HealthChecks.PipelineLivenessCheckFor(name, failureStatus, tags); - - return builder.Add(check); - } -} - -internal static class HealthChecks +internal static partial class HealthChecks { - public static HealthCheckRegistration LivenessCheck( - HealthStatus? failureStatus = null, IEnumerable? tags = null) - where T : class, IBackgroundService => - new(Reflection.FriendlyNameOf(), // Can be overwritten later - provider => new IBackgroundServiceMonitor.LivenessCheck - { Service = provider.GetBackgroundServiceRunner() }, - failureStatus, // Can be overwritten later - tags); - - public static HealthCheckRegistration LivenessCheck(string name, - HealthStatus? failureStatus = null, IEnumerable? tags = null) - where T : class, IBackgroundService, INamedService => - new(name, // Can be overwritten later - provider => new IBackgroundServiceMonitor.LivenessCheck - { Service = provider.GetBackgroundServiceRunner(name) }, - failureStatus, // Can be overwritten later - tags); - - public static HealthCheckRegistration ReadinessCheck(string name, + public static HealthCheckRegistration Readiness(string name, HealthStatus? failureStatus = null, IEnumerable? 
tags = null) - where T : class, IBackgroundService, INamedService => - new(name, // Can be overwritten later - provider => new IBackgroundServiceMonitor.ReadinessCheck - { Service = provider.GetBackgroundServiceRunner(name) }, - failureStatus, // Can be overwritten later - tags); - - // public static HealthCheckRegistration LivenessCheckFor(string target, - // HealthStatus? failureStatus = null, IEnumerable? tags = null) - // where T : class, IBackgroundService, IAssistantService => - // new(target, // Can be overwritten later - // provider => new IBackgroundServiceMonitor.LivenessCheck - // { Service = provider.GetBackgroundServiceRunnerFor(target) }, - // failureStatus, // Can be overwritten later - // tags); - // - // public static HealthCheckRegistration ReadinessCheckFor(string target, - // HealthStatus? failureStatus = null, IEnumerable? tags = null) - // where T : class, IBackgroundService, IAssistantService => - // new(target, // Can be overwritten later - // provider => new IBackgroundServiceMonitor.ReadinessCheck - // { Service = provider.GetBackgroundServiceRunnerFor(target) }, - // failureStatus, // Can be overwritten later - // tags); + where T : IHealthAwareService => + Readiness(typeof(T), name, failureStatus, tags); - public static HealthCheckRegistration PipelineLivenessCheckFor( + public static HealthCheckRegistration Readiness(Type bqService, string name, HealthStatus? failureStatus = null, IEnumerable? tags = null) => - // TODO Make it like "MessageSource:pipeline"... - new(Reflection.FriendlyNameOf(), // Can be overwritten later - provider => new IBackgroundServiceMonitor.LivenessCheck - { Service = provider.GetPipelineMonitorFor() }, - failureStatus, // Can be overwritten later - tags); - - public static HealthCheckRegistration PipelineLivenessCheckFor(string name, - HealthStatus? failureStatus = null, IEnumerable? tags = null) - where T : INamedService => - // TODO Make it like "MessageSource:pipeline:name"... 
- new(Reflection.FriendlyNameOf(name), // Can be overwritten later - provider => new IBackgroundServiceMonitor.LivenessCheck - { Service = provider.GetPipelineMonitorFor(name) }, + new(name, // Can be overwritten later + provider => ((IHealthAwareService)provider.GetRequiredKeyedService(bqService, name)).ReadinessCheck, failureStatus, // Can be overwritten later tags); - - // public static HealthCheckRegistration PipelineReadinessCheckFor(string name, - // HealthStatus? failureStatus = null, IEnumerable? tags = null) => - // new(AssistedService.From(), // Can be overwritten later - // provider => new IBackgroundServiceMonitor.ReadinessCheck - // { Service = provider.GetPipelineMonitorFor(name) }, - // failureStatus, // Can be overwritten later - // tags); } diff --git a/src/LocalPost/DependencyInjection/IAssistantService.cs b/src/LocalPost/DependencyInjection/IAssistantService.cs deleted file mode 100644 index 510da1d..0000000 --- a/src/LocalPost/DependencyInjection/IAssistantService.cs +++ /dev/null @@ -1,28 +0,0 @@ -namespace LocalPost.DependencyInjection; - -public readonly record struct AssistedService -{ - private readonly Type _type; - - private readonly string? _name; - - private AssistedService(Type type, string? 
name = null) - { - _type = type; - _name = name; - } - - internal static AssistedService From() => new(typeof(T)); - - internal static AssistedService From(string name) where T : INamedService => new(typeof(T), name); - - public static implicit operator string(AssistedService service) => service.ToString(); - - public override string ToString() => Reflection.FriendlyNameOf(_type, _name); -} - -internal interface IAssistantService -{ - // string Target { get; } - AssistedService Target { get; } -} diff --git a/src/LocalPost/DependencyInjection/INamedService.cs b/src/LocalPost/DependencyInjection/INamedService.cs deleted file mode 100644 index d54f873..0000000 --- a/src/LocalPost/DependencyInjection/INamedService.cs +++ /dev/null @@ -1,34 +0,0 @@ -using Microsoft.Extensions.DependencyInjection; - -namespace LocalPost.DependencyInjection; - -internal interface INamedService -{ - string Name { get; } -} - -// internal sealed class NamedServiceDescriptor : ServiceDescriptor -// { -// public static NamedServiceDescriptor Singleton(string name, Func iFactory) -// where TService : class, INamedService => -// new(typeof(TService), name, iFactory, ServiceLifetime.Singleton); -// -// public string Name { get; init; } -// -// public NamedServiceDescriptor(Type sType, string name, Type iType, ServiceLifetime lifetime) : -// base(sType, iType, lifetime) -// { -// Name = name; -// } -// -// public NamedServiceDescriptor(Type sType, string name, object instance) : base(sType, instance) -// { -// Name = name; -// } -// -// public NamedServiceDescriptor(Type sType, string name, Func factory, -// ServiceLifetime lifetime) : base(sType, factory, lifetime) -// { -// Name = name; -// } -// } diff --git a/src/LocalPost/DependencyInjection/ServiceCollectionEx.cs b/src/LocalPost/DependencyInjection/ServiceCollectionEx.cs deleted file mode 100644 index 40bd93d..0000000 --- a/src/LocalPost/DependencyInjection/ServiceCollectionEx.cs +++ /dev/null @@ -1,67 +0,0 @@ -using 
Microsoft.Extensions.DependencyInjection; - -namespace LocalPost.DependencyInjection; - -public static partial class ServiceCollectionEx -{ - internal static RegistrationContext RegistrationContextFor(this IServiceCollection services) => - new(services, AssistedService.From()); - - internal static RegistrationContext RegistrationContextFor(this IServiceCollection services, string name) - where T : INamedService => - new(services, AssistedService.From(name)); - - // internal static bool TryAddBackgroundConsumer(this IServiceCollection services, string name, - // HandlerFactory hf, Func of) - // where TQ : IAsyncEnumerable, INamedService - // { - // if (!services.TryAddNamedSingleton(name, CreateConsumer)) - // return false; - // - // services.AddBackgroundService>(name); - // - // return true; - // - // Queue.NamedConsumer CreateConsumer(IServiceProvider provider) - // { - // var options = of(provider); - // var handler = hf(provider); - // - // return new Queue.NamedConsumer( - // provider.GetRequiredService>>(), - // provider.GetRequiredService(name), handler, options.MaxConcurrency) - // { - // BreakOnException = options.BreakOnException - // }; - // } - // } - // - // internal static bool TryAddBackgroundConsumer(this IServiceCollection services, - // HandlerFactory hf, Func of) - // where TQ : IAsyncEnumerable - // { - // if (!services.TryAddSingleton(CreateConsumer)) - // return false; - // - // services.AddBackgroundService>(); - // - // return true; - // - // Queue.Consumer CreateConsumer(IServiceProvider provider) - // { - // var options = of(provider); - // var handler = hf(provider); - // - // return new Queue.Consumer( - // provider.GetRequiredService>>(), - // provider.GetRequiredService(), handler, options.MaxConcurrency) - // { - // BreakOnException = options.BreakOnException - // }; - // } - // } - - // Just register a background service directly - // internal static IServiceCollection AddBackgroundPipeline(this IServiceCollection services, string 
target, - // IAsyncEnumerable stream, PipelineConsumer consume) => ... -} diff --git a/src/LocalPost/DependencyInjection/ServiceCollectionTools.cs b/src/LocalPost/DependencyInjection/ServiceCollectionTools.cs index 466a823..2870147 100644 --- a/src/LocalPost/DependencyInjection/ServiceCollectionTools.cs +++ b/src/LocalPost/DependencyInjection/ServiceCollectionTools.cs @@ -4,83 +4,16 @@ namespace LocalPost.DependencyInjection; internal static class ServiceCollectionTools { -// public static void AddBackgroundServiceFor(this IServiceCollection services, string name) -// where T : class, IBackgroundService, IServiceFor -// { -// services.AddConcurrentHostedServices(); -// -// var added = services.TryAddNamedSingleton>(name, provider => -// new BackgroundServiceRunner(provider.GetRequiredService(name), -// provider.GetRequiredService())); -// if (!added) -// return; -// -// services.AddSingletonAlias>(name); -// } -// -// public static void AddBackgroundService(this IServiceCollection services, string name) -// where T : class, IBackgroundService, INamedService -// { -// services.AddConcurrentHostedServices(); -// -// var added = services.TryAddNamedSingleton>(name, provider => -// new BackgroundServiceRunner(provider.GetRequiredService(name), -// provider.GetRequiredService())); -// if (!added) -// return; -// -// services.AddSingletonAlias>(name); -// } -// -// public static void AddBackgroundService(this IServiceCollection services) -// where T : class, IBackgroundService -// { -// services.AddConcurrentHostedServices(); -// -// // We DO expect that this service is registered by the user... 
-// // services.AddSingleton(); -// // services.AddSingleton(); -// -// var added = services.TryAddSingleton>(provider => -// new BackgroundServiceRunner(provider.GetRequiredService(), -// provider.GetRequiredService())); -// if (!added) -// return; -// -// services.AddSingletonAlias>(); -// -// -// // FIXME Remove and check -// // services.AddSingleton(provider => -// // provider.GetRequiredService>()); -// } - - public static void AddBackgroundService(this IServiceCollection services, - Func factory) => - services.AddConcurrentHostedServices().AddSingleton(factory); - - public static void AddBackgroundService(this IServiceCollection services) - where T : class, IBackgroundService => - services.AddConcurrentHostedServices().AddSingletonAlias(); - - public static void AddBackgroundService(this IServiceCollection services, string name) - where T : class, IBackgroundService, INamedService => - services.AddConcurrentHostedServices().AddSingletonAlias(name); - - public static IServiceCollection AddConcurrentHostedServices(this IServiceCollection services) - { - if (!services.TryAddSingleton()) - return services; - - return services - .AddHostedService() - .AddSingletonAlias(); - } - - public static bool TryAddNamedSingleton(this IServiceCollection services, string name, - Func factory) - where TService : class, INamedService => - services.TryAdd(ServiceDescriptor.KeyedSingleton(name, factory)); + public static IEnumerable GetKeysFor(this IServiceCollection services) => services + // See https://github.com/dotnet/runtime/issues/95789#issuecomment-2274223124 + .Where(service => service.IsKeyedService && service.ServiceType == typeof(T)) + .Select(service => service.ServiceKey); + + public static bool TryAddKeyedSingleton(this IServiceCollection services, object key, + Func factory) + // where TService : class, INamedService => + where TService : class => + services.TryAdd(ServiceDescriptor.KeyedSingleton(key, factory)); public static bool TryAddSingleton(this 
IServiceCollection services) where TService : class => services.TryAdd(ServiceDescriptor.Singleton()); @@ -116,20 +49,18 @@ public static IServiceCollection AddSingletonAlias(th where TImplementation : class, TService => services.AddSingleton(provider => provider.GetRequiredService()); - public static IServiceCollection AddSingletonAlias(this IServiceCollection services, - string name) - where TService : class - where TImplementation : class, TService, INamedService => - services.AddSingleton(provider => provider.GetRequiredService(name)); + public static IServiceCollection AddSingletonAlias(this IServiceCollection services, object key) + where TAlias : class + where TService : class, TAlias => + services.AddKeyedSingleton(key, (provider, _) => provider.GetRequiredKeyedService(key)); - public static bool TryAddSingletonAlias(this IServiceCollection services) - where TService : class - where TImplementation : class, TService => - services.TryAddSingleton(provider => provider.GetRequiredService()); + public static bool TryAddSingletonAlias(this IServiceCollection services) + where TAlias : class + where TService : class, TAlias => + services.TryAddSingleton(provider => provider.GetRequiredService()); - public static bool TryAddSingletonAlias(this IServiceCollection services, - string name) - where TService : class - where TImplementation : class, TService, INamedService => - services.TryAddSingleton(provider => provider.GetRequiredService(name)); + public static bool TryAddSingletonAlias(this IServiceCollection services, object key) + where TAlias : class + where TService : class, TAlias => + services.TryAddKeyedSingleton(key, (provider, _) => provider.GetRequiredKeyedService(key)); } diff --git a/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs b/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs index 518e35e..de178c1 100644 --- a/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs +++ 
b/src/LocalPost/DependencyInjection/ServiceProviderLookups.cs @@ -1,19 +1,10 @@ using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Logging; using Microsoft.Extensions.Options; namespace LocalPost.DependencyInjection; internal static class ServiceProviderLookups { - public static T GetRequiredService(this IServiceProvider provider, string name) - where T : INamedService => - provider.GetServices().First(service => service.Name == name); - - // public static T GetRequiredServiceFor(this IServiceProvider provider, string target) - // where T : IAssistantService => - // provider.GetServices().First(service => service.Target == target); - public static T GetOptions(this IServiceProvider provider) where T : class => provider.GetRequiredService>().Value; @@ -22,61 +13,4 @@ public static T GetOptions(this IServiceProvider provider, string name) where public static ILogger GetLoggerFor(this IServiceProvider provider) => provider.GetRequiredService>(); - - public static BackgroundServiceRunner GetBackgroundServiceRunner(this IServiceProvider provider) - where T : IBackgroundService => - provider.GetRequiredService().Runners - .First(runner => runner.Service is T); - - public static BackgroundServiceRunner GetBackgroundServiceRunner(this IServiceProvider provider, - string name) - where T : IBackgroundService, INamedService => - provider.GetRequiredService().Runners - .First(runner => runner.Service is T s && s.Name == name); - - // public static BackgroundServiceRunner GetBackgroundServiceRunnerFor(this IServiceProvider provider, - // string target) - // where T : IBackgroundService, IAssistantService => - // provider.GetRequiredService().Runners - // .First(runner => runner.Service is T s && s.Target == target); - - public static IBackgroundServiceMonitor GetPipelineMonitorFor(this IServiceProvider provider) - { - var target = AssistedService.From(); - var services = provider.GetRequiredService(); - var runners = services.Runners - .Where(runner => 
runner.Service is IStreamRunner pr && pr.Target == target) - .ToArray(); - - return new BackgroundServicesMonitor(runners); - } - - public static IBackgroundServiceMonitor GetPipelineMonitorFor(this IServiceProvider provider, string name) - where T : INamedService - { - var target = AssistedService.From(name); - var services = provider.GetRequiredService(); - var runners = services.Runners - .Where(runner => runner.Service is IStreamRunner pr && pr.Target == target) - .ToArray(); - - return new BackgroundServicesMonitor(runners); - } - - public static IEnumerable GetPipelineRunnersFor(this IServiceProvider provider) - { - var target = AssistedService.From(); - return provider.GetServices() - .OfType() - .Where(runner => runner.Target == target); - } - - public static IEnumerable GetPipelineRunnersFor(this IServiceProvider provider, string name) - where T : INamedService - { - var target = AssistedService.From(name); - return provider.GetServices() - .OfType() - .Where(runner => runner.Target == target); - } } diff --git a/src/LocalPost/Handler.cs b/src/LocalPost/Handler.cs index bd291f7..7071d06 100644 --- a/src/LocalPost/Handler.cs +++ b/src/LocalPost/Handler.cs @@ -1,30 +1,10 @@ -using LocalPost.DependencyInjection; -using Microsoft.Extensions.DependencyInjection; - namespace LocalPost; -internal readonly record struct RegistrationContext(IServiceCollection Services, AssistedService Target); - -// TODO Make internal -internal delegate Task StreamProcessor(IAsyncEnumerable stream, CancellationToken ct); - -internal delegate IAsyncEnumerable PipelineFactory(IServiceProvider provider); - -internal delegate void PipelineRegistration(RegistrationContext services, PipelineFactory source); - - - -internal delegate IAsyncEnumerable PipelineMiddleware(IAsyncEnumerable source, - CancellationToken ct = default); - - - public delegate ValueTask Handler(T context, CancellationToken ct); public delegate Handler HandlerFactory(IServiceProvider provider); - public delegate 
Handler HandlerMiddleware(Handler next); // Too narrow use case @@ -37,9 +17,3 @@ public interface IHandler { ValueTask InvokeAsync(TOut payload, CancellationToken ct); } - -// Too narrow use case -// public interface IHandlerMiddleware -// { -// Handler Invoke(Handler next); -// } diff --git a/src/LocalPost/HandlerStack.cs b/src/LocalPost/HandlerStack.cs index 74e0251..446b7ff 100644 --- a/src/LocalPost/HandlerStack.cs +++ b/src/LocalPost/HandlerStack.cs @@ -1,11 +1,4 @@ -using System.Collections.Immutable; -using System.Runtime.CompilerServices; -using System.Threading.Channels; -using JetBrains.Annotations; -using LocalPost.AsyncEnumerable; -using LocalPost.DependencyInjection; using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Logging; namespace LocalPost; @@ -23,253 +16,3 @@ public static class HandlerStack public static HandlerFactory From() where THandler : IHandler => provider => provider.GetRequiredService().InvokeAsync; } - - - -[PublicAPI] -public static class Pipeline -{ - public sealed record ConsumerOptions(ushort MaxConcurrency = 1, bool BreakOnException = false); - - internal sealed class Consumer( - ILogger> logger, - Handler handler, - ushort maxConcurrency = 1, - bool breakOnException = false) - { - public async Task Consume(IAsyncEnumerable queue, CancellationToken execCt) - { - // using var loopCts = new CancellationTokenSource(); - using var loopCts = CancellationTokenSource.CreateLinkedTokenSource(execCt); - // using var cts = CancellationTokenSource.CreateLinkedTokenSource(execCt, loopCts.Token); - var loopCt = loopCts.Token; - - await Task.WhenAll(Enumerable.Range(1, maxConcurrency) - .Select(_ => Loop())); - - return; - - async Task Loop() - { - try - { - await foreach (var message in queue.WithCancellation(loopCt)) - await Handle(message); - } - catch (OperationCanceledException) when (loopCt.IsCancellationRequested) - { - // It is either: - // - app shutdown timeout (force shutdown) - // - handler exception (when 
BreakOnException is set) - // Just break the loop - } - } - - async Task Handle(T message) - { - try - { - await handler(message, execCt); - } - catch (OperationCanceledException) when (execCt.IsCancellationRequested) - { - throw; // App shutdown timeout (force shutdown) - } - catch (Exception e) - { - if (breakOnException) - { - // Break the loop (all the concurrent executions of it) - // ReSharper disable once AccessToDisposedClosure - loopCts.Cancel(); - // Push it up, so the service is marked as unhealthy - throw; - } - - logger.LogError(e, "Failed to handle a message"); - } - } - } - } - - internal static PipelineRegistration Create(HandlerFactory hf, - ushort maxConcurrency, bool breakOnException) => - Create(hf, _ => new ConsumerOptions(maxConcurrency, breakOnException)); - - internal static PipelineRegistration Create(HandlerFactory hf, - Func config) => (context, pf) => - context.Services.AddBackgroundService(provider => - { - var stream = pf(provider); - var (maxConcurrency, breakOnException) = config(provider); - var consumer = new Consumer( - provider.GetRequiredService>>(), - hf(provider), - maxConcurrency, - breakOnException); - - return new StreamRunner(stream, consumer.Consume) - { - Target = context.Target, - }; - }); -} - -[PublicAPI] -internal static class PipelineOps -{ - public static PipelineRegistration Where(this PipelineRegistration next, - Func pred) - { - return next.Map(Filter); - - async IAsyncEnumerable Filter(IAsyncEnumerable source, [EnumeratorCancellation] CancellationToken ct) - { - await foreach (var item in source.WithCancellation(ct)) - if (pred(item)) - yield return item; - } - } - - // TODO Option with a service from the DI provider (add IPipelineMiddleware interface) - public static PipelineRegistration Map(this PipelineRegistration next, - PipelineMiddleware middleware) => - (services, pf) => next(services, pf.Map(middleware)); - - public static PipelineFactory Map(this PipelineFactory pf, - PipelineMiddleware middleware) => 
provider => - { - var source = pf(provider); - return middleware(source); - }; - - private sealed class SharedBuffer(Func config) - { - private Channel? _buffer; - - public Channel GetOrCreate(IServiceProvider provider) - { - if (_buffer is not null) - return _buffer; - - var capacity = config(provider); - return _buffer = Channel.CreateBounded(new BoundedChannelOptions(capacity) - { - FullMode = BoundedChannelFullMode.Wait, - // This is the point in most of the cases, like batching, to have a simple source reader to a buffer, - // so that buffer can be read by multiple consumers - SingleReader = false, - SingleWriter = true, - }); - - } - } - - public static PipelineRegistration Buffer(this PipelineRegistration next, ushort capacity) => - next.Buffer(_ => capacity); - - public static PipelineRegistration Buffer(this PipelineRegistration next, - Func config) - { - var sharedBuffer = new SharedBuffer(config); - - return (context, source) => - { - var (services, target) = context; - - services.AddBackgroundService(provider => - { - // Freeze (resolve) the current pipeline - var stream = source(provider); - // And drain it to the channel, in the background - return new StreamRunner(stream, BufferWriter(provider)) - { - Target = target, - }; - }); - - // Create a new pipeline, from the channel - next(context, provider => - { - var buffer = sharedBuffer.GetOrCreate(provider); - return buffer.Reader.ReadAllAsync(); - }); - }; - - StreamProcessor BufferWriter(IServiceProvider provider) - { - var buffer = sharedBuffer.GetOrCreate(provider); - - return async (source, ct) => - { - try - { - await foreach (var item in source.WithCancellation(ct)) - await buffer.Writer.WriteAsync(item, ct); - } - finally - { - buffer.Writer.Complete(); - } - }; - } - } - - // public static PipelineRegistration Buffer(this PipelineRegistration next, int capacity = 1) - // { - // var buffer = Channel.CreateBounded(new BoundedChannelOptions(capacity) - // { - // FullMode = 
BoundedChannelFullMode.Wait, - // SingleReader = false, // Configure somehow... - // SingleWriter = true, - // }); - // - // return (context, source) => - // { - // var (services, target) = context; - // - // services.AddBackgroundService(provider => - // { - // // Freeze (resolve) the current pipeline - // var stream = source(provider); - // // And drain it to the channel, in the background - // return new PipelineRunner(stream, BufferWriter(provider)) - // { - // Target = target, - // }; - // }); - // - // // Create a new pipeline, from the channel - // next(context, provider => - // { - // var buffer = sharedBuffer.GetOrCreate(provider); - // return buffer.Reader.ReadAllAsync(); - // }); - // }; - // - // async Task WriteToBuffer(IAsyncEnumerable source, CancellationToken ct) - // { - // try - // { - // await foreach (var item in source.WithCancellation(ct)) - // await buffer.Writer.WriteAsync(item, ct); - // } - // finally - // { - // buffer.Writer.Complete(); - // } - // } - // } - - public static PipelineRegistration Batch(this PipelineRegistration> next, - ushort batchMaxSize = 10, int timeWindowDuration = 1_000) => - next.Batch(_ => new BatchOptions(batchMaxSize, timeWindowDuration)); - - public static PipelineRegistration Batch(this PipelineRegistration> next, - Func config) => (context, source) => next(context, provider => - { - var stream = source(provider); - var (batchMaxSize, timeWindowDuration) = config(provider); - return stream.Batch(batchMaxSize, timeWindowDuration); - }); -} diff --git a/src/LocalPost/HandlerStackEx.cs b/src/LocalPost/HandlerStackOps.cs similarity index 65% rename from src/LocalPost/HandlerStackEx.cs rename to src/LocalPost/HandlerStackOps.cs index 2725fac..8aa7e75 100644 --- a/src/LocalPost/HandlerStackEx.cs +++ b/src/LocalPost/HandlerStackOps.cs @@ -1,18 +1,9 @@ -using JetBrains.Annotations; -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Hosting; - namespace LocalPost; [PublicAPI] -public static 
partial class HandlerStackEx +public static class HandlerStackOps { - // Better use a lambda in place, see Scoped() middleware - // public static HandlerFactory Map(this HandlerFactory hf, - // HandlerFactoryMiddleware middleware) => middleware(hf); - - // Just resolve it manually, it's one line longer, same cognitive load or even less, - // and one additional type less + // Just resolve it manually, it's one line longer, same cognitive load or even less, and one additional type less // public static HandlerFactory Map(this HandlerFactory hf, // HandlerMiddlewareFactory middlewareFactory) => provider => // { @@ -22,7 +13,7 @@ public static partial class HandlerStackEx // return m(h); // }; - // Too narrow use case, but makes the Map() method inconvenient to use + // Too narrow use case, and makes Map() inconvenient to use // public static HandlerFactory Map(this HandlerFactory hf, // Func> middlewareFactory) => hf.Map(provider => // middlewareFactory(provider).Invoke); @@ -32,6 +23,9 @@ public static partial class HandlerStackEx // var handler = hf(provider); // return middlewareFactory(provider).Invoke(handler); // }; + // public static HandlerFactory Map(this HandlerFactory hf, + // where T : IHandlerMiddleware => hf.Map(provider => + // ActivatorUtilities.CreateInstance(provider).Invoke); public static HandlerFactory Map(this HandlerFactory hf, HandlerMiddleware middleware) => provider => @@ -43,13 +37,6 @@ public static HandlerFactory Map(this HandlerFactory hf, public static HandlerFactory Touch(this HandlerFactory hf, HandlerMiddleware middleware) => hf.Map(middleware); - // No need, just use a lambda in place - // public static HandlerFactory Map(this HandlerFactory hf, - // where T : IHandlerMiddleware => hf.Map(provider => - // ActivatorUtilities.CreateInstance(provider).Invoke); - // - // public static HandlerFactory Scoped(this HandlerFactory hf) => hf.Map(ScopedHandler.Wrap); - public static HandlerFactory Dispose(this HandlerFactory hf) where T : 
IDisposable => hf.Touch(next => async (context, ct) => { @@ -84,27 +71,4 @@ public static HandlerFactory SkipWhen(this HandlerFactory hf, Func ShutdownOnError(this HandlerFactory hf, int exitCode = 1) => provider => - { - var appLifetime = provider.GetRequiredService(); - var next = hf(provider); - - return async (context, ct) => - { - try - { - await next(context, ct); - } - catch (OperationCanceledException e) when (e.CancellationToken == ct) - { - throw; - } - catch - { - appLifetime.StopApplication(); - Environment.ExitCode = exitCode; - } - }; - }; } diff --git a/src/LocalPost/LocalPost.csproj b/src/LocalPost/LocalPost.csproj index 8ea7f14..8ab7ce7 100644 --- a/src/LocalPost/LocalPost.csproj +++ b/src/LocalPost/LocalPost.csproj @@ -52,11 +52,16 @@ + + + + + - + @@ -67,6 +72,9 @@ + + <_Parameter1>$(MSBuildProjectName).Flow + <_Parameter1>$(MSBuildProjectName).SqsConsumer diff --git a/src/LocalPost/Middlewares.cs b/src/LocalPost/Middlewares.cs index 1e25980..afb659c 100644 --- a/src/LocalPost/Middlewares.cs +++ b/src/LocalPost/Middlewares.cs @@ -1,16 +1,75 @@ using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; namespace LocalPost; public static partial class HandlerStackEx { - // public static HandlerFactory LogErrors(this HandlerFactory hf) => hf.Map(provider => - // ActivatorUtilities.CreateInstance>(provider)); + /// + /// Handle exceptions and log them, to not break the consumer loop. + /// + /// Handler factory to wrap. + /// Handler's payload type. + /// Wrapped handler factory. 
+ public static HandlerFactory LogExceptions(this HandlerFactory hf) => provider => + { + var logger = provider.GetRequiredService>(); + var next = hf(provider); + + return async (context, ct) => + { + try + { + await next(context, ct); + } + catch (OperationCanceledException e) when (e.CancellationToken == ct) + { + throw; + } + catch (Exception e) + { + logger.LogError(e, "Unhandled exception while processing a message"); + } + }; + }; + /// + /// Create a DI scope for every message and resolve the handler from it. + /// + /// Handler factory to wrap. + /// Handler's payload type. + /// Wrapped handler factory. public static HandlerFactory Scoped(this HandlerFactory hf) => provider => + new ScopedHandler(provider.GetRequiredService(), hf).InvokeAsync; + + /// + /// Shutdown the whole app on error. + /// + /// Handler factory to wrap. + /// Process exit code. + /// Handler's payload type. + /// Wrapped handler factory. + public static HandlerFactory ShutdownOnError(this HandlerFactory hf, int exitCode = 1) => provider => { - var scopeFactory = provider.GetRequiredService(); - return new ScopedHandler(scopeFactory, hf).InvokeAsync; + var appLifetime = provider.GetRequiredService(); + var next = hf(provider); + + return async (context, ct) => + { + try + { + await next(context, ct); + } + catch (OperationCanceledException e) when (e.CancellationToken == ct) + { + throw; + } + catch + { + appLifetime.StopApplication(); + Environment.ExitCode = exitCode; + } + }; }; } @@ -27,36 +86,3 @@ public async ValueTask InvokeAsync(T payload, CancellationToken ct) await handler(payload, ct); } } - -// Too narrow use case in the first place, also easier to implement using a lambda -// internal class ErrorLoggingHandler(ILogger logger) : IHandlerMiddleware -// { -// public Handler Invoke(Handler next) => async (context, ct) => -// { -// try -// { -// await next(context, ct); -// } -// catch (OperationCanceledException e) when (e.CancellationToken == ct) -// { -// throw; -// } -// 
catch (Exception e) -// { -// logger.LogError(e, "Unhandled exception while processing a message"); -// } -// }; -// } - -// TODO Just add it as an example, also using Polly -//[PublicAPI] -//public static class Middlewares -//{ -// public static Middleware Timeout(TimeSpan timeout) => next => async (context, ct) => -// { -// using var cts = CancellationTokenSource.CreateLinkedTokenSource(ct); -// cts.CancelAfter(timeout); -// -// await next(context, cts.Token); -// }; -//} diff --git a/src/LocalPost/Options.cs b/src/LocalPost/Options.cs deleted file mode 100644 index 2141de4..0000000 --- a/src/LocalPost/Options.cs +++ /dev/null @@ -1,3 +0,0 @@ -namespace LocalPost; - -public sealed record BatchOptions(int MaxSize = 10, int TimeWindowDuration = 1_000); diff --git a/src/LocalPost/QueuePublisher.cs b/src/LocalPost/QueuePublisher.cs deleted file mode 100644 index 0150c76..0000000 --- a/src/LocalPost/QueuePublisher.cs +++ /dev/null @@ -1,8 +0,0 @@ -namespace LocalPost; - -// TODO Remove?.. -public interface IQueuePublisher -{ - // TODO Custom exception when closed?.. Or just return true/false?.. - ValueTask Enqueue(T item, CancellationToken ct = default); -} diff --git a/src/LocalPost/Reflection.cs b/src/LocalPost/Reflection.cs index 913bea4..4f48d08 100644 --- a/src/LocalPost/Reflection.cs +++ b/src/LocalPost/Reflection.cs @@ -1,21 +1,15 @@ -using System.Diagnostics.CodeAnalysis; -using LocalPost.DependencyInjection; - -namespace LocalPost; +namespace LocalPost; [ExcludeFromCodeCoverage] internal static class Reflection { - public static string FriendlyNameOf(string name) where T : INamedService => - FriendlyNameOf(typeof(T)) + ":" + name; + // public static string FriendlyNameOf(string name) where T : INamedService => + public static string FriendlyNameOf(string? name) => FriendlyNameOf(typeof(T), name); public static string FriendlyNameOf() => FriendlyNameOf(typeof(T)); - public static string FriendlyNameOf(Type type, string? 
instanceName) - { - var name = FriendlyNameOf(type); - return instanceName is null ? name : $"{name}:{instanceName}"; - } + public static string FriendlyNameOf(Type type, string? name) => + FriendlyNameOf(type) + (string.IsNullOrEmpty(name) ? "" : $" (\"{name}\")"); public static string FriendlyNameOf(Type type) => type.IsGenericType switch { diff --git a/src/LocalPost.Resilience/HandlerStackEx.cs b/src/LocalPost/Resilience/HandlerStackEx.cs similarity index 95% rename from src/LocalPost.Resilience/HandlerStackEx.cs rename to src/LocalPost/Resilience/HandlerStackEx.cs index cc91907..41e2cf4 100644 --- a/src/LocalPost.Resilience/HandlerStackEx.cs +++ b/src/LocalPost/Resilience/HandlerStackEx.cs @@ -1,4 +1,3 @@ -using JetBrains.Annotations; using Polly; namespace LocalPost.Resilience; diff --git a/src/LocalPost/globalusings.cs b/src/LocalPost/globalusings.cs new file mode 100644 index 0000000..2f865c2 --- /dev/null +++ b/src/LocalPost/globalusings.cs @@ -0,0 +1,3 @@ +global using JetBrains.Annotations; +global using System.Diagnostics.CodeAnalysis; +global using Microsoft.Extensions.Logging; From 5850617a8ba90c7ba7d2719c3e93809169ac7152 Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Tue, 7 Jan 2025 12:42:22 +0000 Subject: [PATCH 22/33] WIP --- Directory.Build.props | 18 -- LocalPost.sln | 6 - docker-compose.yml | 23 +- .../init/ready.d/sqs.sh | 0 .../BackgroundQueueApp.csproj | 3 +- .../Controllers/WeatherForecastController.cs | 15 +- samples/BackgroundQueueApp/Program.cs | 3 +- .../Properties/launchSettings.json | 2 +- samples/Directory.Build.props | 16 ++ .../KafkaConsumerApp/KafkaConsumerApp.csproj | 7 +- samples/KafkaConsumerApp/Program.cs | 27 ++- samples/SqsConsumerApp/Program.cs | 27 +-- samples/SqsConsumerApp/SqsConsumerApp.csproj | 3 +- src/Directory.Build.props | 45 ++++ .../{ClientFactory.cs => Client.cs} | 80 ++++--- src/LocalPost.KafkaConsumer/Consumer.cs | 72 +++--- .../DependencyInjection/KafkaBuilder.cs | 61 +++-- 
src/LocalPost.KafkaConsumer/HandlerStackEx.cs | 17 +- .../LocalPost.KafkaConsumer.csproj | 40 +--- src/LocalPost.SqsConsumer/Consumer.cs | 61 +++-- .../DependencyInjection/SqsBuilder.cs | 23 +- src/LocalPost.SqsConsumer/HandlerStackEx.cs | 20 +- .../LocalPost.SqsConsumer.csproj | 44 +--- src/LocalPost.SqsConsumer/QueueClient.cs | 13 +- src/LocalPost/AppHealthSupervisor.cs | 4 +- .../BackgroundQueue/BackgroundQueue.cs | 103 +------- .../BackgroundQueue/ConsumeContext.cs | 2 +- .../BackgroundQueuesBuilder.cs | 51 ++-- .../BackgroundQueue/HandlerStackEx.cs | 7 +- src/LocalPost/BackgroundQueue/Options.cs | 6 +- .../DependencyInjection/HealthChecks.cs | 9 + .../ServiceCollectionTools.cs | 18 +- src/LocalPost/Flow/Event.cs | 17 ++ src/LocalPost/Flow/HandlerStackEx.cs | 222 ++++++++++++++++++ src/LocalPost/HandlerStack.cs | 6 + src/LocalPost/HandlerStackOps.cs | 8 +- src/LocalPost/LocalPost.csproj | 36 +-- src/LocalPost/Middlewares.cs | 6 +- src/LocalPost/Primitives.cs | 25 +- tests/Directory.Build.props | 25 ++ .../ConsumerTests.cs | 15 +- .../LocalPost.KafkaConsumer.Tests.csproj | 15 +- .../RedpandaContainer.cs | 2 +- tests/LocalPost.KafkaConsumer.Tests/Usings.cs | 2 - .../globalusings.cs | 4 + .../ConsumerTests.cs | 27 +-- .../LocalPost.SqsConsumer.Tests.csproj | 17 +- tests/LocalPost.SqsConsumer.Tests/Usings.cs | 2 - .../globalusings.cs | 4 + .../AsyncEnumerableMergerTests.cs | 147 ------------ .../BatchingAsyncEnumerableTests.cs | 48 ---- tests/LocalPost.Tests/LocalPost.Tests.csproj | 15 -- tests/LocalPost.Tests/PrimitivesTests.cs | 2 +- tests/LocalPost.Tests/Usings.cs | 2 - tests/LocalPost.Tests/globalusings.cs | 4 + 55 files changed, 735 insertions(+), 742 deletions(-) delete mode 100644 Directory.Build.props rename {localstack_bootstrap => localstack}/init/ready.d/sqs.sh (100%) create mode 100644 samples/Directory.Build.props create mode 100644 src/Directory.Build.props rename src/LocalPost.KafkaConsumer/{ClientFactory.cs => Client.cs} (57%) create mode 100644 
src/LocalPost/Flow/Event.cs create mode 100644 src/LocalPost/Flow/HandlerStackEx.cs create mode 100644 tests/Directory.Build.props delete mode 100644 tests/LocalPost.KafkaConsumer.Tests/Usings.cs create mode 100644 tests/LocalPost.KafkaConsumer.Tests/globalusings.cs delete mode 100644 tests/LocalPost.SqsConsumer.Tests/Usings.cs create mode 100644 tests/LocalPost.SqsConsumer.Tests/globalusings.cs delete mode 100644 tests/LocalPost.Tests/AsyncEnumerable/AsyncEnumerableMergerTests.cs delete mode 100644 tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs delete mode 100644 tests/LocalPost.Tests/Usings.cs create mode 100644 tests/LocalPost.Tests/globalusings.cs diff --git a/Directory.Build.props b/Directory.Build.props deleted file mode 100644 index becbac8..0000000 --- a/Directory.Build.props +++ /dev/null @@ -1,18 +0,0 @@ - - - - 13 - enable - enable - true - - - - - <_Parameter1>$(MSBuildProjectName).Tests - - - <_Parameter1>DynamicProxyGenAssembly2 - - - diff --git a/LocalPost.sln b/LocalPost.sln index 9d2db71..325365c 100644 --- a/LocalPost.sln +++ b/LocalPost.sln @@ -40,8 +40,6 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.NatsConsumer", "s EndProject Project("{F2A71F9B-5D33-465A-A702-920D77279786}") = "BackgroundQueueApp.FSharp", "samples\BackgroundQueueApp.FSharp\BackgroundQueueApp.FSharp.fsproj", "{79CF7EFF-860D-464F-B59A-55E48D25D70C}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.Flow", "src\LocalPost.Flow\LocalPost.Flow.csproj", "{F726A4D7-C35A-417C-8E54-2B6D58FA2747}" -EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -124,10 +122,6 @@ Global {79CF7EFF-860D-464F-B59A-55E48D25D70C}.Debug|Any CPU.Build.0 = Debug|Any CPU {79CF7EFF-860D-464F-B59A-55E48D25D70C}.Release|Any CPU.ActiveCfg = Release|Any CPU {79CF7EFF-860D-464F-B59A-55E48D25D70C}.Release|Any CPU.Build.0 = Release|Any CPU - {F726A4D7-C35A-417C-8E54-2B6D58FA2747}.Debug|Any 
CPU.ActiveCfg = Debug|Any CPU - {F726A4D7-C35A-417C-8E54-2B6D58FA2747}.Debug|Any CPU.Build.0 = Debug|Any CPU - {F726A4D7-C35A-417C-8E54-2B6D58FA2747}.Release|Any CPU.ActiveCfg = Release|Any CPU - {F726A4D7-C35A-417C-8E54-2B6D58FA2747}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(NestedProjects) = preSolution {46FC61E6-D0FB-4D7D-A81B-20EF8D8D1F4E} = {405721DC-F290-4191-B638-9907D5EB042B} diff --git a/docker-compose.yml b/docker-compose.yml index 04c0910..a50a691 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -10,24 +10,25 @@ volumes: services: localstack: # https://docs.localstack.cloud/getting-started/installation/#docker-compose - image: localstack/localstack:3.4 + image: localstack/localstack:4 ports: - - 127.0.0.1:4566:4566 # LocalStack Gateway - - 127.0.0.1:4510-4559:4510-4559 # External services port range + - "127.0.0.1:4566:4566" # LocalStack Gateway + - "127.0.0.1:4510-4559:4510-4559" # External services port range environment: # LocalStack configuration: https://docs.localstack.cloud/references/configuration/ - DEBUG=${DEBUG:-0} - SERVICES=sqs volumes: + # Local volume - localstack:/var/lib/localstack - # https://docs.localstack.cloud/references/init-hooks/ + # Fixtures, see https://docs.localstack.cloud/references/init-hooks/ - ./localstack/init/ready.d:/etc/localstack/init/ready.d" # SQS hooks # Only needed for Lambdas # - /var/run/docker.sock:/var/run/docker.sock redpanda: # Mainly from: https://docs.redpanda.com/redpanda-labs/docker-compose/single-broker/ # See also: https://docs.redpanda.com/current/deploy/deployment-option/self-hosted/docker-image/ - image: docker.redpanda.com/redpandadata/redpanda:v24.1.5 + image: docker.redpanda.com/redpandadata/redpanda:v24.3.2 container_name: redpanda command: - redpanda start @@ -48,10 +49,10 @@ services: - --rpc-addr redpanda:33145 - --advertise-rpc-addr redpanda:33145 ports: - - 18081:18081 - - 18082:18082 - - 19092:19092 - - 19644:9644 + - "18081:18081" + - 
"18082:18082" + - "19092:19092" + - "19644:9644" volumes: - redpanda:/var/lib/redpanda/data networks: @@ -63,7 +64,7 @@ services: # retries: 5 # start_period: 5s redpanda-console: - image: docker.redpanda.com/redpandadata/console:v2.5.2 + image: docker.redpanda.com/redpandadata/console:v2.8.1 entrypoint: /bin/sh command: -c "echo \"$$CONSOLE_CONFIG_FILE\" > /tmp/config.yml; /app/console" environment: @@ -84,7 +85,7 @@ services: - name: local-connect-cluster url: http://connect:8083 ports: - - 8080:8080 + - "8080:8080" networks: - redpanda_network depends_on: diff --git a/localstack_bootstrap/init/ready.d/sqs.sh b/localstack/init/ready.d/sqs.sh similarity index 100% rename from localstack_bootstrap/init/ready.d/sqs.sh rename to localstack/init/ready.d/sqs.sh diff --git a/samples/BackgroundQueueApp/BackgroundQueueApp.csproj b/samples/BackgroundQueueApp/BackgroundQueueApp.csproj index 4440646..b0638c4 100644 --- a/samples/BackgroundQueueApp/BackgroundQueueApp.csproj +++ b/samples/BackgroundQueueApp/BackgroundQueueApp.csproj @@ -9,12 +9,11 @@ - + - diff --git a/samples/BackgroundQueueApp/Controllers/WeatherForecastController.cs b/samples/BackgroundQueueApp/Controllers/WeatherForecastController.cs index 1e84d13..c3e88e1 100644 --- a/samples/BackgroundQueueApp/Controllers/WeatherForecastController.cs +++ b/samples/BackgroundQueueApp/Controllers/WeatherForecastController.cs @@ -5,19 +5,12 @@ namespace BackgroundQueueApp.Controllers; [ApiController] [Route("[controller]")] -public class WeatherForecastController : ControllerBase +public class WeatherForecastController(IBackgroundQueue queue) : ControllerBase { private static readonly string[] Summaries = - { + [ "Freezing", "Bracing", "Chilly", "Cool", "Mild", "Warm", "Balmy", "Hot", "Sweltering", "Scorching" - }; - - private readonly IBackgroundQueue _queue; - - public WeatherForecastController(IBackgroundQueue queue) - { - _queue = queue; - } + ]; [HttpGet(Name = "GetWeatherForecast")] public async ValueTask> Get() @@ 
-29,7 +22,7 @@ public async ValueTask> Get() Summary = Summaries[Random.Shared.Next(Summaries.Length)] }).ToArray(); - await _queue.Enqueue(forecasts[0]); + await queue.Enqueue(forecasts[0]); return forecasts; } diff --git a/samples/BackgroundQueueApp/Program.cs b/samples/BackgroundQueueApp/Program.cs index 14044f4..b3247e9 100644 --- a/samples/BackgroundQueueApp/Program.cs +++ b/samples/BackgroundQueueApp/Program.cs @@ -27,10 +27,11 @@ await Task.Delay(TimeSpan.FromSeconds(2), ct); Console.WriteLine(weather.Summary); }) + .UseMessagePayload() .Scoped() - .UsePayload() .Trace() .UsePollyPipeline(resiliencePipeline) + .LogExceptions() ) ); diff --git a/samples/BackgroundQueueApp/Properties/launchSettings.json b/samples/BackgroundQueueApp/Properties/launchSettings.json index 510119c..70edaa4 100644 --- a/samples/BackgroundQueueApp/Properties/launchSettings.json +++ b/samples/BackgroundQueueApp/Properties/launchSettings.json @@ -8,7 +8,7 @@ "launchUrl": "swagger", "applicationUrl": "https://localhost:7003;http://localhost:5103", "environmentVariables": { - "ASPNETCORE_ENVIRONMENT": "Development", + "ASPNETCORE_ENVIRONMENT": "Development" } } } diff --git a/samples/Directory.Build.props b/samples/Directory.Build.props new file mode 100644 index 0000000..aad1d7c --- /dev/null +++ b/samples/Directory.Build.props @@ -0,0 +1,16 @@ + + + + 13 + enable + enable + true + + false + + + + + + + diff --git a/samples/KafkaConsumerApp/KafkaConsumerApp.csproj b/samples/KafkaConsumerApp/KafkaConsumerApp.csproj index 79aacc5..d25a35e 100644 --- a/samples/KafkaConsumerApp/KafkaConsumerApp.csproj +++ b/samples/KafkaConsumerApp/KafkaConsumerApp.csproj @@ -11,13 +11,10 @@ - - - - - + + diff --git a/samples/KafkaConsumerApp/Program.cs b/samples/KafkaConsumerApp/Program.cs index 8f1af4c..65bce59 100644 --- a/samples/KafkaConsumerApp/Program.cs +++ b/samples/KafkaConsumerApp/Program.cs @@ -1,4 +1,5 @@ using Confluent.Kafka; +using JetBrains.Annotations; using LocalPost; using 
LocalPost.KafkaConsumer; using LocalPost.KafkaConsumer.DependencyInjection; @@ -12,29 +13,29 @@ kafka.Defaults .Bind(builder.Configuration.GetSection("Kafka")) .ValidateDataAnnotations(); - kafka.AddConsumer("example-consumer-group", HandlerStack.From() - .UseKafkaPayload() - .DeserializeJson() - .Acknowledge() - .Scoped() - .Trace() + kafka.AddConsumer("example-consumer-group", + HandlerStack.From() + .UseKafkaPayload() + .Scoped() + .DeserializeJson() + .Trace() + .Acknowledge() + .LogExceptions() ) .Bind(builder.Configuration.GetSection("Kafka:Consumer")) - .ConfigureConsumer(options => + .Configure(options => { - options.AutoOffsetReset = AutoOffsetReset.Earliest; - // options.EnableAutoCommit = false; // TODO DryRun + options.ClientConfig.AutoOffsetReset = AutoOffsetReset.Earliest; + // options.ClientConfig.EnableAutoCommit = false; // DryRun }) .ValidateDataAnnotations(); }); -// TODO Health + Supervisor -var host = builder.Build(); - -await host.RunAsync(); +await builder.Build().RunAsync(); +[UsedImplicitly] public record WeatherForecast(int TemperatureC, int TemperatureF, string Summary); internal sealed class MessageHandler : IHandler diff --git a/samples/SqsConsumerApp/Program.cs b/samples/SqsConsumerApp/Program.cs index fe78cc7..96c4683 100644 --- a/samples/SqsConsumerApp/Program.cs +++ b/samples/SqsConsumerApp/Program.cs @@ -19,28 +19,16 @@ .AddScoped() .AddSqsConsumers(sqs => { - sqs.Defaults.Configure(options => options.MaxConcurrency = 100); + sqs.Defaults.Configure(options => options.MaxNumberOfMessages = 1); sqs.AddConsumer("weather-forecasts", HandlerStack.From() .UseSqsPayload() - .DeserializeJson() - .Acknowledge() .Scoped() + .DeserializeJson() + .Trace() + .Acknowledge() // Do not include DeleteMessage call in the OpenTelemetry root span (transaction) .LogFingersCrossed() - .Trace()); - sqs.Defaults.Configure(options => options.MaxConcurrency = 100); - sqs.AddConsumer("weather-forecasts", - Pipeline.Create( - HandlerStack.From() - 
.UseSqsPayload() - .DeserializeJson() - .Acknowledge() - .Scoped() - .LogFingersCrossed() - .Trace(), - maxConcurrency: 100, - breakOnException: false - ).Buffer(100) + .LogExceptions() ); }); @@ -49,11 +37,6 @@ -record ConsumerOptions -{ - public int MaxConcurrency { get; set; } = 1; -} - [UsedImplicitly] public record WeatherForecast(int TemperatureC, int TemperatureF, string Summary); diff --git a/samples/SqsConsumerApp/SqsConsumerApp.csproj b/samples/SqsConsumerApp/SqsConsumerApp.csproj index 9f6f3e9..b09212a 100644 --- a/samples/SqsConsumerApp/SqsConsumerApp.csproj +++ b/samples/SqsConsumerApp/SqsConsumerApp.csproj @@ -13,9 +13,8 @@ - - + diff --git a/src/Directory.Build.props b/src/Directory.Build.props new file mode 100644 index 0000000..dc110be --- /dev/null +++ b/src/Directory.Build.props @@ -0,0 +1,45 @@ + + + + 13 + enable + enable + true + + true + + false + + Alexey Shokov + https://github.com/alexeyshockov/LocalPost/v$(Version) + MIT + https://github.com/alexeyshockov/LocalPost + git + true + + + + + true + + + + true + true + true + true + snupkg + + + true + + + + + <_Parameter1>$(MSBuildProjectName).Tests + + + <_Parameter1>DynamicProxyGenAssembly2 + + + diff --git a/src/LocalPost.KafkaConsumer/ClientFactory.cs b/src/LocalPost.KafkaConsumer/Client.cs similarity index 57% rename from src/LocalPost.KafkaConsumer/ClientFactory.cs rename to src/LocalPost.KafkaConsumer/Client.cs index 3127447..f31938e 100644 --- a/src/LocalPost.KafkaConsumer/ClientFactory.cs +++ b/src/LocalPost.KafkaConsumer/Client.cs @@ -1,12 +1,45 @@ +using System.Collections; using Confluent.Kafka; namespace LocalPost.KafkaConsumer; +internal sealed class ClientFactory(ILogger logger, ConsumerOptions settings) +{ + public async Task Create(CancellationToken ct) + { + return new Clients(await Task.WhenAll(Enumerable + .Range(0, settings.Consumers) + .Select(_ => Task.Run(CreateClient, ct)) + ).ConfigureAwait(false)); + + Client CreateClient() + { + var consumer = new 
ConsumerBuilder(settings.ClientConfig) + .SetErrorHandler((_, e) => logger.LogError("{Error}", e)) + .SetLogHandler((_, m) => logger.LogDebug(m.Message)) + .Build(); + consumer.Subscribe(settings.Topics); + return new Client(logger, consumer, settings.ClientConfig); + } + } +} + +internal sealed class Clients(Client[] clients) : IReadOnlyCollection +{ + public Task Close(CancellationToken ct) => Task.WhenAll(clients.Select(client => Task.Run(client.Close, ct))); + + public IEnumerator GetEnumerator() => ((IEnumerable)clients).GetEnumerator(); + + IEnumerator IEnumerable.GetEnumerator() => clients.GetEnumerator(); + + public int Count => clients.Length; +} + internal sealed class Client { - private readonly ILogger _logger; + private readonly ILogger _logger; - public Client(ILogger logger, IConsumer consumer, ConsumerConfig config) + public Client(ILogger logger, IConsumer consumer, ConsumerConfig config) { _logger = logger; Consumer = consumer; @@ -46,49 +79,20 @@ public ConsumeResult Consume(CancellationToken ct) } } - public IConsumer Consumer { get; } - public ConsumerConfig Config { get; } - public string ServerAddress { get; } - public int ServerPort { get; } = 9092; -} - -internal sealed class ClientFactory(ILogger logger, ILogger clientLogger) : IDisposable -{ - private List _clients = []; - - public Client Create(ConsumerConfig config, IEnumerable topics) - { - var consumer = new ConsumerBuilder(config) - .SetErrorHandler((_, e) => clientLogger.LogError("{Error}", e)) - .SetLogHandler((_, m) => clientLogger.LogDebug(m.Message)) - .Build(); - consumer.Subscribe(topics); - var client = new Client(clientLogger, consumer, config); - _clients.Add(client); - return client; - } - - private void Close(IConsumer consumer) + public void Close() { try { - consumer.Close(); - } - catch (Exception e) - { - logger.LogError(e, "Error closing Kafka consumer"); + Consumer.Close(); } finally { - consumer.Dispose(); + Consumer.Dispose(); } } - public void Dispose() - { - // 
TODO Run in parallel?.. - foreach (var client in _clients) - Close(client.Consumer); - _clients = []; - } + public IConsumer Consumer { get; } + public ConsumerConfig Config { get; } + public string ServerAddress { get; } + public int ServerPort { get; } = 9092; } diff --git a/src/LocalPost.KafkaConsumer/Consumer.cs b/src/LocalPost.KafkaConsumer/Consumer.cs index e47a8dc..69666db 100644 --- a/src/LocalPost.KafkaConsumer/Consumer.cs +++ b/src/LocalPost.KafkaConsumer/Consumer.cs @@ -1,28 +1,25 @@ using Confluent.Kafka; using LocalPost.DependencyInjection; +using LocalPost.Flow; using Microsoft.Extensions.Diagnostics.HealthChecks; using Microsoft.Extensions.Hosting; namespace LocalPost.KafkaConsumer; -internal sealed class Consumer(string name, ILogger logger, - ClientFactory clientFactory, ConsumerOptions settings, Handler> handler) +internal sealed class Consumer(string name, ILogger logger, + ClientFactory clientFactory, Handler>> handler) : IHostedService, IHealthAwareService, IDisposable { - private sealed class ReadinessHealthCheck(Consumer consumer) : IHealthCheck - { - public Task CheckHealthAsync(HealthCheckContext context, CancellationToken ct = default) => - Task.FromResult(consumer.Ready); - } + private Clients _clients = new([]); private CancellationTokenSource? _execTokenSource; - private Task? _execution; + private Task? _exec; private Exception? _execException; private string? 
_execExceptionDescription; - public string Name { get; } = name; + private CancellationToken _completionToken = CancellationToken.None; - private HealthCheckResult Ready => (_execTokenSource, _execution, _execException) switch + private HealthCheckResult Ready => (_execTokenSource, _execution: _exec, _execException) switch { (null, _, _) => HealthCheckResult.Unhealthy("Not started"), (_, { IsCompleted: true }, _) => HealthCheckResult.Unhealthy("Stopped"), @@ -31,7 +28,7 @@ public Task CheckHealthAsync(HealthCheckContext context, Canc (_, _, not null) => HealthCheckResult.Unhealthy(_execExceptionDescription, _execException), }; - public IHealthCheck ReadinessCheck => new ReadinessHealthCheck(this); + public IHealthCheck ReadinessCheck => HealthChecks.From(() => Ready); private async Task RunConsumerAsync(Client client, CancellationToken execToken) { @@ -42,7 +39,8 @@ private async Task RunConsumerAsync(Client client, CancellationToken execToken) while (!execToken.IsCancellationRequested) { var result = client.Consume(execToken); - await handler(new ConsumeContext(client, result, result.Message.Value), CancellationToken.None); + await handler(new ConsumeContext(client, result, result.Message.Value), CancellationToken.None) + .ConfigureAwait(false); } } catch (OperationCanceledException e) when (e.CancellationToken == execToken) @@ -72,27 +70,36 @@ public async Task StartAsync(CancellationToken ct) throw new InvalidOperationException("Service is already started"); var execTokenSource = _execTokenSource = new CancellationTokenSource(); - var execution = settings.Consumers switch - { - 1 => await StartConsumerAsync(), - _ => Task.WhenAll( - await Task.WhenAll(Enumerable.Range(0, settings.Consumers).Select(_ => StartConsumerAsync()))) - }; - _execution = ObserveExecution(); - return; - async Task StartConsumerAsync() - { - var kafkaClient = await Task.Run(() => clientFactory.Create(settings.ClientConfig, settings.Topics), ct); + logger.LogInformation("Starting Kafka 
consumer..."); + var clients = _clients = await clientFactory.Create(ct).ConfigureAwait(false); + logger.LogInformation("Kafka consumer started"); - return Task.Run(() => RunConsumerAsync(kafkaClient, execTokenSource.Token), ct); - } + logger.LogDebug("Invoking the event handler..."); + await handler(Event>.Begin, ct).ConfigureAwait(false); + logger.LogDebug("Event handler started"); + + _exec = ObserveExecution(); + return; async Task ObserveExecution() { - await execution; - // Can happen before the service shutdown, in case of an error - logger.LogInformation("Kafka consumer stopped"); + try + { + var executions = clients.Select(client => + Task.Run(() => RunConsumerAsync(client, execTokenSource.Token), ct) + ).ToArray(); + await (executions.Length == 1 ? executions[0] : Task.WhenAll(executions)).ConfigureAwait(false); + + // TODO Pass the exception (if any) to the handler + await handler(Event>.End, _completionToken).ConfigureAwait(false); + } + finally + { + // Can happen before the service shutdown, in case of an error + await _clients.Close(_completionToken).ConfigureAwait(false); + logger.LogInformation("Kafka consumer stopped"); + } } } @@ -104,14 +111,17 @@ public async Task StopAsync(CancellationToken forceShutdownToken) if (_execTokenSource is null) throw new InvalidOperationException("Service has not been started"); - logger.LogInformation("Shutting down Kafka consumer"); + logger.LogInformation("Shutting down Kafka consumer..."); + + _completionToken = forceShutdownToken; CancelExecution(); - if (_execution is not null) - await _execution.ConfigureAwait(false); + if (_exec is not null) + await _exec.ConfigureAwait(false); } public void Dispose() { _execTokenSource?.Dispose(); + _exec?.Dispose(); } } diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs index 8188bca..209db93 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs +++ 
b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs @@ -1,5 +1,6 @@ using Confluent.Kafka; using LocalPost.DependencyInjection; +using LocalPost.Flow; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Options; @@ -11,41 +12,63 @@ public sealed class KafkaBuilder(IServiceCollection services) public OptionsBuilder Defaults { get; } = services.AddOptions(); /// - /// Add a Kafka consumer with (should be registered separately) as a message handler. + /// Add a Kafka consumer with a custom message handler. /// - /// Consumer name (should be unique in the application). - /// Message handler type. + /// Message handler factory. /// Consumer options builder. - public OptionsBuilder AddConsumer(string name) - where THandler : IHandler> - => AddConsumer(name, provider => provider.GetRequiredService().InvokeAsync); + public OptionsBuilder AddConsumer(HandlerFactory> hf) => + AddConsumer(Options.DefaultName, hf); /// /// Add a Kafka consumer with a custom message handler. /// - /// Consumer name (should be unique in the application). /// Message handler factory. /// Consumer options builder. - public OptionsBuilder AddConsumer(string name, HandlerFactory> hf) - { - if (string.IsNullOrEmpty(name)) // TODO Just default (empty?) name... - throw new ArgumentException("A proper (non empty) name is required", nameof(name)); + public OptionsBuilder AddConsumer(HandlerFactory>> hf) => + AddConsumer(Options.DefaultName, hf); - services.TryAddSingleton(); + /// + /// Add a Kafka consumer with a custom message handler. + /// + /// Consumer name (should be unique in the application). Also, the default group ID. + /// Message handler factory. + /// Consumer options builder. + public OptionsBuilder AddConsumer(string name, HandlerFactory> hf) => + AddConsumer(name, hf.SelectMessageEvent()); + + /// + /// Add a Kafka consumer with a custom message handler. + /// + /// Consumer name (should be unique in the application). Also, the default group ID. 
+ /// Message handler factory. + /// Consumer options builder. + public OptionsBuilder AddConsumer(string name, HandlerFactory>> hf) + { + var added = services.TryAddKeyedSingleton(name, (provider, _) => + { + var clientFactory = new ClientFactory( + provider.GetLoggerFor(), + provider.GetOptions(name) + ); - var added = services.TryAddKeyedSingleton(name, (provider, _) => new Consumer(name, - provider.GetLoggerFor(), - provider.GetRequiredService(), - provider.GetOptions(name), - hf(provider) - )); + return new Consumer(name, + provider.GetLoggerFor(), + clientFactory, + hf(provider) + ); + }); if (!added) throw new ArgumentException("Consumer is already registered", nameof(name)); services.AddHostedService(provider => provider.GetRequiredKeyedService(name)); - return OptionsFor(name).Configure>((co, defaults) => co.EnrichFrom(defaults.Value)); + return OptionsFor(name).Configure>((co, defaults) => + { + co.EnrichFrom(defaults.Value); + if (!string.IsNullOrEmpty(name)) + co.ClientConfig.GroupId = name; + }); } public OptionsBuilder OptionsFor(string name) => services.AddOptions(name); diff --git a/src/LocalPost.KafkaConsumer/HandlerStackEx.cs b/src/LocalPost.KafkaConsumer/HandlerStackEx.cs index aabc18a..c812e3e 100644 --- a/src/LocalPost.KafkaConsumer/HandlerStackEx.cs +++ b/src/LocalPost.KafkaConsumer/HandlerStackEx.cs @@ -7,7 +7,8 @@ namespace LocalPost.KafkaConsumer; public static class HandlerStackEx { public static HandlerFactory> UseKafkaPayload(this HandlerFactory hf) => - hf.Map, T>(next => async (context, ct) => await next(context.Payload, ct)); + hf.Map, T>(next => async (context, ct) => + await next(context.Payload, ct).ConfigureAwait(false)); public static HandlerFactory> Trace(this HandlerFactory> hf) => hf.Map, ConsumeContext>(next => @@ -16,7 +17,7 @@ public static HandlerFactory> Trace(this HandlerFactory> Acknowledge(this HandlerFacto hf.Map, ConsumeContext>(next => async (context, ct) => { - await next(context, ct); + await next(context, 
ct).ConfigureAwait(false); context.Acknowledge(); }); #region Deserialize() - + public static HandlerFactory> Deserialize( this HandlerFactory> hf, Func, T> deserialize) => - hf.Map, ConsumeContext>(next => - async (context, ct) => await next(context.Transform(deserialize), ct)); + hf.Map, ConsumeContext>(next => async (context, ct) => + await next(context.Transform(deserialize), ct).ConfigureAwait(false)); public static HandlerFactory> Deserialize( this HandlerFactory> hf, Func, Task> deserialize) => - hf.Map, ConsumeContext>(next => - async (context, ct) => await next(await context.Transform(deserialize), ct)); + hf.Map, ConsumeContext>(next => async (context, ct) => + await next(await context.Transform(deserialize).ConfigureAwait(false), ct).ConfigureAwait(false)); private static Func, Task> AsyncDeserializer(IAsyncDeserializer deserializer) => context => deserializer.DeserializeAsync(context.Payload, false, new SerializationContext( diff --git a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj index 0e9a064..9e4add7 100644 --- a/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj +++ b/src/LocalPost.KafkaConsumer/LocalPost.KafkaConsumer.csproj @@ -1,22 +1,14 @@ - net6.0;net8.0 - true - - false + net6;net8 LocalPost.KafkaConsumer - Alexey Shokov - Opinionated Kafka Consumer library, build to be simple, but yet flexible. - https://github.com/alexeyshockov/LocalPost/v$(Version) + + Opinionated Kafka consumer library, build to be simple, but yet flexible. 
background;task;queue;kafka README.md - MIT - https://github.com/alexeyshockov/LocalPost - git - true @@ -24,35 +16,15 @@ - - - true - - - - true - true - true - true - snupkg - - - true - - - - - - - - + + - + diff --git a/src/LocalPost.SqsConsumer/Consumer.cs b/src/LocalPost.SqsConsumer/Consumer.cs index 15044e8..1709955 100644 --- a/src/LocalPost.SqsConsumer/Consumer.cs +++ b/src/LocalPost.SqsConsumer/Consumer.cs @@ -1,29 +1,24 @@ using Amazon.Runtime; using Amazon.SQS; using LocalPost.DependencyInjection; +using LocalPost.Flow; using Microsoft.Extensions.Diagnostics.HealthChecks; using Microsoft.Extensions.Hosting; namespace LocalPost.SqsConsumer; internal sealed class Consumer(string name, ILogger logger, IAmazonSQS sqs, - ConsumerOptions settings, Handler> handler) + ConsumerOptions settings, Handler>> handler) : IHostedService, IHealthAwareService, IDisposable { - private sealed class ReadinessHealthCheck(Consumer consumer) : IHealthCheck - { - public Task CheckHealthAsync(HealthCheckContext context, CancellationToken ct = default) => - Task.FromResult(consumer.Ready); - } - private CancellationTokenSource? _execTokenSource; - private Task? _execution; + private Task? _exec; private Exception? _execException; private string? 
_execExceptionDescription; - public string Name { get; } = name; + private CancellationToken _completionToken = CancellationToken.None; - private HealthCheckResult Ready => (_execTokenSource, _execution, _execException) switch + private HealthCheckResult Ready => (_execTokenSource, _execution: _exec, _execException) switch { (null, _, _) => HealthCheckResult.Unhealthy("Not started"), (_, { IsCompleted: true }, _) => HealthCheckResult.Unhealthy("Stopped"), @@ -32,7 +27,7 @@ public Task CheckHealthAsync(HealthCheckContext context, Canc (_, _, not null) => HealthCheckResult.Unhealthy(_execExceptionDescription, _execException), }; - public IHealthCheck ReadinessCheck => new ReadinessHealthCheck(this); + public IHealthCheck ReadinessCheck => HealthChecks.From(() => Ready); private async Task RunConsumerAsync(QueueClient client, CancellationToken execToken) { @@ -72,28 +67,39 @@ await Task.WhenAll(messages public async Task StartAsync(CancellationToken ct) { if (_execTokenSource is not null) - throw new InvalidOperationException("Service is already started"); + throw new InvalidOperationException("Already started"); var execTokenSource = _execTokenSource = new CancellationTokenSource(); var client = new QueueClient(logger, sqs, settings); await client.Connect(ct).ConfigureAwait(false); - _execution = ObserveExecution(); + await handler(Event>.Begin, ct).ConfigureAwait(false); + + _exec = ObserveExecution(); return; async Task ObserveExecution() { - var execution = settings.Consumers switch + try { - 1 => RunConsumerAsync(client, execTokenSource.Token), - _ => Task.WhenAll(Enumerable - .Range(0, settings.Consumers) - .Select(_ => RunConsumerAsync(client, execTokenSource.Token))) - }; - await execution.ConfigureAwait(false); - // Can happen before the service shutdown, in case of an error - logger.LogInformation("SQS consumer stopped"); + var execution = settings.Consumers switch + { + 1 => RunConsumerAsync(client, execTokenSource.Token), + _ => Task.WhenAll(Enumerable + 
.Range(0, settings.Consumers) + .Select(_ => RunConsumerAsync(client, execTokenSource.Token))) + }; + await execution.ConfigureAwait(false); + + // TODO Pass the exception (if any) to the handler + await handler(Event>.End, _completionToken).ConfigureAwait(false); + } + finally + { + // Can happen before the service shutdown, in case of an error + logger.LogInformation("SQS consumer stopped"); + } } } @@ -103,16 +109,19 @@ async Task ObserveExecution() public async Task StopAsync(CancellationToken forceShutdownToken) { if (_execTokenSource is null) - throw new InvalidOperationException("Service has not been started"); + throw new InvalidOperationException("Has not been started"); + + logger.LogInformation("Shutting down SQS consumer..."); - logger.LogInformation("Shutting down SQS consumer"); + _completionToken = forceShutdownToken; CancelExecution(); - if (_execution is not null) - await _execution.ConfigureAwait(false); + if (_exec is not null) + await _exec.ConfigureAwait(false); } public void Dispose() { _execTokenSource?.Dispose(); + _exec?.Dispose(); } } diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs index 3efdef2..8adcc5e 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs @@ -1,5 +1,6 @@ using Amazon.SQS; using LocalPost.DependencyInjection; +using LocalPost.Flow; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Options; @@ -10,17 +11,31 @@ public sealed class SqsBuilder(IServiceCollection services) { public OptionsBuilder Defaults { get; } = services.AddOptions(); + /// + /// Add an SQS consumer with a custom message handler. + /// + /// Message handler factory. + /// Consumer options builder. + public OptionsBuilder AddConsumer(HandlerFactory> hf) => + AddConsumer(Options.DefaultName, hf); + /// /// Add an SQS consumer with a custom message handler. 
/// /// Consumer name (should be unique in the application). Also, the default queue name. /// Message handler factory. /// Consumer options builder. - public OptionsBuilder AddConsumer(string name, HandlerFactory> hf) - { - if (string.IsNullOrEmpty(name)) // TODO Just default (empty?) name... - throw new ArgumentException("A proper (non empty) name is required", nameof(name)); + public OptionsBuilder AddConsumer(string name, HandlerFactory> hf) => + AddConsumer(name, hf.SelectMessageEvent()); + /// + /// Add an SQS consumer with a custom message handler. + /// + /// Consumer name (should be unique in the application). Also, the default queue name. + /// Message handler factory. + /// Consumer options builder. + public OptionsBuilder AddConsumer(string name, HandlerFactory>> hf) + { var added = services.TryAddKeyedSingleton(name, (provider, _) => new Consumer(name, provider.GetLoggerFor(), provider.GetRequiredService(), diff --git a/src/LocalPost.SqsConsumer/HandlerStackEx.cs b/src/LocalPost.SqsConsumer/HandlerStackEx.cs index 504d03c..36c896b 100644 --- a/src/LocalPost.SqsConsumer/HandlerStackEx.cs +++ b/src/LocalPost.SqsConsumer/HandlerStackEx.cs @@ -6,12 +6,13 @@ namespace LocalPost.SqsConsumer; public static class HandlerStackEx { public static HandlerFactory> UseSqsPayload(this HandlerFactory hf) => - hf.Map, T>(next => async (context, ct) => await next(context.Payload, ct)); + hf.Map, T>(next => async (context, ct) => + await next(context.Payload, ct).ConfigureAwait(false)); public static HandlerFactory>> UseSqsPayload( this HandlerFactory> hf) => - hf.Map>, IEnumerable>(next => - async (batch, ct) => await next(batch.Select(context => context.Payload), ct)); + hf.Map>, IEnumerable>(next => async (batch, ct) => + await next(batch.Select(context => context.Payload), ct).ConfigureAwait(false)); public static HandlerFactory> Trace(this HandlerFactory> hf) => hf.Touch(next => async (context, ct) => @@ -19,7 +20,7 @@ public static HandlerFactory> Trace(this 
HandlerFactory> Trace(this HandlerFactory> Acknowledge(this HandlerFactory> hf) => hf.Touch(next => async (context, ct) => { - await next(context, ct); - await context.Client.DeleteMessage(context, ct); + await next(context, ct).ConfigureAwait(false); + await context.Client.DeleteMessage(context, ct).ConfigureAwait(false); }); public static HandlerFactory> Deserialize( this HandlerFactory> hf, Func, T> deserialize) => hf.Map, ConsumeContext>(next => - async (context, ct) => await next(context.Transform(deserialize), ct)); - - public static HandlerFactory> Deserialize( - this HandlerFactory> hf, Func, Task> deserialize) => - hf.Map, ConsumeContext>(next => - async (context, ct) => await next(await context.Transform(deserialize), ct)); public static HandlerFactory> DeserializeJson( this HandlerFactory> hf, JsonSerializerOptions? options = null) => diff --git a/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj b/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj index dfedf91..c038360 100644 --- a/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj +++ b/src/LocalPost.SqsConsumer/LocalPost.SqsConsumer.csproj @@ -1,22 +1,14 @@ - net6.0;net8.0 - true - - false + net6;net8 LocalPost.SqsConsumer - Alexey Shokov + Amazon SQS queue consumer for background message processing. 
- https://github.com/alexeyshockov/LocalPost/releases/v$(Version) background;task;queue;amazon;sqs;aws README.md - MIT - https://github.com/alexeyshockov/LocalPost - git - true @@ -24,35 +16,23 @@ - - - true - - - - true - true - true - true - snupkg - - - true - - - + + - - + + - - + + - + diff --git a/src/LocalPost.SqsConsumer/QueueClient.cs b/src/LocalPost.SqsConsumer/QueueClient.cs index 2b6fbfa..df4b41e 100644 --- a/src/LocalPost.SqsConsumer/QueueClient.cs +++ b/src/LocalPost.SqsConsumer/QueueClient.cs @@ -42,9 +42,9 @@ public async Task Connect(CancellationToken ct) { if (string.IsNullOrEmpty(options.QueueUrl)) // Checking for a possible error in the response would be also good... - _queueUrl = (await sqs.GetQueueUrlAsync(options.QueueName, ct)).QueueUrl; + _queueUrl = (await sqs.GetQueueUrlAsync(options.QueueName, ct).ConfigureAwait(false)).QueueUrl; - await FetchQueueAttributes(ct); + await FetchQueueAttributes(ct).ConfigureAwait(false); } private async Task FetchQueueAttributes(CancellationToken ct) @@ -52,7 +52,7 @@ private async Task FetchQueueAttributes(CancellationToken ct) try { // Checking for a possible error in the response would be also good... 
- _queueAttributes = await sqs.GetQueueAttributesAsync(QueueUrl, ["All"], ct); + _queueAttributes = await sqs.GetQueueAttributesAsync(QueueUrl, ["All"], ct).ConfigureAwait(false); } catch (OperationCanceledException e) when (e.CancellationToken == ct) { @@ -65,7 +65,7 @@ private async Task FetchQueueAttributes(CancellationToken ct) } public async Task> PullMessages(CancellationToken ct) => - await _pipeline.ExecuteAsync(PullMessagesCore, ct); + await _pipeline.ExecuteAsync(PullMessagesCore, ct).ConfigureAwait(false); private async ValueTask> PullMessagesCore(CancellationToken ct) { @@ -82,9 +82,10 @@ private async ValueTask> PullMessagesCore(CancellationToken MaxNumberOfMessages = options.MaxNumberOfMessages, AttributeNames = options.AttributeNames, MessageAttributeNames = options.MessageAttributeNames, - }, ct); + }, ct).ConfigureAwait(false); activity?.SetTagsFor(response); + activity?.Success(); return response.Messages; } @@ -102,7 +103,7 @@ private async ValueTask> PullMessagesCore(CancellationToken public async Task DeleteMessage(ConsumeContext context, CancellationToken ct = default) { using var activity = Tracing.StartSettling(context); - await sqs.DeleteMessageAsync(QueueUrl, context.ReceiptHandle, ct); + await sqs.DeleteMessageAsync(QueueUrl, context.ReceiptHandle, ct).ConfigureAwait(false); // TODO Log failures?.. 
} diff --git a/src/LocalPost/AppHealthSupervisor.cs b/src/LocalPost/AppHealthSupervisor.cs index 74c251a..a01a47f 100644 --- a/src/LocalPost/AppHealthSupervisor.cs +++ b/src/LocalPost/AppHealthSupervisor.cs @@ -20,7 +20,7 @@ protected override async Task ExecuteAsync(CancellationToken ct) { while (!ct.IsCancellationRequested) { - var result = await Check(ct); + var result = await Check(ct).ConfigureAwait(false); if (result.Status == HealthStatus.Unhealthy) { logger.LogError("Health check failed, stopping the application..."); @@ -29,7 +29,7 @@ protected override async Task ExecuteAsync(CancellationToken ct) break; } - await Task.Delay(CheckInterval, ct); + await Task.Delay(CheckInterval, ct).ConfigureAwait(false); } } } diff --git a/src/LocalPost/BackgroundQueue/BackgroundQueue.cs b/src/LocalPost/BackgroundQueue/BackgroundQueue.cs index 6bbbe43..3997f36 100644 --- a/src/LocalPost/BackgroundQueue/BackgroundQueue.cs +++ b/src/LocalPost/BackgroundQueue/BackgroundQueue.cs @@ -1,125 +1,42 @@ using System.Threading.Channels; using LocalPost.DependencyInjection; +using LocalPost.Flow; using Microsoft.Extensions.Diagnostics.HealthChecks; using Microsoft.Extensions.Hosting; namespace LocalPost.BackgroundQueue; internal sealed class BackgroundQueue(ILogger> logger, QueueOptions settings, - Handler> handler) : IBackgroundQueue, IHostedService, IHealthAwareService, IDisposable + Channel> queue, ChannelRunner, ConsumeContext> runner) + : IBackgroundQueue, IHostedService, IHealthAwareService, IDisposable { - private sealed class ReadinessHealthCheck(BackgroundQueue queue) : IHealthCheck - { - public Task CheckHealthAsync(HealthCheckContext context, CancellationToken ct = default) => - Task.FromResult(queue.Ready); - } - - private CancellationTokenSource? _execTokenSource; - private Task? _execution; - private Exception? _execException; - private string? 
_execExceptionDescription; - - private HealthCheckResult Ready => (_execTokenSource, _execution, _execException) switch - { - (null, _, _) => HealthCheckResult.Unhealthy("Not started"), - (_, { IsCompleted: true }, _) => HealthCheckResult.Unhealthy("Stopped"), - (not null, null, _) => HealthCheckResult.Degraded("Starting"), - (not null, not null, null) => HealthCheckResult.Healthy("Running"), - (_, _, not null) => HealthCheckResult.Unhealthy(_execExceptionDescription, _execException), - }; - - public IHealthCheck ReadinessCheck => new ReadinessHealthCheck(this); - - private readonly Channel> _queue = settings.BufferSize switch - { - null => Channel.CreateUnbounded>(new UnboundedChannelOptions - { - SingleReader = settings.MaxConcurrency == 1, - SingleWriter = settings.SingleProducer, - }), - _ => Channel.CreateBounded>(new BoundedChannelOptions(settings.BufferSize.Value) - { - FullMode = settings.FullMode, - SingleReader = settings.MaxConcurrency == 1, - SingleWriter = settings.SingleProducer, - }) - }; + public IHealthCheck ReadinessCheck => HealthChecks.From(() => runner.Ready); - public ValueTask Enqueue(T payload, CancellationToken ct = default) => _queue.Writer.WriteAsync(payload, ct); + public ValueTask Enqueue(T payload, CancellationToken ct = default) => queue.Writer.WriteAsync(payload, ct); - public ChannelWriter> Writer => _queue.Writer; - - private async Task RunAsync(CancellationToken execToken) - { - // (Optionally) wait for app start - - try - { - await foreach (var message in _queue.Reader.ReadAllAsync(execToken)) - await handler(message, CancellationToken.None); - } - catch (OperationCanceledException e) when (e.CancellationToken == execToken) - { - // logger.LogInformation("Background queue consumer shutdown"); - } - catch (Exception e) - { - logger.LogCritical(e, "Background queue message handler error"); - (_execException, _execExceptionDescription) = (e, "Message handler failed"); - } - finally - { - CloseChannel(); // Stop other consumers too - 
} - } - - private void CloseChannel() => _queue.Writer.Complete(); - - private void CancelExecution() => _execTokenSource?.Cancel(); + public ChannelWriter> Writer => queue.Writer; public async Task StartAsync(CancellationToken ct) { - if (_execTokenSource is not null) - throw new InvalidOperationException("Service is already started"); - - var execTokenSource = _execTokenSource = new CancellationTokenSource(); - _execution = ObserveExecution(); - await Task.Yield(); - return; - - async Task ObserveExecution() - { - var execution = settings.MaxConcurrency switch - { - 1 => RunAsync(execTokenSource.Token), - _ => Task.WhenAll(Enumerable.Range(0, settings.MaxConcurrency).Select(_ => RunAsync(execTokenSource.Token))) - }; - await execution.ConfigureAwait(false); - // Can happen before the service shutdown, in case of an error - logger.LogInformation("Background queue stopped"); - } + await runner.Start(ct).ConfigureAwait(false); } public async Task StopAsync(CancellationToken forceShutdownToken) { - if (_execTokenSource is null) - throw new InvalidOperationException("Service has not been started"); - logger.LogInformation("Shutting down background queue"); try { + // Wait until all the producers are done await settings.CompletionTrigger(forceShutdownToken).ConfigureAwait(false); } finally { - CloseChannel(); + await runner.Stop(forceShutdownToken).ConfigureAwait(false); } - if (_execution is not null) - await _execution.ConfigureAwait(false); } public void Dispose() { - _execTokenSource?.Dispose(); + runner?.Dispose(); } } diff --git a/src/LocalPost/BackgroundQueue/ConsumeContext.cs b/src/LocalPost/BackgroundQueue/ConsumeContext.cs index f7cac0d..8cd77f0 100644 --- a/src/LocalPost/BackgroundQueue/ConsumeContext.cs +++ b/src/LocalPost/BackgroundQueue/ConsumeContext.cs @@ -4,7 +4,7 @@ namespace LocalPost.BackgroundQueue; [PublicAPI] -public readonly record struct ConsumeContext +public readonly record struct ConsumeContext // TODO Rename { public readonly 
ActivityContext? ActivityContext; public readonly T Payload; diff --git a/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs b/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs index 8a5861c..4da019f 100644 --- a/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs +++ b/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs @@ -1,4 +1,6 @@ +using System.Threading.Channels; using LocalPost.DependencyInjection; +using LocalPost.Flow; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Options; @@ -10,7 +12,7 @@ public class BackgroundQueuesBuilder(IServiceCollection services) public OptionsBuilder> AddDefaultJobQueue() => AddJobQueue( HandlerStack.For(async (job, ct) => await job(ct).ConfigureAwait(false)) .Scoped() - .UsePayload() + .UseMessagePayload() .Trace() .LogExceptions() ); @@ -24,27 +26,46 @@ internal OptionsBuilder> AddJobQueue(HandlerFactory< return AddQueue(hf); } - private OptionsBuilder> AddQueue(HandlerFactory> hf) - { - // TODO Check if a non-keyed service should be added + public OptionsBuilder> AddQueue(HandlerFactory> hf) => + AddQueue(Options.DefaultName, hf); - return AddQueue(Options.DefaultName, hf); - } + public OptionsBuilder> AddQueue(string name, HandlerFactory> hf) => + AddQueue(name, hf.SelectMessageEvent()); - private OptionsBuilder> AddQueue(string name, HandlerFactory> hf) + public OptionsBuilder> AddQueue(string name, HandlerFactory>> hf) { if (!services.TryAddSingletonAlias, BackgroundQueue>(name)) - throw new InvalidOperationException( - $"{Reflection.FriendlyNameOf>(name)}> is already registered."); - - services.TryAddKeyedSingleton(name, (provider, _) => new BackgroundQueue( - provider.GetLoggerFor>(), - provider.GetOptions>(name), - hf(provider) - )); + // throw new InvalidOperationException( + // $"{Reflection.FriendlyNameOf>(name)}> is already registered."); + throw new ArgumentException("Queue is already 
registered", nameof(name)); + + services.TryAddKeyedSingleton(name, CreateQueue); services.AddHostedService(provider => provider.GetRequiredKeyedService>(name)); return QueueFor(name); + + BackgroundQueue CreateQueue(IServiceProvider provider, object? key) + { + var settings = provider.GetOptions>(name); + var channel = settings.Capacity switch + { + null => Channel.CreateUnbounded>(new UnboundedChannelOptions + { + SingleReader = settings.MaxConcurrency == 1, + SingleWriter = settings.SingleProducer, + }), + _ => Channel.CreateBounded>(new BoundedChannelOptions(settings.Capacity.Value) + { + FullMode = settings.FullMode, + SingleReader = settings.MaxConcurrency == 1, + SingleWriter = settings.SingleProducer, + }) + }; + var handler = hf(provider); + var runner = ChannelRunner.Create(channel, handler, settings.MaxConcurrency, settings.ProcessLeftovers); + + return new BackgroundQueue(provider.GetLoggerFor>(), settings, channel, runner); + } } public OptionsBuilder> QueueFor() => diff --git a/src/LocalPost/BackgroundQueue/HandlerStackEx.cs b/src/LocalPost/BackgroundQueue/HandlerStackEx.cs index 3d862df..604bed2 100644 --- a/src/LocalPost/BackgroundQueue/HandlerStackEx.cs +++ b/src/LocalPost/BackgroundQueue/HandlerStackEx.cs @@ -5,8 +5,9 @@ namespace LocalPost.BackgroundQueue; [PublicAPI] public static class HandlerStackEx { - public static HandlerFactory> UsePayload(this HandlerFactory hf) => - hf.Map, T>(next => async (context, ct) => await next(context.Payload, ct)); + public static HandlerFactory> UseMessagePayload(this HandlerFactory hf) => + hf.Map, T>(next => async (context, ct) => + await next(context.Payload, ct).ConfigureAwait(false)); public static HandlerFactory> Trace(this HandlerFactory> hf) { @@ -20,7 +21,7 @@ public static HandlerFactory> Trace(this HandlerFactory /// [Required] [Range(1, ushort.MaxValue)] - public ushort MaxConcurrency { get; set; } = 50; + public ushort MaxConcurrency { get; set; } = 10; + + public bool ProcessLeftovers { get; set; 
} = true; [Range(1, int.MaxValue)] - public int? BufferSize { get; set; } = 1000; + public int? Capacity { get; set; } = 1000; /// /// How to handle new messages when the underlying channel is full. Default is to drop the oldest message diff --git a/src/LocalPost/DependencyInjection/HealthChecks.cs b/src/LocalPost/DependencyInjection/HealthChecks.cs index 0e4c80b..07ada20 100644 --- a/src/LocalPost/DependencyInjection/HealthChecks.cs +++ b/src/LocalPost/DependencyInjection/HealthChecks.cs @@ -32,6 +32,15 @@ public static IServiceCollection AddAppHealthSupervisor(this IServiceCollection internal static partial class HealthChecks { + private sealed class LambdaHealthCheck(Func check) : IHealthCheck + { + public Task CheckHealthAsync(HealthCheckContext context, CancellationToken ct = default) => + Task.FromResult(check()); + } + + public static IHealthCheck From(Func check) => + new LambdaHealthCheck(check); + public static HealthCheckRegistration Readiness(string name, HealthStatus? failureStatus = null, IEnumerable? 
tags = null) where T : IHealthAwareService => diff --git a/src/LocalPost/DependencyInjection/ServiceCollectionTools.cs b/src/LocalPost/DependencyInjection/ServiceCollectionTools.cs index 2870147..644d1c7 100644 --- a/src/LocalPost/DependencyInjection/ServiceCollectionTools.cs +++ b/src/LocalPost/DependencyInjection/ServiceCollectionTools.cs @@ -1,4 +1,5 @@ using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Options; namespace LocalPost.DependencyInjection; @@ -36,11 +37,9 @@ public static bool TryAdd(this IServiceCollection services, ServiceDescriptor de static bool IsEqual(ServiceDescriptor a, ServiceDescriptor b) { - var equal = a.ServiceType == b.ServiceType; // && a.Lifetime == b.Lifetime; - if (equal && a is { IsKeyedService: true } && b is { IsKeyedService: true }) - return a.ServiceKey == b.ServiceKey; + var equal = a.ServiceType == b.ServiceType && a.IsKeyedService == b.IsKeyedService; - return equal; + return equal && a.IsKeyedService ? a.ServiceKey == b.ServiceKey : equal; } } @@ -61,6 +60,13 @@ public static bool TryAddSingletonAlias(this IServiceCollectio public static bool TryAddSingletonAlias(this IServiceCollection services, object key) where TAlias : class - where TService : class, TAlias => - services.TryAddKeyedSingleton(key, (provider, _) => provider.GetRequiredKeyedService(key)); + where TService : class, TAlias + { + var added = services.TryAddKeyedSingleton(key, (provider, _) => + provider.GetRequiredKeyedService(key)); + if (added && key is string name && name == Options.DefaultName) + services.TryAddSingleton(provider => + provider.GetRequiredKeyedService(Options.DefaultName)); + return added; + } } diff --git a/src/LocalPost/Flow/Event.cs b/src/LocalPost/Flow/Event.cs new file mode 100644 index 0000000..4bde1ab --- /dev/null +++ b/src/LocalPost/Flow/Event.cs @@ -0,0 +1,17 @@ +namespace LocalPost.Flow; + +public enum EventType : byte +{ + Message, + Begin, + End, +} + +[PublicAPI] +public readonly record struct 
Event(EventType Type, T Payload = default) +{ + public static Event Begin => new(EventType.Begin); + public static Event End => new(EventType.End); + + public static implicit operator Event(T payload) => new(EventType.Message, payload); +} diff --git a/src/LocalPost/Flow/HandlerStackEx.cs b/src/LocalPost/Flow/HandlerStackEx.cs new file mode 100644 index 0000000..46edbc5 --- /dev/null +++ b/src/LocalPost/Flow/HandlerStackEx.cs @@ -0,0 +1,222 @@ +using System.Collections.Immutable; +using System.Threading.Channels; +using Microsoft.Extensions.Diagnostics.HealthChecks; + +namespace LocalPost.Flow; + +[PublicAPI] +public static partial class HandlerStackEx +{ + /// + /// Gateway from T to Event{T}. + /// + /// Message handler factory. + /// Message payload type. + /// Wrapped handler factory. + public static HandlerFactory> SelectMessageEvent(this HandlerFactory hf) => hf.Map, T>( + next => (flowEvent, ct) => flowEvent.Type switch + { + EventType.Message => next(flowEvent.Payload!, ct), + _ => ValueTask.CompletedTask, + }); + + public static HandlerFactory> Buffer(this HandlerFactory> hf, + int capacity, + int consumers = 1, bool singleProducer = false) + { + var channel = Channel.CreateBounded(new BoundedChannelOptions(capacity) + { + FullMode = BoundedChannelFullMode.Wait, + SingleReader = consumers == 1, + SingleWriter = singleProducer, + }); + + return hf.Buffer(channel, consumers); + } + + private static HandlerFactory> Buffer(this HandlerFactory> hf, Channel channel, + int consumers = 1) => provider => + { + var handler = hf(provider); + var buffer = new ChannelRunner(channel, Consume, handler) { Consumers = consumers }; + + return (flowEvent, ct) => flowEvent.Type switch + { + EventType.Begin => buffer.Start(ct), + EventType.Message => channel.Writer.WriteAsync(flowEvent.Payload, ct), + EventType.End => buffer.Stop(ct), + _ => ValueTask.CompletedTask, + }; + + async Task Consume(CancellationToken execToken) + { + await foreach (var message in 
channel.Reader.ReadAllAsync(execToken).ConfigureAwait(false)) + await handler(message, CancellationToken.None).ConfigureAwait(false); + } + }; + + public static HandlerFactory> Batch(this HandlerFactory>> hf, + int size, TimeSpan window, + int capacity = 1, int consumers = 1, bool singleProducer = false) => provider => + { + var channel = Channel.CreateBounded(new BoundedChannelOptions(capacity) + { + FullMode = BoundedChannelFullMode.Wait, + SingleReader = consumers == 1, + SingleWriter = singleProducer, + }); + var handler = hf(provider); + var buffer = new ChannelRunner>(channel, Consume, handler) { Consumers = consumers }; + + return (flowEvent, ct) => flowEvent.Type switch + { + EventType.Begin => buffer.Start(ct), + EventType.Message => channel.Writer.WriteAsync(flowEvent.Payload, ct), + EventType.End => buffer.Stop(ct), + _ => ValueTask.CompletedTask, + }; + + async Task Consume(CancellationToken execToken) + { + var reader = channel.Reader; + + var completed = false; + var batchBuilder = ImmutableArray.CreateBuilder(size); + + while (!completed) + { + using var timeWindowCts = CancellationTokenSource.CreateLinkedTokenSource(execToken); + timeWindowCts.CancelAfter(window); + try + { + while (batchBuilder.Count < size) + { + var item = await reader.ReadAsync(timeWindowCts.Token).ConfigureAwait(false); + batchBuilder.Add(item); + } + } + catch (OperationCanceledException) when (!execToken.IsCancellationRequested) + { + // Batch window is closed + } + catch (Exception) // execToken.IsCancellationRequested + ChannelClosedException + { + completed = true; + } + + if (batchBuilder.Count == 0) + continue; + + // If Capacity equals Count, the internal array will be extracted without copying the contents. + // Otherwise, the contents will be copied into a new array. The internal buffer will then be set to a + // zero length array. 
+ var batch = batchBuilder.DrainToImmutable(); + await handler(batch, CancellationToken.None).ConfigureAwait(false); + } + } + }; +} + +internal static class ChannelRunner +{ + public static ChannelRunner Create(Channel channel, Handler> handler, + int consumers = 1, bool processLeftovers = true) + { + return new ChannelRunner(channel, Consume, handler) + { Consumers = consumers, ProcessLeftovers = processLeftovers }; + + async Task Consume(CancellationToken execToken) + { + await foreach (var message in channel.Reader.ReadAllAsync(execToken).ConfigureAwait(false)) + await handler(message, CancellationToken.None).ConfigureAwait(false); + } + } +} + +internal sealed class ChannelRunner(Channel channel, + Func consumer, Handler> handler) : IDisposable +{ + public HealthCheckResult Ready => (_execTokenSource, _exec, _execException) switch + { + (null, _, _) => HealthCheckResult.Unhealthy("Not started"), + (_, { IsCompleted: true }, _) => HealthCheckResult.Unhealthy("Stopped"), + (not null, null, _) => HealthCheckResult.Degraded("Starting"), + (not null, not null, null) => HealthCheckResult.Healthy("Running"), + (_, _, not null) => HealthCheckResult.Unhealthy(null, _execException), + }; + + public PositiveInt Consumers { get; init; } = 1; + public bool ProcessLeftovers { get; init; } = true; + + private CancellationTokenSource? _execTokenSource; + private Task? _exec; + private Exception? 
_execException; + + private CancellationToken _completionToken = CancellationToken.None; + + public async ValueTask Start(CancellationToken ct) + { + if (_execTokenSource is not null) + throw new InvalidOperationException("Already started"); + + var execTokenSource = _execTokenSource = new CancellationTokenSource(); + + await handler(Event.Begin, ct).ConfigureAwait(false); + + _exec = Run(execTokenSource.Token); + } + + private async Task Run(CancellationToken execToken) + { + var exec = Consumers.Value switch + { + 1 => RunConsumer(execToken), + _ => Task.WhenAll(Enumerable.Range(0, Consumers).Select(_ => RunConsumer(execToken))) + }; + await exec.ConfigureAwait(false); + + await handler(Event.End, _completionToken).ConfigureAwait(false); + } + + private async Task RunConsumer(CancellationToken execToken) + { + try + { + await consumer(execToken).ConfigureAwait(false); + } + catch (OperationCanceledException e) when (e.CancellationToken == execToken) + { + // OK, fine + } + catch (ChannelClosedException) + { + // OK, fine + } + catch (Exception e) + { + Close(e); + } + } + + public async ValueTask Stop(CancellationToken ct) + { + _completionToken = ct; + Close(); + if (_exec is not null) + await _exec.ConfigureAwait(false); + } + + public void Dispose() + { + _execTokenSource?.Dispose(); + _exec?.Dispose(); + } + + private void Close(Exception? 
e = null) + { + channel.Writer.TryComplete(e); + _execException ??= e; + if (!ProcessLeftovers) + _execTokenSource?.Cancel(); + } +} diff --git a/src/LocalPost/HandlerStack.cs b/src/LocalPost/HandlerStack.cs index 446b7ff..c03a4b1 100644 --- a/src/LocalPost/HandlerStack.cs +++ b/src/LocalPost/HandlerStack.cs @@ -11,6 +11,12 @@ public static class HandlerStack [PublicAPI] public static class HandlerStack { + public static HandlerFactory For(Action syncHandler) => For((payload, _) => + { + syncHandler(payload); + return default; + }); + public static HandlerFactory For(Handler handler) => _ => handler; public static HandlerFactory From() where THandler : IHandler => diff --git a/src/LocalPost/HandlerStackOps.cs b/src/LocalPost/HandlerStackOps.cs index 8aa7e75..7d234e5 100644 --- a/src/LocalPost/HandlerStackOps.cs +++ b/src/LocalPost/HandlerStackOps.cs @@ -42,7 +42,7 @@ public static HandlerFactory Dispose(this HandlerFactory hf) where T : { try { - await next(context, ct); + await next(context, ct).ConfigureAwait(false); } finally { @@ -55,11 +55,11 @@ public static HandlerFactory DisposeAsync(this HandlerFactory hf) where { try { - await next(context, ct); + await next(context, ct).ConfigureAwait(false); } finally { - await context.DisposeAsync(); + await context.DisposeAsync().ConfigureAwait(false); } }); @@ -69,6 +69,6 @@ public static HandlerFactory SkipWhen(this HandlerFactory hf, Func - net6.0;net8.0 - true - LocalPost - - false + net6;net8 + LocalPost LocalPost - Alexey Shokov + Local (in-process) background queue. 
- https://github.com/alexeyshockov/LocalPost/v$(Version) background;task;queue;coravel;hangfire README.md - MIT - https://github.com/alexeyshockov/LocalPost - git - true @@ -25,22 +17,6 @@ - - - true - - - - true - true - true - true - snupkg - - - true - - @@ -65,10 +41,10 @@ - - + + - + diff --git a/src/LocalPost/Middlewares.cs b/src/LocalPost/Middlewares.cs index afb659c..0d28cdb 100644 --- a/src/LocalPost/Middlewares.cs +++ b/src/LocalPost/Middlewares.cs @@ -3,7 +3,7 @@ namespace LocalPost; -public static partial class HandlerStackEx +public static partial class Middlewares { /// /// Handle exceptions and log them, to not break the consumer loop. @@ -20,7 +20,7 @@ public static HandlerFactory LogExceptions(this HandlerFactory hf) => p { try { - await next(context, ct); + await next(context, ct).ConfigureAwait(false); } catch (OperationCanceledException e) when (e.CancellationToken == ct) { @@ -58,7 +58,7 @@ public static HandlerFactory ShutdownOnError(this HandlerFactory hf, in { try { - await next(context, ct); + await next(context, ct).ConfigureAwait(false); } catch (OperationCanceledException e) when (e.CancellationToken == ct) { diff --git a/src/LocalPost/Primitives.cs b/src/LocalPost/Primitives.cs index 08dec0e..5ecd97f 100644 --- a/src/LocalPost/Primitives.cs +++ b/src/LocalPost/Primitives.cs @@ -1,22 +1,27 @@ namespace LocalPost; // int, 1 <= value <= int.MaxValue -internal readonly record struct MaxSize +internal readonly record struct PositiveInt { - public static implicit operator int(MaxSize batchSize) => batchSize.Value; + public static implicit operator int(PositiveInt num) => num.Value; - public static implicit operator MaxSize(int batchSize) => new(batchSize); - public static implicit operator MaxSize(short batchSize) => new(batchSize); - public static implicit operator MaxSize(ushort batchSize) => new(batchSize); + public static implicit operator PositiveInt(int num) => new(num); + public static implicit operator PositiveInt(short num) => 
new(num); + public static implicit operator PositiveInt(ushort num) => new(num); private readonly int _value; - public int Value => _value == 0 ? 1 : _value; // Default value... + public int Value => _value == 0 ? 1 : _value; // Default value - public MaxSize(int value) + private PositiveInt(int num) { - if (value < 1) - throw new ArgumentOutOfRangeException(nameof(value), value, "Value must be greater than or equal to 1"); + if (num < 1) + throw new ArgumentOutOfRangeException(nameof(num), num, "Must be greater than or equal to 1"); - _value = value; + _value = num; + } + + public void Deconstruct(out int value) + { + value = Value; } } diff --git a/tests/Directory.Build.props b/tests/Directory.Build.props new file mode 100644 index 0000000..5852105 --- /dev/null +++ b/tests/Directory.Build.props @@ -0,0 +1,25 @@ + + + + 13 + enable + enable + true + + false + + + + + + + + + + + + + + + + diff --git a/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs b/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs index 5f766c0..c47b93d 100644 --- a/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs +++ b/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs @@ -3,7 +3,6 @@ using LocalPost.KafkaConsumer.DependencyInjection; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Hosting; -using Xunit.Abstractions; namespace LocalPost.KafkaConsumer.Tests; @@ -64,18 +63,14 @@ public async Task handles_messages() .Scoped() .Trace() ) - .ConfigureConsumer(consumer => + .Configure(co => { - consumer.BootstrapServers = _container.GetBootstrapAddress(); + co.ClientConfig.BootstrapServers = _container.GetBootstrapAddress(); // This is the default value, from the name parameter above - // consumer.GroupId = "test-consumer"; - consumer.Topic = Topic; + // co.ClientConfig.GroupId = "test-consumer"; + co.Topics.Add(Topic); // Otherwise the client attaches to the end of the topic, skipping all the published messages - consumer.AutoOffsetReset = 
AutoOffsetReset.Earliest; - }) - .Configure(pipeline => - { - pipeline.MaxConcurrency = 2; + co.ClientConfig.AutoOffsetReset = AutoOffsetReset.Earliest; }) .ValidateDataAnnotations()); diff --git a/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj b/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj index 7df79fe..ca9fa2d 100644 --- a/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj +++ b/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj @@ -2,27 +2,14 @@ net6;net8 - - false - - - - - - - + - - - - - diff --git a/tests/LocalPost.KafkaConsumer.Tests/RedpandaContainer.cs b/tests/LocalPost.KafkaConsumer.Tests/RedpandaContainer.cs index cf96115..ad34474 100644 --- a/tests/LocalPost.KafkaConsumer.Tests/RedpandaContainer.cs +++ b/tests/LocalPost.KafkaConsumer.Tests/RedpandaContainer.cs @@ -9,7 +9,7 @@ namespace LocalPost.KafkaConsumer.Tests; // See also https://github.com/testcontainers/testcontainers-dotnet/blob/develop/src/Testcontainers.Kafka/KafkaBuilder.cs public sealed class RedpandaBuilder : ContainerBuilder { - public const string RedpandaImage = "docker.redpanda.com/redpandadata/redpanda:v24.1.7"; + public const string RedpandaImage = "docker.redpanda.com/redpandadata/redpanda:v24.3.2"; public const ushort KafkaPort = 9092; public const ushort KafkaAdminPort = 9644; diff --git a/tests/LocalPost.KafkaConsumer.Tests/Usings.cs b/tests/LocalPost.KafkaConsumer.Tests/Usings.cs deleted file mode 100644 index 91743bb..0000000 --- a/tests/LocalPost.KafkaConsumer.Tests/Usings.cs +++ /dev/null @@ -1,2 +0,0 @@ -global using Xunit; -global using FluentAssertions; diff --git a/tests/LocalPost.KafkaConsumer.Tests/globalusings.cs b/tests/LocalPost.KafkaConsumer.Tests/globalusings.cs new file mode 100644 index 0000000..8ab2b64 --- /dev/null +++ b/tests/LocalPost.KafkaConsumer.Tests/globalusings.cs @@ -0,0 +1,4 @@ +global using System.Threading.Tasks; +global using Xunit; +global using 
Xunit.Abstractions; +global using FluentAssertions; diff --git a/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs b/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs index e5356c9..dbf1434 100644 --- a/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs +++ b/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs @@ -5,7 +5,6 @@ using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Hosting; using Testcontainers.LocalStack; -using Xunit.Abstractions; namespace LocalPost.SqsConsumer.Tests; @@ -13,18 +12,18 @@ public class ConsumerTests(ITestOutputHelper output) : IAsyncLifetime { // Called for each test, since each test instantiates a new class instance private readonly LocalStackContainer _container = new LocalStackBuilder() - .WithImage("localstack/localstack:3.4") + .WithImage("localstack/localstack:4") .WithEnvironment("SERVICES", "sqs") .Build(); + private readonly AWSCredentials _credentials = new BasicAWSCredentials("LSIAQAAAAAAVNCBMPNSG", "any"); private const string QueueName = "weather-forecasts"; private string? 
_queueUrl; - private IAmazonSQS CreateClient() => - new AmazonSQSClient(new BasicAWSCredentials("LSIAQAAAAAAVNCBMPNSG", "any"), - new AmazonSQSConfig { ServiceURL = _container.GetConnectionString() }); + private IAmazonSQS CreateClient() => new AmazonSQSClient(_credentials, + new AmazonSQSConfig { ServiceURL = _container.GetConnectionString() }); public async Task InitializeAsync() { @@ -48,17 +47,17 @@ public async Task handles_messages() .AddDefaultAWSOptions(new AWSOptions() { DefaultClientConfig = { ServiceURL = _container.GetConnectionString() }, - Credentials = new BasicAWSCredentials("LSIAQAAAAAAVNCBMPNSG", "any") + Credentials = _credentials, }) .AddAWSService() - .AddSqsConsumers(sqs => sqs.AddConsumer(QueueName, HandlerStack.For(async (payload, _) => - { - received.Add(payload); - }) - .UseSqsPayload() - .Acknowledge() - .Scoped() - .Trace())); + .AddSqsConsumers(sqs => sqs.AddConsumer(QueueName, + HandlerStack.For(payload => received.Add(payload)) + .Scoped() + .UseSqsPayload() + .Trace() + .LogExceptions() + .Acknowledge() // Acknowledge in any case, because we caught any possible exceptions before + )); var host = hostBuilder.Build(); diff --git a/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj b/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj index b1a9237..dca90b3 100644 --- a/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj +++ b/tests/LocalPost.SqsConsumer.Tests/LocalPost.SqsConsumer.Tests.csproj @@ -2,27 +2,14 @@ net6;net8 - - false - - - - - - - - + + - - - - - diff --git a/tests/LocalPost.SqsConsumer.Tests/Usings.cs b/tests/LocalPost.SqsConsumer.Tests/Usings.cs deleted file mode 100644 index 91743bb..0000000 --- a/tests/LocalPost.SqsConsumer.Tests/Usings.cs +++ /dev/null @@ -1,2 +0,0 @@ -global using Xunit; -global using FluentAssertions; diff --git a/tests/LocalPost.SqsConsumer.Tests/globalusings.cs b/tests/LocalPost.SqsConsumer.Tests/globalusings.cs new file mode 100644 index 
0000000..8ab2b64 --- /dev/null +++ b/tests/LocalPost.SqsConsumer.Tests/globalusings.cs @@ -0,0 +1,4 @@ +global using System.Threading.Tasks; +global using Xunit; +global using Xunit.Abstractions; +global using FluentAssertions; diff --git a/tests/LocalPost.Tests/AsyncEnumerable/AsyncEnumerableMergerTests.cs b/tests/LocalPost.Tests/AsyncEnumerable/AsyncEnumerableMergerTests.cs deleted file mode 100644 index 394827f..0000000 --- a/tests/LocalPost.Tests/AsyncEnumerable/AsyncEnumerableMergerTests.cs +++ /dev/null @@ -1,147 +0,0 @@ -using System.Threading.Channels; -using FluentAssertions; -using LocalPost.AsyncEnumerable; - -namespace LocalPost.Tests.AsyncEnumerable; - -public class AsyncEnumerableMergerTests -{ - [Fact] - internal async Task aggregates_multiple_channels() - { - var source1 = Channel.CreateUnbounded(new UnboundedChannelOptions - { - SingleReader = true, - SingleWriter = false - }); - var source2 = Channel.CreateUnbounded(new UnboundedChannelOptions - { - SingleReader = true, - SingleWriter = false - }); - var results = new AsyncEnumerableMerger(new[] - { - source1.Reader.ReadAllAsync(), source2.Reader.ReadAllAsync() - }); - - await Task.WhenAll(Produce(), Consume()); - return; - - async Task Produce() - { - await source1.Writer.WriteAsync(1); - await source2.Writer.WriteAsync(1); - await source1.Writer.WriteAsync(1); - - await Task.Delay(TimeSpan.FromSeconds(1)); - - source1.Writer.Complete(); - await source2.Writer.WriteAsync(4); - - await Task.Delay(TimeSpan.FromSeconds(1)); - - source2.Writer.Complete(); - } - - async Task Consume() - { - var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5)); - - var expect = new Queue(); - expect.Enqueue(1); - expect.Enqueue(1); - expect.Enqueue(1); - expect.Enqueue(4); - - await foreach (var r in results.WithCancellation(cts.Token)) - r.Should().Be(expect.Dequeue()); - - expect.Should().BeEmpty(); - } - } - - [Fact] - internal async Task aggregates_multiple_channels_over_time() - { - var source1 = 
Channel.CreateUnbounded(new UnboundedChannelOptions - { - SingleReader = true, - SingleWriter = false - }); - var source2 = Channel.CreateUnbounded(new UnboundedChannelOptions - { - SingleReader = true, - SingleWriter = false - }); - var results = new AsyncEnumerableMerger(true); - - await Task.WhenAll(Produce(), Consume()); - return; - - async Task Produce() - { - await source1.Writer.WriteAsync(1); - await source2.Writer.WriteAsync(2); - await source1.Writer.WriteAsync(3); - - await Task.Delay(TimeSpan.FromSeconds(1)); // Does not matter - - results.Add(source1.Reader.ReadAllAsync()); - - source1.Writer.Complete(); - - await source2.Writer.WriteAsync(4); - - results.Add(source2.Reader.ReadAllAsync()); - - await Task.Delay(TimeSpan.FromSeconds(1)); - - source2.Writer.Complete(); - } - - async Task Consume() - { - var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5)); - - var expect = new Queue(); - expect.Enqueue(1); - expect.Enqueue(3); - expect.Enqueue(2); - expect.Enqueue(4); - - try - { - await foreach (var r in results.WithCancellation(cts.Token)) - { -// r.Should().Be(expect.Dequeue()); - expect.Dequeue(); - } - } - catch (OperationCanceledException e) when (e.CancellationToken == cts.Token) - { - // Should happen - } - - cts.IsCancellationRequested.Should().BeTrue(); - expect.Should().BeEmpty(); - } - } - - [Fact] - internal async Task aggregates_multiple_channels_permanently() - { - var sut = new AsyncEnumerableMerger(true); - var cts = new CancellationTokenSource(TimeSpan.FromSeconds(5)); - - try - { - await foreach (var r in sut.WithCancellation(cts.Token)) - { - } - } - catch (OperationCanceledException) - { - cts.IsCancellationRequested.Should().BeTrue(); - } - } -} diff --git a/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs b/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs deleted file mode 100644 index 3080bea..0000000 --- a/tests/LocalPost.Tests/AsyncEnumerable/BatchingAsyncEnumerableTests.cs +++ 
/dev/null @@ -1,48 +0,0 @@ -using System.Threading.Channels; -using LocalPost.AsyncEnumerable; - -namespace LocalPost.Tests.AsyncEnumerable; - -public class BatchingAsyncEnumerableTests -{ - [Fact] - internal async Task collects_in_batches() - { - var source = Channel.CreateUnbounded(new UnboundedChannelOptions - { - SingleReader = true, - SingleWriter = false - }); - var results = source.Reader.ReadAllAsync().Batch(10, TimeSpan.FromSeconds(2)); - - await Task.WhenAll(Produce(), Consume()); - return; - - async Task Produce() - { - await source.Writer.WriteAsync(1); - await source.Writer.WriteAsync(2); - await source.Writer.WriteAsync(3); - - await Task.Delay(TimeSpan.FromSeconds(3)); - - await source.Writer.WriteAsync(4); - await source.Writer.WriteAsync(5); - - source.Writer.Complete(); - } - - async Task Consume() - { - var expect = new Queue(); - expect.Enqueue([1, 2, 3]); - expect.Enqueue([4, 5]); - await foreach (var batch in results) - { - batch.Should().ContainInOrder(expect.Dequeue()); - } - - expect.Should().BeEmpty(); - } - } -} diff --git a/tests/LocalPost.Tests/LocalPost.Tests.csproj b/tests/LocalPost.Tests/LocalPost.Tests.csproj index 0fdfd4f..bbb9b08 100644 --- a/tests/LocalPost.Tests/LocalPost.Tests.csproj +++ b/tests/LocalPost.Tests/LocalPost.Tests.csproj @@ -2,23 +2,8 @@ net6;net8 - - false - - - - - - - - - - - - - diff --git a/tests/LocalPost.Tests/PrimitivesTests.cs b/tests/LocalPost.Tests/PrimitivesTests.cs index d3967ac..d4eae08 100644 --- a/tests/LocalPost.Tests/PrimitivesTests.cs +++ b/tests/LocalPost.Tests/PrimitivesTests.cs @@ -5,7 +5,7 @@ public class PrimitivesTests [Fact] public void MaxSize_implicit_conversion() { - MaxSize batchSize = default; + PositiveInt batchSize = default; int value = batchSize; value.Should().Be(1); diff --git a/tests/LocalPost.Tests/Usings.cs b/tests/LocalPost.Tests/Usings.cs deleted file mode 100644 index 91743bb..0000000 --- a/tests/LocalPost.Tests/Usings.cs +++ /dev/null @@ -1,2 +0,0 @@ -global using Xunit; 
-global using FluentAssertions; diff --git a/tests/LocalPost.Tests/globalusings.cs b/tests/LocalPost.Tests/globalusings.cs new file mode 100644 index 0000000..8ab2b64 --- /dev/null +++ b/tests/LocalPost.Tests/globalusings.cs @@ -0,0 +1,4 @@ +global using System.Threading.Tasks; +global using Xunit; +global using Xunit.Abstractions; +global using FluentAssertions; From c2e240425812fc5012907fab291bcdddd53ff0bf Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Mon, 20 Jan 2025 14:52:05 +0000 Subject: [PATCH 23/33] WIP --- .../DependencyInjection/KafkaBuilder.cs | 8 ---- src/LocalPost/Flow/Event.cs | 41 +++++++++++++++---- src/LocalPost/Middlewares.cs | 13 +++++- .../ConsumerTests.cs | 23 ++++------- 4 files changed, 51 insertions(+), 34 deletions(-) diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs index 209db93..730bc43 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs @@ -19,14 +19,6 @@ public sealed class KafkaBuilder(IServiceCollection services) public OptionsBuilder AddConsumer(HandlerFactory> hf) => AddConsumer(Options.DefaultName, hf); - /// - /// Add a Kafka consumer with a custom message handler. - /// - /// Message handler factory. - /// Consumer options builder. - public OptionsBuilder AddConsumer(HandlerFactory>> hf) => - AddConsumer(Options.DefaultName, hf); - /// /// Add a Kafka consumer with a custom message handler. /// diff --git a/src/LocalPost/Flow/Event.cs b/src/LocalPost/Flow/Event.cs index 4bde1ab..23717c2 100644 --- a/src/LocalPost/Flow/Event.cs +++ b/src/LocalPost/Flow/Event.cs @@ -1,17 +1,40 @@ namespace LocalPost.Flow; -public enum EventType : byte + + + +public interface IHandlerManager { - Message, - Begin, - End, + ValueTask Start(CancellationToken ct); + + ValueTask Handle(T payload, CancellationToken ct); // Handler + + ValueTask Stop(Exception? 
error, CancellationToken ct); } -[PublicAPI] -public readonly record struct Event(EventType Type, T Payload = default) +internal class HandlerManager(Handler handler) : IHandlerManager { - public static Event Begin => new(EventType.Begin); - public static Event End => new(EventType.End); + public ValueTask Start(CancellationToken ct) => ValueTask.CompletedTask; + + public ValueTask Handle(T payload, CancellationToken ct) => handler(payload, ct); - public static implicit operator Event(T payload) => new(EventType.Message, payload); + public ValueTask Stop(Exception? error, CancellationToken ct) => ValueTask.CompletedTask; } + + +// public enum EventType : byte +// { +// Message, // With required payload +// Begin, // Empty +// End, // With an optional error +// } +// +// [PublicAPI] +// public readonly record struct Event(EventType Type, T Payload = default!, Exception? Error = null) +// { +// public static Event Begin => new(EventType.Begin); +// public static Event End => new(EventType.End); +// public static Event Fail(Exception e) => new(EventType.End, Error: e); +// +// public static implicit operator Event(T payload) => new(EventType.Message, payload); +// } diff --git a/src/LocalPost/Middlewares.cs b/src/LocalPost/Middlewares.cs index 0d28cdb..b14edca 100644 --- a/src/LocalPost/Middlewares.cs +++ b/src/LocalPost/Middlewares.cs @@ -1,3 +1,4 @@ +using System.Text; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Hosting; @@ -5,6 +6,15 @@ namespace LocalPost; public static partial class Middlewares { + public static HandlerFactory DecodeString(this HandlerFactory hf) => + DecodeString(hf, Encoding.UTF8); + + public static HandlerFactory DecodeString(this HandlerFactory hf, Encoding encoding) => hf.Map(next => async (payload, ct) => + { + var s = encoding.GetString(payload); + await next(s, ct).ConfigureAwait(false); + }); + /// /// Handle exceptions and log them, to not break the consumer loop. 
/// @@ -82,7 +92,6 @@ public async ValueTask InvokeAsync(T payload, CancellationToken ct) await using var scope = sf.CreateAsyncScope(); var handler = hf(scope.ServiceProvider); - - await handler(payload, ct); + await handler(payload, ct).ConfigureAwait(false); } } diff --git a/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs b/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs index c47b93d..bcf2ced 100644 --- a/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs +++ b/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs @@ -48,25 +48,18 @@ public async Task handles_messages() var hostBuilder = Host.CreateApplicationBuilder(); hostBuilder.Services.AddKafkaConsumers(kafka => kafka - .AddConsumer("test-consumer", HandlerStack.For((payload, _) => - { - received.Add(payload); - return default; - }) - .Map(next => async (payload, ct) => - { - // TODO Support string payload out of the box?.. - await next(Encoding.UTF8.GetString(payload), ct); - }) - .UseKafkaPayload() - .Acknowledge() - .Scoped() - .Trace() + .AddConsumer("test-consumer", + HandlerStack.For(payload => received.Add(payload)) + .Scoped() + .DecodeString() + .UseKafkaPayload() + .Acknowledge() + .Trace() ) .Configure(co => { co.ClientConfig.BootstrapServers = _container.GetBootstrapAddress(); - // This is the default value, from the name parameter above + // Already set, see above // co.ClientConfig.GroupId = "test-consumer"; co.Topics.Add(Topic); // Otherwise the client attaches to the end of the topic, skipping all the published messages From 1920ba3e4ad0a5ae529a8fe8d2a5cbbdc5b18475 Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Wed, 22 Jan 2025 11:31:57 +0000 Subject: [PATCH 24/33] HandlerManager --- src/LocalPost.KafkaConsumer/Consumer.cs | 9 +- .../DependencyInjection/KafkaBuilder.cs | 4 +- src/LocalPost.SqsConsumer/Consumer.cs | 9 +- .../DependencyInjection/SqsBuilder.cs | 4 +- .../BackgroundQueue/BackgroundQueue.cs | 4 +- .../BackgroundQueuesBuilder.cs | 8 +- src/LocalPost/Flow/Event.cs | 
6 +- src/LocalPost/Flow/HandlerStackEx.cs | 131 +++++++++++------- 8 files changed, 99 insertions(+), 76 deletions(-) diff --git a/src/LocalPost.KafkaConsumer/Consumer.cs b/src/LocalPost.KafkaConsumer/Consumer.cs index 69666db..6b98a69 100644 --- a/src/LocalPost.KafkaConsumer/Consumer.cs +++ b/src/LocalPost.KafkaConsumer/Consumer.cs @@ -7,7 +7,7 @@ namespace LocalPost.KafkaConsumer; internal sealed class Consumer(string name, ILogger logger, - ClientFactory clientFactory, Handler>> handler) + ClientFactory clientFactory, IHandlerManager> handler) : IHostedService, IHealthAwareService, IDisposable { private Clients _clients = new([]); @@ -39,7 +39,7 @@ private async Task RunConsumerAsync(Client client, CancellationToken execToken) while (!execToken.IsCancellationRequested) { var result = client.Consume(execToken); - await handler(new ConsumeContext(client, result, result.Message.Value), CancellationToken.None) + await handler.Handle(new ConsumeContext(client, result, result.Message.Value), CancellationToken.None) .ConfigureAwait(false); } } @@ -76,7 +76,7 @@ public async Task StartAsync(CancellationToken ct) logger.LogInformation("Kafka consumer started"); logger.LogDebug("Invoking the event handler..."); - await handler(Event>.Begin, ct).ConfigureAwait(false); + await handler.Start(ct).ConfigureAwait(false); logger.LogDebug("Event handler started"); _exec = ObserveExecution(); @@ -91,8 +91,7 @@ async Task ObserveExecution() ).ToArray(); await (executions.Length == 1 ? 
executions[0] : Task.WhenAll(executions)).ConfigureAwait(false); - // TODO Pass the exception (if any) to the handler - await handler(Event>.End, _completionToken).ConfigureAwait(false); + await handler.Stop(_execException, _completionToken).ConfigureAwait(false); } finally { diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs index 730bc43..09b34d6 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs @@ -26,7 +26,7 @@ public OptionsBuilder AddConsumer(HandlerFactoryMessage handler factory. /// Consumer options builder. public OptionsBuilder AddConsumer(string name, HandlerFactory> hf) => - AddConsumer(name, hf.SelectMessageEvent()); + AddConsumer(name, hf.AsHandlerManager()); /// /// Add a Kafka consumer with a custom message handler. @@ -34,7 +34,7 @@ public OptionsBuilder AddConsumer(string name, HandlerFactoryConsumer name (should be unique in the application). Also, the default group ID. /// Message handler factory. /// Consumer options builder. - public OptionsBuilder AddConsumer(string name, HandlerFactory>> hf) + public OptionsBuilder AddConsumer(string name, HandlerManagerFactory> hf) { var added = services.TryAddKeyedSingleton(name, (provider, _) => { diff --git a/src/LocalPost.SqsConsumer/Consumer.cs b/src/LocalPost.SqsConsumer/Consumer.cs index 1709955..487862e 100644 --- a/src/LocalPost.SqsConsumer/Consumer.cs +++ b/src/LocalPost.SqsConsumer/Consumer.cs @@ -8,7 +8,7 @@ namespace LocalPost.SqsConsumer; internal sealed class Consumer(string name, ILogger logger, IAmazonSQS sqs, - ConsumerOptions settings, Handler>> handler) + ConsumerOptions settings, IHandlerManager> handler) : IHostedService, IHealthAwareService, IDisposable { private CancellationTokenSource? 
_execTokenSource; @@ -40,7 +40,7 @@ private async Task RunConsumerAsync(QueueClient client, CancellationToken execTo var messages = await client.PullMessages(execToken).ConfigureAwait(false); await Task.WhenAll(messages .Select(message => new ConsumeContext(client, message, message.Body)) - .Select(context => handler(context, CancellationToken.None).AsTask())) + .Select(context => handler.Handle(context, CancellationToken.None).AsTask())) .ConfigureAwait(false); } } @@ -74,7 +74,7 @@ public async Task StartAsync(CancellationToken ct) var client = new QueueClient(logger, sqs, settings); await client.Connect(ct).ConfigureAwait(false); - await handler(Event>.Begin, ct).ConfigureAwait(false); + await handler.Start(ct).ConfigureAwait(false); _exec = ObserveExecution(); return; @@ -92,8 +92,7 @@ async Task ObserveExecution() }; await execution.ConfigureAwait(false); - // TODO Pass the exception (if any) to the handler - await handler(Event>.End, _completionToken).ConfigureAwait(false); + await handler.Stop(_execException, _completionToken).ConfigureAwait(false); } finally { diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs index 8adcc5e..f2d8a05 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs @@ -26,7 +26,7 @@ public OptionsBuilder AddConsumer(HandlerFactoryMessage handler factory. /// Consumer options builder. public OptionsBuilder AddConsumer(string name, HandlerFactory> hf) => - AddConsumer(name, hf.SelectMessageEvent()); + AddConsumer(name, hf.AsHandlerManager()); /// /// Add an SQS consumer with a custom message handler. @@ -34,7 +34,7 @@ public OptionsBuilder AddConsumer(string name, HandlerFactoryConsumer name (should be unique in the application). Also, the default queue name. /// Message handler factory. /// Consumer options builder. 
- public OptionsBuilder AddConsumer(string name, HandlerFactory>> hf) + public OptionsBuilder AddConsumer(string name, HandlerManagerFactory> hf) { var added = services.TryAddKeyedSingleton(name, (provider, _) => new Consumer(name, provider.GetLoggerFor(), diff --git a/src/LocalPost/BackgroundQueue/BackgroundQueue.cs b/src/LocalPost/BackgroundQueue/BackgroundQueue.cs index 3997f36..25e11be 100644 --- a/src/LocalPost/BackgroundQueue/BackgroundQueue.cs +++ b/src/LocalPost/BackgroundQueue/BackgroundQueue.cs @@ -31,12 +31,12 @@ public async Task StopAsync(CancellationToken forceShutdownToken) } finally { - await runner.Stop(forceShutdownToken).ConfigureAwait(false); + await runner.Stop(null, forceShutdownToken).ConfigureAwait(false); } } public void Dispose() { - runner?.Dispose(); + runner.Dispose(); } } diff --git a/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs b/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs index 4da019f..23aaeef 100644 --- a/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs +++ b/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs @@ -30,9 +30,9 @@ public OptionsBuilder> AddQueue(HandlerFactory> AddQueue(string name, HandlerFactory> hf) => - AddQueue(name, hf.SelectMessageEvent()); + AddQueue(name, hf.AsHandlerManager()); - public OptionsBuilder> AddQueue(string name, HandlerFactory>> hf) + public OptionsBuilder> AddQueue(string name, HandlerManagerFactory> hmf) { if (!services.TryAddSingletonAlias, BackgroundQueue>(name)) // throw new InvalidOperationException( @@ -61,8 +61,8 @@ BackgroundQueue CreateQueue(IServiceProvider provider, object? 
key) SingleWriter = settings.SingleProducer, }) }; - var handler = hf(provider); - var runner = ChannelRunner.Create(channel, handler, settings.MaxConcurrency, settings.ProcessLeftovers); + var hm = hmf(provider); + var runner = ChannelRunner.Create(channel, hm, settings.MaxConcurrency, settings.ProcessLeftovers); return new BackgroundQueue(provider.GetLoggerFor>(), settings, channel, runner); } diff --git a/src/LocalPost/Flow/Event.cs b/src/LocalPost/Flow/Event.cs index 23717c2..bc71e75 100644 --- a/src/LocalPost/Flow/Event.cs +++ b/src/LocalPost/Flow/Event.cs @@ -1,7 +1,6 @@ namespace LocalPost.Flow; - - +public delegate IHandlerManager HandlerManagerFactory(IServiceProvider provider); public interface IHandlerManager { @@ -12,7 +11,7 @@ public interface IHandlerManager ValueTask Stop(Exception? error, CancellationToken ct); } -internal class HandlerManager(Handler handler) : IHandlerManager +internal sealed class HandlerManager(Handler handler) : IHandlerManager { public ValueTask Start(CancellationToken ct) => ValueTask.CompletedTask; @@ -21,7 +20,6 @@ internal class HandlerManager(Handler handler) : IHandlerManager public ValueTask Stop(Exception? error, CancellationToken ct) => ValueTask.CompletedTask; } - // public enum EventType : byte // { // Message, // With required payload diff --git a/src/LocalPost/Flow/HandlerStackEx.cs b/src/LocalPost/Flow/HandlerStackEx.cs index 46edbc5..2ad8125 100644 --- a/src/LocalPost/Flow/HandlerStackEx.cs +++ b/src/LocalPost/Flow/HandlerStackEx.cs @@ -7,22 +7,28 @@ namespace LocalPost.Flow; [PublicAPI] public static partial class HandlerStackEx { - /// - /// Gateway from T to Event{T}. - /// - /// Message handler factory. - /// Message payload type. - /// Wrapped handler factory. 
- public static HandlerFactory> SelectMessageEvent(this HandlerFactory hf) => hf.Map, T>( - next => (flowEvent, ct) => flowEvent.Type switch - { - EventType.Message => next(flowEvent.Payload!, ct), - _ => ValueTask.CompletedTask, - }); + // Keep it internal for now, until it's clear that this generic transformation is useful + internal static HandlerManagerFactory AsHandlerManager(this HandlerFactory hf) => provider => + { + var handler = hf(provider); + return new HandlerManager(handler); + }; - public static HandlerFactory> Buffer(this HandlerFactory> hf, - int capacity, - int consumers = 1, bool singleProducer = false) + public static HandlerManagerFactory Buffer(this HandlerFactory hf, + int capacity, int consumers = 1, bool singleProducer = false) => + hf.AsHandlerManager().Buffer(capacity, consumers, singleProducer); + + public static HandlerManagerFactory Batch(this HandlerFactory> hf, + int size, TimeSpan window, + int capacity = 1, int consumers = 1, bool singleProducer = false) => + hf.AsHandlerManager().Batch(size, window, capacity, consumers, singleProducer); +} + +[PublicAPI] +public static partial class HandlerManagerStackEx +{ + public static HandlerManagerFactory Buffer(this HandlerManagerFactory hmf, + int capacity, int consumers = 1, bool singleProducer = false) { var channel = Channel.CreateBounded(new BoundedChannelOptions(capacity) { @@ -31,33 +37,48 @@ public static HandlerFactory> Buffer(this HandlerFactory> h SingleWriter = singleProducer, }); - return hf.Buffer(channel, consumers); + return hmf.Buffer(channel, consumers); } - private static HandlerFactory> Buffer(this HandlerFactory> hf, Channel channel, - int consumers = 1) => provider => + private static HandlerManagerFactory Buffer(this HandlerManagerFactory hmf, + Channel channel, int consumers = 1) => provider => { - var handler = hf(provider); - var buffer = new ChannelRunner(channel, Consume, handler) { Consumers = consumers }; - - return (flowEvent, ct) => flowEvent.Type switch - { 
- EventType.Begin => buffer.Start(ct), - EventType.Message => channel.Writer.WriteAsync(flowEvent.Payload, ct), - EventType.End => buffer.Stop(ct), - _ => ValueTask.CompletedTask, - }; + var next = hmf(provider); + var buffer = new ChannelRunner(channel, Consume, next) { Consumers = consumers }; + return new BufferHandlerManager(channel, buffer); async Task Consume(CancellationToken execToken) { await foreach (var message in channel.Reader.ReadAllAsync(execToken).ConfigureAwait(false)) - await handler(message, CancellationToken.None).ConfigureAwait(false); - } + await next.Handle(message, CancellationToken.None).ConfigureAwait(false); + }; }; - public static HandlerFactory> Batch(this HandlerFactory>> hf, + public static HandlerManagerFactory Batch(this HandlerManagerFactory> hmf, int size, TimeSpan window, int capacity = 1, int consumers = 1, bool singleProducer = false) => provider => + { + var next = hmf(provider); + return BatchHandlerManager.Create(next, size, window, capacity, consumers, singleProducer); + }; +} + +internal sealed class BufferHandlerManager(Channel channel, + ChannelRunner runner) : IHandlerManager +{ + public ValueTask Start(CancellationToken ct) => runner.Start(ct); + + public ValueTask Handle(T payload, CancellationToken ct) => channel.Writer.WriteAsync(payload, ct); + + public ValueTask Stop(Exception? 
error, CancellationToken ct) => runner.Stop(error, ct); +} + +internal sealed class BatchHandlerManager(Channel channel, + ChannelRunner> runner) : IHandlerManager +{ + public static BatchHandlerManager Create(IHandlerManager> next, + int size, TimeSpan window, + int capacity = 1, int consumers = 1, bool singleProducer = false) { var channel = Channel.CreateBounded(new BoundedChannelOptions(capacity) { @@ -65,16 +86,10 @@ public static HandlerFactory> Batch(this HandlerFactory>(channel, Consume, handler) { Consumers = consumers }; - - return (flowEvent, ct) => flowEvent.Type switch - { - EventType.Begin => buffer.Start(ct), - EventType.Message => channel.Writer.WriteAsync(flowEvent.Payload, ct), - EventType.End => buffer.Stop(ct), - _ => ValueTask.CompletedTask, - }; + // var handler = hf(provider); + var buffer = new ChannelRunner>(channel, Consume, next) { Consumers = consumers }; + var hm = new BatchHandlerManager(channel, buffer); + return hm; async Task Consume(CancellationToken execToken) { @@ -111,15 +126,22 @@ async Task Consume(CancellationToken execToken) // Otherwise, the contents will be copied into a new array. The internal buffer will then be set to a // zero length array. var batch = batchBuilder.DrainToImmutable(); - await handler(batch, CancellationToken.None).ConfigureAwait(false); + await next.Handle(batch, CancellationToken.None).ConfigureAwait(false); } - } - }; + }; + } + + public ValueTask Start(CancellationToken ct) => runner.Start(ct); + + public ValueTask Handle(T payload, CancellationToken ct) => channel.Writer.WriteAsync(payload, ct); + + public ValueTask Stop(Exception? 
error, CancellationToken ct) => runner.Stop(error, ct); } internal static class ChannelRunner { - public static ChannelRunner Create(Channel channel, Handler> handler, + // public static ChannelRunner Create(Channel channel, Handler> handler, + public static ChannelRunner Create(Channel channel, IHandlerManager handler, int consumers = 1, bool processLeftovers = true) { return new ChannelRunner(channel, Consume, handler) @@ -128,13 +150,14 @@ public static ChannelRunner Create(Channel channel, Handler async Task Consume(CancellationToken execToken) { await foreach (var message in channel.Reader.ReadAllAsync(execToken).ConfigureAwait(false)) - await handler(message, CancellationToken.None).ConfigureAwait(false); + await handler.Handle(message, CancellationToken.None).ConfigureAwait(false); } } } internal sealed class ChannelRunner(Channel channel, - Func consumer, Handler> handler) : IDisposable + // Func consumer, Handler> handler) : IDisposable + Func consumer, IHandlerManager handler) : IDisposable { public HealthCheckResult Ready => (_execTokenSource, _exec, _execException) switch { @@ -161,7 +184,8 @@ public async ValueTask Start(CancellationToken ct) var execTokenSource = _execTokenSource = new CancellationTokenSource(); - await handler(Event.Begin, ct).ConfigureAwait(false); + // await handler(Event.Begin, ct).ConfigureAwait(false); + await handler.Start(ct).ConfigureAwait(false); _exec = Run(execTokenSource.Token); } @@ -175,7 +199,8 @@ private async Task Run(CancellationToken execToken) }; await exec.ConfigureAwait(false); - await handler(Event.End, _completionToken).ConfigureAwait(false); + // await handler(Event.End, _completionToken).ConfigureAwait(false); + await handler.Stop(_execException, _completionToken).ConfigureAwait(false); } private async Task RunConsumer(CancellationToken execToken) @@ -183,6 +208,7 @@ private async Task RunConsumer(CancellationToken execToken) try { await consumer(execToken).ConfigureAwait(false); + Close(); } catch 
(OperationCanceledException e) when (e.CancellationToken == execToken) { @@ -198,10 +224,10 @@ private async Task RunConsumer(CancellationToken execToken) } } - public async ValueTask Stop(CancellationToken ct) + public async ValueTask Stop(Exception? e, CancellationToken ct) { _completionToken = ct; - Close(); + Close(e); if (_exec is not null) await _exec.ConfigureAwait(false); } @@ -214,7 +240,8 @@ public void Dispose() private void Close(Exception? e = null) { - channel.Writer.TryComplete(e); + if (!channel.Writer.TryComplete(e)) + return; _execException ??= e; if (!ProcessLeftovers) _execTokenSource?.Cancel(); From f9101b2ea7466afe40a7963bc60bd096a9593b7a Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Thu, 23 Jan 2025 11:12:24 +0000 Subject: [PATCH 25/33] fix: tests --- docker-compose.yml | 4 ++-- samples/KafkaConsumerApp/Program.cs | 10 ++++++++-- samples/SqsConsumerApp/Program.cs | 7 +++++-- src/LocalPost.KafkaConsumer/ConsumeContext.cs | 2 +- src/LocalPost.KafkaConsumer/HandlerStackEx.cs | 8 ++++++++ src/LocalPost.KafkaConsumer/Options.cs | 3 +-- src/LocalPost/HandlerStack.cs | 2 +- tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs | 5 +++-- .../LocalPost.KafkaConsumer.Tests/RedpandaContainer.cs | 2 +- tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs | 2 +- 10 files changed, 31 insertions(+), 14 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index a50a691..899a459 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -28,7 +28,7 @@ services: redpanda: # Mainly from: https://docs.redpanda.com/redpanda-labs/docker-compose/single-broker/ # See also: https://docs.redpanda.com/current/deploy/deployment-option/self-hosted/docker-image/ - image: docker.redpanda.com/redpandadata/redpanda:v24.3.2 + image: docker.redpanda.com/redpandadata/redpanda:v24.3.3 container_name: redpanda command: - redpanda start @@ -64,7 +64,7 @@ services: # retries: 5 # start_period: 5s redpanda-console: - image: 
docker.redpanda.com/redpandadata/console:v2.8.1 + image: docker.redpanda.com/redpandadata/console:v2.8.2 entrypoint: /bin/sh command: -c "echo \"$$CONSOLE_CONFIG_FILE\" > /tmp/config.yml; /app/console" environment: diff --git a/samples/KafkaConsumerApp/Program.cs b/samples/KafkaConsumerApp/Program.cs index 65bce59..1ffd43d 100644 --- a/samples/KafkaConsumerApp/Program.cs +++ b/samples/KafkaConsumerApp/Program.cs @@ -6,6 +6,12 @@ var builder = Host.CreateApplicationBuilder(args); +builder.Services.Configure(options => +{ + options.ServicesStartConcurrently = true; + options.ServicesStopConcurrently = true; +}); + builder.Services .AddScoped() .AddKafkaConsumers(kafka => @@ -19,7 +25,7 @@ .Scoped() .DeserializeJson() .Trace() - .Acknowledge() + // .Acknowledge() .LogExceptions() ) .Bind(builder.Configuration.GetSection("Kafka:Consumer")) @@ -27,6 +33,7 @@ { options.ClientConfig.AutoOffsetReset = AutoOffsetReset.Earliest; // options.ClientConfig.EnableAutoCommit = false; // DryRun + // options.ClientConfig.EnableAutoOffsetStore = false; // Manually acknowledge every message }) .ValidateDataAnnotations(); }); @@ -34,7 +41,6 @@ await builder.Build().RunAsync(); - [UsedImplicitly] public record WeatherForecast(int TemperatureC, int TemperatureF, string Summary); diff --git a/samples/SqsConsumerApp/Program.cs b/samples/SqsConsumerApp/Program.cs index 96c4683..caeae2a 100644 --- a/samples/SqsConsumerApp/Program.cs +++ b/samples/SqsConsumerApp/Program.cs @@ -13,7 +13,11 @@ .AddDefaultAWSOptions(builder.Configuration.GetAWSOptions()) .AddAWSService(); - +builder.Services.Configure(options => +{ + options.ServicesStartConcurrently = true; + options.ServicesStopConcurrently = true; +}); builder.Services .AddScoped() @@ -36,7 +40,6 @@ await builder.Build().RunAsync(); - [UsedImplicitly] public record WeatherForecast(int TemperatureC, int TemperatureF, string Summary); diff --git a/src/LocalPost.KafkaConsumer/ConsumeContext.cs b/src/LocalPost.KafkaConsumer/ConsumeContext.cs 
index 8aac243..6674045 100644 --- a/src/LocalPost.KafkaConsumer/ConsumeContext.cs +++ b/src/LocalPost.KafkaConsumer/ConsumeContext.cs @@ -41,7 +41,7 @@ public void Deconstruct(out T payload, out IReadOnlyList headers) public ConsumeContext Transform(Func, TOut> transform) => Transform(transform(this)); public async Task> Transform(Func, Task> transform) => - Transform(await transform(this)); + Transform(await transform(this).ConfigureAwait(false)); public static implicit operator T(ConsumeContext context) => context.Payload; diff --git a/src/LocalPost.KafkaConsumer/HandlerStackEx.cs b/src/LocalPost.KafkaConsumer/HandlerStackEx.cs index c812e3e..5b524d3 100644 --- a/src/LocalPost.KafkaConsumer/HandlerStackEx.cs +++ b/src/LocalPost.KafkaConsumer/HandlerStackEx.cs @@ -27,6 +27,14 @@ public static HandlerFactory> Trace(this HandlerFactory + /// Manually acknowledge every message (store offset). + /// + /// Works only when EnableAutoOffsetStore is false! + /// + /// Message handler factory. + /// Message type. + /// Wrapped handler factory. 
public static HandlerFactory> Acknowledge(this HandlerFactory> hf) => hf.Map, ConsumeContext>(next => async (context, ct) => diff --git a/src/LocalPost.KafkaConsumer/Options.cs b/src/LocalPost.KafkaConsumer/Options.cs index 9168e7a..2469232 100644 --- a/src/LocalPost.KafkaConsumer/Options.cs +++ b/src/LocalPost.KafkaConsumer/Options.cs @@ -6,9 +6,8 @@ namespace LocalPost.KafkaConsumer; public sealed record ConsumerOptions { public ConsumerConfig ClientConfig { get; set; } = new(); - // public required ConsumerConfig ClientConfig { get; init; } = new() // { - // EnableAutoOffsetStore = false // We will store offsets manually, see Acknowledge middleware + // EnableAutoOffsetStore = false // Store offsets manually, see Acknowledge middleware // }; [MinLength(1)] diff --git a/src/LocalPost/HandlerStack.cs b/src/LocalPost/HandlerStack.cs index c03a4b1..7903dd5 100644 --- a/src/LocalPost/HandlerStack.cs +++ b/src/LocalPost/HandlerStack.cs @@ -14,7 +14,7 @@ public static class HandlerStack public static HandlerFactory For(Action syncHandler) => For((payload, _) => { syncHandler(payload); - return default; + return ValueTask.CompletedTask; }); public static HandlerFactory For(Handler handler) => _ => handler; diff --git a/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs b/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs index bcf2ced..8383cc3 100644 --- a/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs +++ b/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs @@ -27,12 +27,12 @@ public async Task InitializeAsync() await producer.ProduceAsync(Topic, new Message { Key = "London", - Value = "It will rainy in London tomorrow" + Value = "It will be rainy in London tomorrow" }); await producer.ProduceAsync(Topic, new Message { Key = "Paris", - Value = "It will rainy in London tomorrow" + Value = "It will be sunny in Paris tomorrow" }); } @@ -62,6 +62,7 @@ public async Task handles_messages() // Already set, see above // co.ClientConfig.GroupId = "test-consumer"; 
co.Topics.Add(Topic); + co.ClientConfig.EnableAutoOffsetStore = false; // Manually acknowledge every message // Otherwise the client attaches to the end of the topic, skipping all the published messages co.ClientConfig.AutoOffsetReset = AutoOffsetReset.Earliest; }) diff --git a/tests/LocalPost.KafkaConsumer.Tests/RedpandaContainer.cs b/tests/LocalPost.KafkaConsumer.Tests/RedpandaContainer.cs index ad34474..6f728d2 100644 --- a/tests/LocalPost.KafkaConsumer.Tests/RedpandaContainer.cs +++ b/tests/LocalPost.KafkaConsumer.Tests/RedpandaContainer.cs @@ -9,7 +9,7 @@ namespace LocalPost.KafkaConsumer.Tests; // See also https://github.com/testcontainers/testcontainers-dotnet/blob/develop/src/Testcontainers.Kafka/KafkaBuilder.cs public sealed class RedpandaBuilder : ContainerBuilder { - public const string RedpandaImage = "docker.redpanda.com/redpandadata/redpanda:v24.3.2"; + public const string RedpandaImage = "docker.redpanda.com/redpandadata/redpanda:v24.3.3"; public const ushort KafkaPort = 9092; public const ushort KafkaAdminPort = 9644; diff --git a/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs b/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs index dbf1434..c0e83c2 100644 --- a/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs +++ b/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs @@ -16,7 +16,7 @@ public class ConsumerTests(ITestOutputHelper output) : IAsyncLifetime .WithEnvironment("SERVICES", "sqs") .Build(); - private readonly AWSCredentials _credentials = new BasicAWSCredentials("LSIAQAAAAAAVNCBMPNSG", "any"); + private readonly AWSCredentials _credentials = new BasicAWSCredentials("test", "test"); private const string QueueName = "weather-forecasts"; From 0601ef8c0bb72181b1e990389a31003dfce6303b Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Sat, 25 Jan 2025 21:56:42 +0000 Subject: [PATCH 26/33] WIP --- LocalPost.sln | 41 +------------- .../AzureQueueConsumerApp.csproj | 18 ++++++ .../LocalPost.AzureQueueConsumer.csproj | 55 +++++++++++++++++++ 
src/LocalPost/Flow/HandlerStackEx.cs | 20 +++---- .../ConsumerTests.cs | 2 +- .../LocalPost.KafkaConsumer.Tests.csproj | 4 +- .../RedpandaContainer.cs | 10 ---- 7 files changed, 86 insertions(+), 64 deletions(-) create mode 100644 samples/AzureQueueConsumerApp/AzureQueueConsumerApp.csproj create mode 100644 src/LocalPost.AzureQueueConsumer/LocalPost.AzureQueueConsumer.csproj diff --git a/LocalPost.sln b/LocalPost.sln index 325365c..e4eaede 100644 --- a/LocalPost.sln +++ b/LocalPost.sln @@ -4,38 +4,26 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost", "src\LocalPost\ EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "BackgroundQueueApp", "samples\BackgroundQueueApp\BackgroundQueueApp.csproj", "{46FC61E6-D0FB-4D7D-A81B-20EF8D8D1F4E}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.SnsPublisher", "src\LocalPost.SnsPublisher\LocalPost.SnsPublisher.csproj", "{D256C568-2B42-4DCC-AB54-15B512A99C44}" -EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.Tests", "tests\LocalPost.Tests\LocalPost.Tests.csproj", "{0E69A423-5F70-4BA7-8015-0AB0BC4B6FD2}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.SnsPublisher.Tests", "tests\LocalPost.SnsPublisher.Tests\LocalPost.SnsPublisher.Tests.csproj", "{0B8929F4-E220-45A9-A279-41F5D94A8C1B}" -EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.SqsConsumer", "src\LocalPost.SqsConsumer\LocalPost.SqsConsumer.csproj", "{30232703-C103-4F7A-9822-80F2F680A88D}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.SqsConsumer.Tests", "tests\LocalPost.SqsConsumer.Tests\LocalPost.SqsConsumer.Tests.csproj", "{2F61DCD7-E4CB-4ECC-B24E-A663D12D9C03}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.Azure.QueueConsumer", "src\LocalPost.Azure.QueueConsumer\LocalPost.Azure.QueueConsumer.csproj", "{3F9454C4-9C0D-4FB4-9476-F32224182C7B}" -EndProject 
-Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.RabbitMqConsumer", "src\LocalPost.RabbitMqConsumer\LocalPost.RabbitMqConsumer.csproj", "{3387EE63-AFD4-4792-8FD3-C1C8D2C5900D}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.AzureQueueConsumer", "src\LocalPost.AzureQueueConsumer\LocalPost.AzureQueueConsumer.csproj", "{3F9454C4-9C0D-4FB4-9476-F32224182C7B}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.KafkaConsumer", "src\LocalPost.KafkaConsumer\LocalPost.KafkaConsumer.csproj", "{D9139C53-5B9F-49E7-80DF-41C995C37E2F}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Examples", "Examples", "{405721DC-F290-4191-B638-9907D5EB042B}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AzureQueueApp", "samples\AzureQueueApp\AzureQueueApp.csproj", "{7C21BB9A-9C68-4750-84AA-272F201878A1}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AzureQueueConsumerApp", "samples\AzureQueueConsumerApp\AzureQueueConsumerApp.csproj", "{7C21BB9A-9C68-4750-84AA-272F201878A1}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "KafkaConsumerApp", "samples\KafkaConsumerApp\KafkaConsumerApp.csproj", "{C310487A-B976-4D3E-80AF-4ADBE1C63139}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SqsConsumerApp", "samples\SqsConsumerApp\SqsConsumerApp.csproj", "{2778AEBD-0345-4F79-9E93-73AFAB6C7BCD}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.Resilience", "src\LocalPost.Resilience\LocalPost.Resilience.csproj", "{EA69FF51-BEF7-415C-836A-BB5432206F7E}" -EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "RabbitMqConsumerApp", "samples\RabbitMqConsumerApp\RabbitMqConsumerApp.csproj", "{F9BD092A-DC16-43A8-B605-46FDFAEFBEDA}" -EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.KafkaConsumer.Tests", "tests\LocalPost.KafkaConsumer.Tests\LocalPost.KafkaConsumer.Tests.csproj", "{734C9C76-B3D8-4AD7-8E76-B14539C3CB4D}" 
EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.RabbitMqConsumer.Tests", "tests\LocalPost.RabbitMqConsumer.Tests\LocalPost.RabbitMqConsumer.Tests.csproj", "{92461B19-6794-4EB2-BFFE-DCD9CDFFD6D8}" -EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.NatsConsumer", "src\LocalPost.NatsConsumer\LocalPost.NatsConsumer.csproj", "{05A771C9-0987-484A-8A7F-B6B1180F55F9}" EndProject Project("{F2A71F9B-5D33-465A-A702-920D77279786}") = "BackgroundQueueApp.FSharp", "samples\BackgroundQueueApp.FSharp\BackgroundQueueApp.FSharp.fsproj", "{79CF7EFF-860D-464F-B59A-55E48D25D70C}" @@ -54,18 +42,10 @@ Global {46FC61E6-D0FB-4D7D-A81B-20EF8D8D1F4E}.Debug|Any CPU.Build.0 = Debug|Any CPU {46FC61E6-D0FB-4D7D-A81B-20EF8D8D1F4E}.Release|Any CPU.ActiveCfg = Release|Any CPU {46FC61E6-D0FB-4D7D-A81B-20EF8D8D1F4E}.Release|Any CPU.Build.0 = Release|Any CPU - {D256C568-2B42-4DCC-AB54-15B512A99C44}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {D256C568-2B42-4DCC-AB54-15B512A99C44}.Debug|Any CPU.Build.0 = Debug|Any CPU - {D256C568-2B42-4DCC-AB54-15B512A99C44}.Release|Any CPU.ActiveCfg = Release|Any CPU - {D256C568-2B42-4DCC-AB54-15B512A99C44}.Release|Any CPU.Build.0 = Release|Any CPU {0E69A423-5F70-4BA7-8015-0AB0BC4B6FD2}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {0E69A423-5F70-4BA7-8015-0AB0BC4B6FD2}.Debug|Any CPU.Build.0 = Debug|Any CPU {0E69A423-5F70-4BA7-8015-0AB0BC4B6FD2}.Release|Any CPU.ActiveCfg = Release|Any CPU {0E69A423-5F70-4BA7-8015-0AB0BC4B6FD2}.Release|Any CPU.Build.0 = Release|Any CPU - {0B8929F4-E220-45A9-A279-41F5D94A8C1B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {0B8929F4-E220-45A9-A279-41F5D94A8C1B}.Debug|Any CPU.Build.0 = Debug|Any CPU - {0B8929F4-E220-45A9-A279-41F5D94A8C1B}.Release|Any CPU.ActiveCfg = Release|Any CPU - {0B8929F4-E220-45A9-A279-41F5D94A8C1B}.Release|Any CPU.Build.0 = Release|Any CPU {30232703-C103-4F7A-9822-80F2F680A88D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {30232703-C103-4F7A-9822-80F2F680A88D}.Debug|Any 
CPU.Build.0 = Debug|Any CPU {30232703-C103-4F7A-9822-80F2F680A88D}.Release|Any CPU.ActiveCfg = Release|Any CPU @@ -78,10 +58,6 @@ Global {3F9454C4-9C0D-4FB4-9476-F32224182C7B}.Debug|Any CPU.Build.0 = Debug|Any CPU {3F9454C4-9C0D-4FB4-9476-F32224182C7B}.Release|Any CPU.ActiveCfg = Release|Any CPU {3F9454C4-9C0D-4FB4-9476-F32224182C7B}.Release|Any CPU.Build.0 = Release|Any CPU - {3387EE63-AFD4-4792-8FD3-C1C8D2C5900D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {3387EE63-AFD4-4792-8FD3-C1C8D2C5900D}.Debug|Any CPU.Build.0 = Debug|Any CPU - {3387EE63-AFD4-4792-8FD3-C1C8D2C5900D}.Release|Any CPU.ActiveCfg = Release|Any CPU - {3387EE63-AFD4-4792-8FD3-C1C8D2C5900D}.Release|Any CPU.Build.0 = Release|Any CPU {D9139C53-5B9F-49E7-80DF-41C995C37E2F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {D9139C53-5B9F-49E7-80DF-41C995C37E2F}.Debug|Any CPU.Build.0 = Debug|Any CPU {D9139C53-5B9F-49E7-80DF-41C995C37E2F}.Release|Any CPU.ActiveCfg = Release|Any CPU @@ -98,22 +74,10 @@ Global {2778AEBD-0345-4F79-9E93-73AFAB6C7BCD}.Debug|Any CPU.Build.0 = Debug|Any CPU {2778AEBD-0345-4F79-9E93-73AFAB6C7BCD}.Release|Any CPU.ActiveCfg = Release|Any CPU {2778AEBD-0345-4F79-9E93-73AFAB6C7BCD}.Release|Any CPU.Build.0 = Release|Any CPU - {EA69FF51-BEF7-415C-836A-BB5432206F7E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {EA69FF51-BEF7-415C-836A-BB5432206F7E}.Debug|Any CPU.Build.0 = Debug|Any CPU - {EA69FF51-BEF7-415C-836A-BB5432206F7E}.Release|Any CPU.ActiveCfg = Release|Any CPU - {EA69FF51-BEF7-415C-836A-BB5432206F7E}.Release|Any CPU.Build.0 = Release|Any CPU - {F9BD092A-DC16-43A8-B605-46FDFAEFBEDA}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {F9BD092A-DC16-43A8-B605-46FDFAEFBEDA}.Debug|Any CPU.Build.0 = Debug|Any CPU - {F9BD092A-DC16-43A8-B605-46FDFAEFBEDA}.Release|Any CPU.ActiveCfg = Release|Any CPU - {F9BD092A-DC16-43A8-B605-46FDFAEFBEDA}.Release|Any CPU.Build.0 = Release|Any CPU {734C9C76-B3D8-4AD7-8E76-B14539C3CB4D}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {734C9C76-B3D8-4AD7-8E76-B14539C3CB4D}.Debug|Any 
CPU.Build.0 = Debug|Any CPU {734C9C76-B3D8-4AD7-8E76-B14539C3CB4D}.Release|Any CPU.ActiveCfg = Release|Any CPU {734C9C76-B3D8-4AD7-8E76-B14539C3CB4D}.Release|Any CPU.Build.0 = Release|Any CPU - {92461B19-6794-4EB2-BFFE-DCD9CDFFD6D8}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {92461B19-6794-4EB2-BFFE-DCD9CDFFD6D8}.Debug|Any CPU.Build.0 = Debug|Any CPU - {92461B19-6794-4EB2-BFFE-DCD9CDFFD6D8}.Release|Any CPU.ActiveCfg = Release|Any CPU - {92461B19-6794-4EB2-BFFE-DCD9CDFFD6D8}.Release|Any CPU.Build.0 = Release|Any CPU {05A771C9-0987-484A-8A7F-B6B1180F55F9}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {05A771C9-0987-484A-8A7F-B6B1180F55F9}.Debug|Any CPU.Build.0 = Debug|Any CPU {05A771C9-0987-484A-8A7F-B6B1180F55F9}.Release|Any CPU.ActiveCfg = Release|Any CPU @@ -128,7 +92,6 @@ Global {7C21BB9A-9C68-4750-84AA-272F201878A1} = {405721DC-F290-4191-B638-9907D5EB042B} {C310487A-B976-4D3E-80AF-4ADBE1C63139} = {405721DC-F290-4191-B638-9907D5EB042B} {2778AEBD-0345-4F79-9E93-73AFAB6C7BCD} = {405721DC-F290-4191-B638-9907D5EB042B} - {F9BD092A-DC16-43A8-B605-46FDFAEFBEDA} = {405721DC-F290-4191-B638-9907D5EB042B} {79CF7EFF-860D-464F-B59A-55E48D25D70C} = {405721DC-F290-4191-B638-9907D5EB042B} EndGlobalSection EndGlobal diff --git a/samples/AzureQueueConsumerApp/AzureQueueConsumerApp.csproj b/samples/AzureQueueConsumerApp/AzureQueueConsumerApp.csproj new file mode 100644 index 0000000..153cfe2 --- /dev/null +++ b/samples/AzureQueueConsumerApp/AzureQueueConsumerApp.csproj @@ -0,0 +1,18 @@ + + + + net8 + + + + + + + + + + + + + + diff --git a/src/LocalPost.AzureQueueConsumer/LocalPost.AzureQueueConsumer.csproj b/src/LocalPost.AzureQueueConsumer/LocalPost.AzureQueueConsumer.csproj new file mode 100644 index 0000000..3031621 --- /dev/null +++ b/src/LocalPost.AzureQueueConsumer/LocalPost.AzureQueueConsumer.csproj @@ -0,0 +1,55 @@ + + + + netstandard2.0 + true + + false + + LocalPost.AzureQueueConsumer + background;task;queue;azure;sqs + Local (in-process) background queue for sending to Amazon 
SNS. + Alexey Shokov + + README.md + MIT + https://github.com/alexeyshockov/LocalPost + git + true + + + + + + + + + + true + + + + true + true + true + true + snupkg + + + true + + + + + + + + + + + + + + + + diff --git a/src/LocalPost/Flow/HandlerStackEx.cs b/src/LocalPost/Flow/HandlerStackEx.cs index 2ad8125..9a50561 100644 --- a/src/LocalPost/Flow/HandlerStackEx.cs +++ b/src/LocalPost/Flow/HandlerStackEx.cs @@ -20,8 +20,8 @@ public static HandlerManagerFactory Buffer(this HandlerFactory hf, public static HandlerManagerFactory Batch(this HandlerFactory> hf, int size, TimeSpan window, - int capacity = 1, int consumers = 1, bool singleProducer = false) => - hf.AsHandlerManager().Batch(size, window, capacity, consumers, singleProducer); + int capacity = 1, bool singleProducer = false) => + hf.AsHandlerManager().Batch(size, window, capacity, singleProducer); } [PublicAPI] @@ -56,10 +56,10 @@ async Task Consume(CancellationToken execToken) public static HandlerManagerFactory Batch(this HandlerManagerFactory> hmf, int size, TimeSpan window, - int capacity = 1, int consumers = 1, bool singleProducer = false) => provider => + int capacity = 1, bool singleProducer = false) => provider => { var next = hmf(provider); - return BatchHandlerManager.Create(next, size, window, capacity, consumers, singleProducer); + return BatchHandlerManager.Create(next, size, window, capacity, singleProducer); }; } @@ -78,16 +78,15 @@ internal sealed class BatchHandlerManager(Channel channel, { public static BatchHandlerManager Create(IHandlerManager> next, int size, TimeSpan window, - int capacity = 1, int consumers = 1, bool singleProducer = false) + int capacity = 1, bool singleProducer = false) { var channel = Channel.CreateBounded(new BoundedChannelOptions(capacity) { FullMode = BoundedChannelFullMode.Wait, - SingleReader = consumers == 1, + SingleReader = true, SingleWriter = singleProducer, }); - // var handler = hf(provider); - var buffer = new ChannelRunner>(channel, Consume, next) { 
Consumers = consumers }; + var buffer = new ChannelRunner>(channel, Consume, next) { Consumers = 1 }; var hm = new BatchHandlerManager(channel, buffer); return hm; @@ -126,6 +125,7 @@ async Task Consume(CancellationToken execToken) // Otherwise, the contents will be copied into a new array. The internal buffer will then be set to a // zero length array. var batch = batchBuilder.DrainToImmutable(); + await next.Handle(batch, CancellationToken.None).ConfigureAwait(false); } }; @@ -140,7 +140,6 @@ async Task Consume(CancellationToken execToken) internal static class ChannelRunner { - // public static ChannelRunner Create(Channel channel, Handler> handler, public static ChannelRunner Create(Channel channel, IHandlerManager handler, int consumers = 1, bool processLeftovers = true) { @@ -156,7 +155,6 @@ async Task Consume(CancellationToken execToken) } internal sealed class ChannelRunner(Channel channel, - // Func consumer, Handler> handler) : IDisposable Func consumer, IHandlerManager handler) : IDisposable { public HealthCheckResult Ready => (_execTokenSource, _exec, _execException) switch @@ -184,7 +182,6 @@ public async ValueTask Start(CancellationToken ct) var execTokenSource = _execTokenSource = new CancellationTokenSource(); - // await handler(Event.Begin, ct).ConfigureAwait(false); await handler.Start(ct).ConfigureAwait(false); _exec = Run(execTokenSource.Token); @@ -199,7 +196,6 @@ private async Task Run(CancellationToken execToken) }; await exec.ConfigureAwait(false); - // await handler(Event.End, _completionToken).ConfigureAwait(false); await handler.Stop(_execException, _completionToken).ConfigureAwait(false); } diff --git a/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs b/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs index 8383cc3..e192cc0 100644 --- a/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs +++ b/tests/LocalPost.KafkaConsumer.Tests/ConsumerTests.cs @@ -23,7 +23,7 @@ public async Task InitializeAsync() BootstrapServers = 
_container.GetBootstrapAddress() }).Build(); - // Redpanda: by default, topic is created automatically on the first message + // Redpanda creates a topic automatically if it doesn't exist await producer.ProduceAsync(Topic, new Message { Key = "London", diff --git a/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj b/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj index ca9fa2d..b4b8894 100644 --- a/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj +++ b/tests/LocalPost.KafkaConsumer.Tests/LocalPost.KafkaConsumer.Tests.csproj @@ -6,8 +6,8 @@ - - + + diff --git a/tests/LocalPost.KafkaConsumer.Tests/RedpandaContainer.cs b/tests/LocalPost.KafkaConsumer.Tests/RedpandaContainer.cs index 6f728d2..435067e 100644 --- a/tests/LocalPost.KafkaConsumer.Tests/RedpandaContainer.cs +++ b/tests/LocalPost.KafkaConsumer.Tests/RedpandaContainer.cs @@ -76,18 +76,8 @@ protected override RedpandaBuilder Init() } } - - public sealed class RedpandaContainer(IContainerConfiguration configuration) : DockerContainer(configuration) { - public override async Task StartAsync(CancellationToken ct = default) - { - await base.StartAsync(ct); - - // Dirty fix, but otherwise the client just fails with strange errors - await Task.Delay(3_000, ct); - } - public string GetSchemaRegistryAddress() => new UriBuilder(Uri.UriSchemeHttp, Hostname, GetMappedPublicPort(RedpandaBuilder.SchemaRegistryPort)).ToString(); From 92b240b72383c367fcbbd9ca7b3f7b31dac98306 Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Tue, 28 Jan 2025 14:56:53 +0000 Subject: [PATCH 27/33] WIP --- LocalPost.sln | 26 --- docker-compose.yml | 13 ++ justfile | 7 - .../AzureQueueConsumerApp.csproj | 18 -- samples/KafkaConsumerApp/Program.cs | 5 +- samples/SqsConsumerApp/Program.cs | 36 +++- .../Properties/launchSettings.json | 7 +- samples/SqsConsumerApp/README.md | 33 +++- samples/SqsConsumerApp/SqsConsumerApp.csproj | 3 + .../appsettings.Development.json | 6 +- 
.../LocalPost.AzureQueueConsumer.csproj | 55 ------ src/LocalPost.KafkaConsumer/Consumer.cs | 14 +- .../DependencyInjection/KafkaBuilder.cs | 21 +-- src/LocalPost.KafkaConsumer/HandlerStackEx.cs | 165 +++++++++++++----- src/LocalPost.KafkaConsumer/Tracing.cs | 33 +++- src/LocalPost.SqsConsumer/ConsumeContext.cs | 3 +- src/LocalPost.SqsConsumer/Consumer.cs | 15 +- .../DependencyInjection/SqsBuilder.cs | 21 +-- src/LocalPost.SqsConsumer/HandlerStackEx.cs | 90 ++++++++-- src/LocalPost.SqsConsumer/QueueClient.cs | 7 + src/LocalPost.SqsConsumer/README.md | 17 +- src/LocalPost.SqsConsumer/Tracing.cs | 32 +++- .../BackgroundQueue/BackgroundQueue.cs | 37 +++- .../BackgroundQueue/ConsumeContext.cs | 2 +- .../BackgroundQueuesBuilder.cs | 21 ++- .../BackgroundQueue/HandlerStackEx.cs | 8 +- src/LocalPost/BackgroundQueue/Tracing.cs | 5 +- src/LocalPost/Flow/Event.cs | 38 ---- src/LocalPost/Flow/HandlerStackEx.cs | 146 +++++++--------- src/LocalPost/Handler.cs | 33 +++- src/LocalPost/HandlerStack.cs | 33 +++- src/LocalPost/HandlerStackOps.cs | 77 ++------ src/LocalPost/Middlewares.cs | 125 +++++++------ src/LocalPost/Resilience/HandlerStackEx.cs | 13 +- .../ConsumerTests.cs | 43 +++++ 35 files changed, 680 insertions(+), 528 deletions(-) delete mode 100755 justfile delete mode 100644 samples/AzureQueueConsumerApp/AzureQueueConsumerApp.csproj delete mode 100644 src/LocalPost.AzureQueueConsumer/LocalPost.AzureQueueConsumer.csproj delete mode 100644 src/LocalPost/Flow/Event.cs diff --git a/LocalPost.sln b/LocalPost.sln index e4eaede..4b447ab 100644 --- a/LocalPost.sln +++ b/LocalPost.sln @@ -10,24 +10,16 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.SqsConsumer", "sr EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.SqsConsumer.Tests", "tests\LocalPost.SqsConsumer.Tests\LocalPost.SqsConsumer.Tests.csproj", "{2F61DCD7-E4CB-4ECC-B24E-A663D12D9C03}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = 
"LocalPost.AzureQueueConsumer", "src\LocalPost.AzureQueueConsumer\LocalPost.AzureQueueConsumer.csproj", "{3F9454C4-9C0D-4FB4-9476-F32224182C7B}" -EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.KafkaConsumer", "src\LocalPost.KafkaConsumer\LocalPost.KafkaConsumer.csproj", "{D9139C53-5B9F-49E7-80DF-41C995C37E2F}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Examples", "Examples", "{405721DC-F290-4191-B638-9907D5EB042B}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "AzureQueueConsumerApp", "samples\AzureQueueConsumerApp\AzureQueueConsumerApp.csproj", "{7C21BB9A-9C68-4750-84AA-272F201878A1}" -EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "KafkaConsumerApp", "samples\KafkaConsumerApp\KafkaConsumerApp.csproj", "{C310487A-B976-4D3E-80AF-4ADBE1C63139}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SqsConsumerApp", "samples\SqsConsumerApp\SqsConsumerApp.csproj", "{2778AEBD-0345-4F79-9E93-73AFAB6C7BCD}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.KafkaConsumer.Tests", "tests\LocalPost.KafkaConsumer.Tests\LocalPost.KafkaConsumer.Tests.csproj", "{734C9C76-B3D8-4AD7-8E76-B14539C3CB4D}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.NatsConsumer", "src\LocalPost.NatsConsumer\LocalPost.NatsConsumer.csproj", "{05A771C9-0987-484A-8A7F-B6B1180F55F9}" -EndProject -Project("{F2A71F9B-5D33-465A-A702-920D77279786}") = "BackgroundQueueApp.FSharp", "samples\BackgroundQueueApp.FSharp\BackgroundQueueApp.FSharp.fsproj", "{79CF7EFF-860D-464F-B59A-55E48D25D70C}" -EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -54,18 +46,10 @@ Global {2F61DCD7-E4CB-4ECC-B24E-A663D12D9C03}.Debug|Any CPU.Build.0 = Debug|Any CPU {2F61DCD7-E4CB-4ECC-B24E-A663D12D9C03}.Release|Any CPU.ActiveCfg = Release|Any CPU {2F61DCD7-E4CB-4ECC-B24E-A663D12D9C03}.Release|Any CPU.Build.0 = Release|Any CPU - 
{3F9454C4-9C0D-4FB4-9476-F32224182C7B}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {3F9454C4-9C0D-4FB4-9476-F32224182C7B}.Debug|Any CPU.Build.0 = Debug|Any CPU - {3F9454C4-9C0D-4FB4-9476-F32224182C7B}.Release|Any CPU.ActiveCfg = Release|Any CPU - {3F9454C4-9C0D-4FB4-9476-F32224182C7B}.Release|Any CPU.Build.0 = Release|Any CPU {D9139C53-5B9F-49E7-80DF-41C995C37E2F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {D9139C53-5B9F-49E7-80DF-41C995C37E2F}.Debug|Any CPU.Build.0 = Debug|Any CPU {D9139C53-5B9F-49E7-80DF-41C995C37E2F}.Release|Any CPU.ActiveCfg = Release|Any CPU {D9139C53-5B9F-49E7-80DF-41C995C37E2F}.Release|Any CPU.Build.0 = Release|Any CPU - {7C21BB9A-9C68-4750-84AA-272F201878A1}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {7C21BB9A-9C68-4750-84AA-272F201878A1}.Debug|Any CPU.Build.0 = Debug|Any CPU - {7C21BB9A-9C68-4750-84AA-272F201878A1}.Release|Any CPU.ActiveCfg = Release|Any CPU - {7C21BB9A-9C68-4750-84AA-272F201878A1}.Release|Any CPU.Build.0 = Release|Any CPU {C310487A-B976-4D3E-80AF-4ADBE1C63139}.Debug|Any CPU.ActiveCfg = Debug|Any CPU {C310487A-B976-4D3E-80AF-4ADBE1C63139}.Debug|Any CPU.Build.0 = Debug|Any CPU {C310487A-B976-4D3E-80AF-4ADBE1C63139}.Release|Any CPU.ActiveCfg = Release|Any CPU @@ -78,20 +62,10 @@ Global {734C9C76-B3D8-4AD7-8E76-B14539C3CB4D}.Debug|Any CPU.Build.0 = Debug|Any CPU {734C9C76-B3D8-4AD7-8E76-B14539C3CB4D}.Release|Any CPU.ActiveCfg = Release|Any CPU {734C9C76-B3D8-4AD7-8E76-B14539C3CB4D}.Release|Any CPU.Build.0 = Release|Any CPU - {05A771C9-0987-484A-8A7F-B6B1180F55F9}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {05A771C9-0987-484A-8A7F-B6B1180F55F9}.Debug|Any CPU.Build.0 = Debug|Any CPU - {05A771C9-0987-484A-8A7F-B6B1180F55F9}.Release|Any CPU.ActiveCfg = Release|Any CPU - {05A771C9-0987-484A-8A7F-B6B1180F55F9}.Release|Any CPU.Build.0 = Release|Any CPU - {79CF7EFF-860D-464F-B59A-55E48D25D70C}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {79CF7EFF-860D-464F-B59A-55E48D25D70C}.Debug|Any CPU.Build.0 = Debug|Any CPU - 
{79CF7EFF-860D-464F-B59A-55E48D25D70C}.Release|Any CPU.ActiveCfg = Release|Any CPU - {79CF7EFF-860D-464F-B59A-55E48D25D70C}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(NestedProjects) = preSolution {46FC61E6-D0FB-4D7D-A81B-20EF8D8D1F4E} = {405721DC-F290-4191-B638-9907D5EB042B} - {7C21BB9A-9C68-4750-84AA-272F201878A1} = {405721DC-F290-4191-B638-9907D5EB042B} {C310487A-B976-4D3E-80AF-4ADBE1C63139} = {405721DC-F290-4191-B638-9907D5EB042B} {2778AEBD-0345-4F79-9E93-73AFAB6C7BCD} = {405721DC-F290-4191-B638-9907D5EB042B} - {79CF7EFF-860D-464F-B59A-55E48D25D70C} = {405721DC-F290-4191-B638-9907D5EB042B} EndGlobalSection EndGlobal diff --git a/docker-compose.yml b/docker-compose.yml index 899a459..2d3f7a3 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -8,6 +8,19 @@ volumes: localstack: driver: local services: + # https://learn.microsoft.com/en-us/dotnet/aspire/fundamentals/dashboard/standalone + # https://hub.docker.com/r/microsoft/dotnet-aspire-dashboard + aspire: + image: mcr.microsoft.com/dotnet/aspire-dashboard:9.0 + ports: + - "18888:18888" # HTTP + - "18889:18889" # OTEL collector GRPC + - "18890:18890" # OTEL collector HTTP + environment: + - DOTNET_DASHBOARD_UNSECURED_ALLOW_ANONYMOUS=true +# - Dashboard__Otlp__AuthMode=Unsecured + # This setting is a shortcut to configuring Dashboard:Frontend:AuthMode and Dashboard:Otlp:AuthMode to Unsecured + - ASPIRE_ALLOW_UNSECURED_TRANSPORT=true localstack: # https://docs.localstack.cloud/getting-started/installation/#docker-compose image: localstack/localstack:4 diff --git a/justfile b/justfile deleted file mode 100755 index 993d5c0..0000000 --- a/justfile +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env just --justfile - -update-deps: - dotnet restore --force-evaluate - -install-deps: - dotnet restore diff --git a/samples/AzureQueueConsumerApp/AzureQueueConsumerApp.csproj b/samples/AzureQueueConsumerApp/AzureQueueConsumerApp.csproj deleted file mode 100644 index 153cfe2..0000000 --- 
a/samples/AzureQueueConsumerApp/AzureQueueConsumerApp.csproj +++ /dev/null @@ -1,18 +0,0 @@ - - - - net8 - - - - - - - - - - - - - - diff --git a/samples/KafkaConsumerApp/Program.cs b/samples/KafkaConsumerApp/Program.cs index 1ffd43d..2cd5b02 100644 --- a/samples/KafkaConsumerApp/Program.cs +++ b/samples/KafkaConsumerApp/Program.cs @@ -1,3 +1,4 @@ +using System.Text.Json; using Confluent.Kafka; using JetBrains.Annotations; using LocalPost; @@ -21,9 +22,9 @@ .ValidateDataAnnotations(); kafka.AddConsumer("example-consumer-group", HandlerStack.From() - .UseKafkaPayload() .Scoped() - .DeserializeJson() + .UseKafkaPayload() + .Deserialize(context => JsonSerializer.Deserialize(context.Payload)!) .Trace() // .Acknowledge() .LogExceptions() diff --git a/samples/SqsConsumerApp/Program.cs b/samples/SqsConsumerApp/Program.cs index caeae2a..4aae3c1 100644 --- a/samples/SqsConsumerApp/Program.cs +++ b/samples/SqsConsumerApp/Program.cs @@ -3,6 +3,9 @@ using LocalPost; using LocalPost.SqsConsumer; using LocalPost.SqsConsumer.DependencyInjection; +using OpenTelemetry; +using OpenTelemetry.Metrics; +using OpenTelemetry.Trace; using Serilog; using Serilog.Sinks.FingersCrossed; @@ -19,15 +22,37 @@ options.ServicesStopConcurrently = true; }); +#region OpenTelemetry + +// See also: https://learn.microsoft.com/en-us/dotnet/core/diagnostics/observability-otlp-example + +// To use full potential of Serilog, it's better to use Serilog.Sinks.OpenTelemetry, +// see https://github.com/Blind-Striker/dotnet-otel-aspire-localstack-demo as an example +// builder.Logging.AddOpenTelemetry(logging => +// { +// logging.IncludeFormattedMessage = true; +// logging.IncludeScopes = true; +// }); + +builder.Services.AddOpenTelemetry() + .WithMetrics(metrics => metrics + .AddAWSInstrumentation()) + .WithTracing(tracing => tracing + .AddSource("LocalPost.*") + .AddAWSInstrumentation()) + .UseOtlpExporter(); + +#endregion + builder.Services .AddScoped() .AddSqsConsumers(sqs => { 
sqs.Defaults.Configure(options => options.MaxNumberOfMessages = 1); - sqs.AddConsumer("weather-forecasts", + sqs.AddConsumer("weather-forecasts", // Also acts as a queue name HandlerStack.From() - .UseSqsPayload() .Scoped() + .UseSqsPayload() .DeserializeJson() .Trace() .Acknowledge() // Do not include DeleteMessage call in the OpenTelemetry root span (transaction) @@ -36,7 +61,6 @@ ); }); -// TODO Health + Supervisor await builder.Build().RunAsync(); @@ -56,10 +80,10 @@ public async ValueTask InvokeAsync(WeatherForecast payload, CancellationToken ct } } -public static class FingersCrossedLogging +public static class HandlerStackEx { - public static HandlerFactory LogFingersCrossed(this HandlerFactory hf) => - hf.Touch(next => async (context, ct) => + public static HandlerManagerFactory LogFingersCrossed(this HandlerManagerFactory hmf) => + hmf.TouchHandler(next => async (context, ct) => { using var logBuffer = LogBuffer.BeginScope(); try diff --git a/samples/SqsConsumerApp/Properties/launchSettings.json b/samples/SqsConsumerApp/Properties/launchSettings.json index 91ef085..4328853 100644 --- a/samples/SqsConsumerApp/Properties/launchSettings.json +++ b/samples/SqsConsumerApp/Properties/launchSettings.json @@ -4,7 +4,12 @@ "commandName": "Project", "dotnetRunMessages": true, "environmentVariables": { - "DOTNET_ENVIRONMENT": "Development" + "DOTNET_ENVIRONMENT": "Development", + "AWS_ACCESS_KEY_ID": "test", + "AWS_SECRET_ACCESS_KEY": "test", + "OTEL_SERVICE_NAME": "SampleSqsConsumer", + "OTEL_EXPORTER_OTLP_PROTOCOL": "grpc", + "OTEL_EXPORTER_OTLP_ENDPOINT": "http://127.0.0.1:18889" } } } diff --git a/samples/SqsConsumerApp/README.md b/samples/SqsConsumerApp/README.md index fdeb0ee..6261c32 100644 --- a/samples/SqsConsumerApp/README.md +++ b/samples/SqsConsumerApp/README.md @@ -1,9 +1,32 @@ -# SQS Consumer +# SQS Consumer Sample App -## Configuration +## Setup -### Queue name +### Local infrastructure -TODO +`docker compose up -d` to spin up the localstack & Aspire 
containers -## +### SQS queue + +```shell +aws --endpoint-url=http://localhost:4566 --region=us-east-1 --no-sign-request \ + sqs create-queue --queue-name "weather-forecasts" +``` + +To get the queue URL: + +```shell +aws --endpoint-url=http://localhost:4566 --region=us-east-1 --no-sign-request \ + sqs get-queue-url --queue-name "weather-forecasts" --query "QueueUrl" +``` + +## Run + +To see that the consumer is working, you can send a message to the queue using the AWS CLI: + +```shell +aws --endpoint-url=http://localhost:4566 --region=us-east-1 --no-sign-request \ + sqs send-message \ + --queue-url "http://sqs.us-east-1.localhost.localstack.cloud:4566/000000000000/weather-forecasts" \ + --message-body '{"TemperatureC": 25, "TemperatureF": 77, "Summary": "not hot, not cold, perfect"}' +``` diff --git a/samples/SqsConsumerApp/SqsConsumerApp.csproj b/samples/SqsConsumerApp/SqsConsumerApp.csproj index b09212a..6105d44 100644 --- a/samples/SqsConsumerApp/SqsConsumerApp.csproj +++ b/samples/SqsConsumerApp/SqsConsumerApp.csproj @@ -14,6 +14,9 @@ + + + diff --git a/samples/SqsConsumerApp/appsettings.Development.json b/samples/SqsConsumerApp/appsettings.Development.json index 75a3ba5..ef8e3e4 100644 --- a/samples/SqsConsumerApp/appsettings.Development.json +++ b/samples/SqsConsumerApp/appsettings.Development.json @@ -1,9 +1,5 @@ { "AWS": { - "ServiceURL": "http://localhost:8000" - }, - "WeatherSqsConsumer": { - "MaxConcurrency": 100, - "Attributes": null + "ServiceURL": "http://127.0.0.1:4566" } } diff --git a/src/LocalPost.AzureQueueConsumer/LocalPost.AzureQueueConsumer.csproj b/src/LocalPost.AzureQueueConsumer/LocalPost.AzureQueueConsumer.csproj deleted file mode 100644 index 3031621..0000000 --- a/src/LocalPost.AzureQueueConsumer/LocalPost.AzureQueueConsumer.csproj +++ /dev/null @@ -1,55 +0,0 @@ - - - - netstandard2.0 - true - - false - - LocalPost.AzureQueueConsumer - background;task;queue;azure;sqs - Local (in-process) background queue for sending to Amazon SNS. 
- Alexey Shokov - - README.md - MIT - https://github.com/alexeyshockov/LocalPost - git - true - - - - - - - - - - true - - - - true - true - true - true - snupkg - - - true - - - - - - - - - - - - - - - - diff --git a/src/LocalPost.KafkaConsumer/Consumer.cs b/src/LocalPost.KafkaConsumer/Consumer.cs index 6b98a69..a8d4737 100644 --- a/src/LocalPost.KafkaConsumer/Consumer.cs +++ b/src/LocalPost.KafkaConsumer/Consumer.cs @@ -7,7 +7,7 @@ namespace LocalPost.KafkaConsumer; internal sealed class Consumer(string name, ILogger logger, - ClientFactory clientFactory, IHandlerManager> handler) + ClientFactory clientFactory, IHandlerManager> hm) : IHostedService, IHealthAwareService, IDisposable { private Clients _clients = new([]); @@ -30,7 +30,7 @@ internal sealed class Consumer(string name, ILogger logger, public IHealthCheck ReadinessCheck => HealthChecks.From(() => Ready); - private async Task RunConsumerAsync(Client client, CancellationToken execToken) + private async Task RunConsumerAsync(Client client, Handler> handler, CancellationToken execToken) { // (Optionally) wait for app start @@ -39,8 +39,8 @@ private async Task RunConsumerAsync(Client client, CancellationToken execToken) while (!execToken.IsCancellationRequested) { var result = client.Consume(execToken); - await handler.Handle(new ConsumeContext(client, result, result.Message.Value), CancellationToken.None) - .ConfigureAwait(false); + var context = new ConsumeContext(client, result, result.Message.Value); + await handler(context, CancellationToken.None).ConfigureAwait(false); } } catch (OperationCanceledException e) when (e.CancellationToken == execToken) @@ -76,7 +76,7 @@ public async Task StartAsync(CancellationToken ct) logger.LogInformation("Kafka consumer started"); logger.LogDebug("Invoking the event handler..."); - await handler.Start(ct).ConfigureAwait(false); + var handler = await hm.Start(ct).ConfigureAwait(false); logger.LogDebug("Event handler started"); _exec = ObserveExecution(); @@ -87,11 
+87,11 @@ async Task ObserveExecution() try { var executions = clients.Select(client => - Task.Run(() => RunConsumerAsync(client, execTokenSource.Token), ct) + Task.Run(() => RunConsumerAsync(client, handler, execTokenSource.Token), ct) ).ToArray(); await (executions.Length == 1 ? executions[0] : Task.WhenAll(executions)).ConfigureAwait(false); - await handler.Stop(_execException, _completionToken).ConfigureAwait(false); + await hm.Stop(_execException, _completionToken).ConfigureAwait(false); } finally { diff --git a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs index 09b34d6..cd5e200 100644 --- a/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs +++ b/src/LocalPost.KafkaConsumer/DependencyInjection/KafkaBuilder.cs @@ -14,27 +14,18 @@ public sealed class KafkaBuilder(IServiceCollection services) /// /// Add a Kafka consumer with a custom message handler. /// - /// Message handler factory. + /// Message handler factory. /// Consumer options builder. - public OptionsBuilder AddConsumer(HandlerFactory> hf) => - AddConsumer(Options.DefaultName, hf); + public OptionsBuilder AddConsumer(HandlerManagerFactory> hmf) => + AddConsumer(Options.DefaultName, hmf); /// /// Add a Kafka consumer with a custom message handler. /// /// Consumer name (should be unique in the application). Also, the default group ID. - /// Message handler factory. + /// Message handler factory. /// Consumer options builder. - public OptionsBuilder AddConsumer(string name, HandlerFactory> hf) => - AddConsumer(name, hf.AsHandlerManager()); - - /// - /// Add a Kafka consumer with a custom message handler. - /// - /// Consumer name (should be unique in the application). Also, the default group ID. - /// Message handler factory. - /// Consumer options builder. 
- public OptionsBuilder AddConsumer(string name, HandlerManagerFactory> hf) + public OptionsBuilder AddConsumer(string name, HandlerManagerFactory> hmf) { var added = services.TryAddKeyedSingleton(name, (provider, _) => { @@ -46,7 +37,7 @@ public OptionsBuilder AddConsumer(string name, HandlerManagerFa return new Consumer(name, provider.GetLoggerFor(), clientFactory, - hf(provider) + hmf(provider) ); }); diff --git a/src/LocalPost.KafkaConsumer/HandlerStackEx.cs b/src/LocalPost.KafkaConsumer/HandlerStackEx.cs index 5b524d3..8d77325 100644 --- a/src/LocalPost.KafkaConsumer/HandlerStackEx.cs +++ b/src/LocalPost.KafkaConsumer/HandlerStackEx.cs @@ -1,79 +1,150 @@ -using System.Text.Json; using Confluent.Kafka; namespace LocalPost.KafkaConsumer; +using MessageHmf = HandlerManagerFactory>; +using MessagesHmf = HandlerManagerFactory>>; + [PublicAPI] public static class HandlerStackEx { - public static HandlerFactory> UseKafkaPayload(this HandlerFactory hf) => - hf.Map, T>(next => async (context, ct) => + public static HandlerManagerFactory> UseKafkaPayload(this HandlerManagerFactory hmf) => + hmf.MapHandler, T>(next => async (context, ct) => await next(context.Payload, ct).ConfigureAwait(false)); - public static HandlerFactory> Trace(this HandlerFactory> hf) => - hf.Map, ConsumeContext>(next => - async (context, ct) => + public static HandlerManagerFactory>> UseKafkaPayload( + this HandlerManagerFactory> hmf) => + hmf.MapHandler>, IReadOnlyCollection>(next => async (batch, ct) => + await next(batch.Select(context => context.Payload).ToArray(), ct).ConfigureAwait(false)); + + public static HandlerManagerFactory> Trace( + this HandlerManagerFactory> hmf) => + hmf.TouchHandler(next => async (context, ct) => + { + using var activity = Tracing.StartProcessing(context); + try { - using var activity = Tracing.StartProcessing(context); - try - { - await next(context, ct).ConfigureAwait(false); - activity?.Success(); - } - catch (Exception e) - { - activity?.Error(e); - throw; - } 
- }); + await next(context, ct).ConfigureAwait(false); + activity?.Success(); + } + catch (Exception e) + { + activity?.Error(e); + throw; + } + }); + + public static HandlerManagerFactory>> Trace( + this HandlerManagerFactory>> hmf) => + hmf.TouchHandler(next => async (batch, ct) => + { + using var activity = Tracing.StartProcessing(batch); + try + { + await next(batch, ct).ConfigureAwait(false); + activity?.Success(); + } + catch (Exception e) + { + activity?.Error(e); + throw; + } + }); /// /// Manually acknowledge every message (store offset). /// /// Works only when EnableAutoOffsetStore is false! /// - /// Message handler factory. + /// Message handler factory. /// Message type. /// Wrapped handler factory. - public static HandlerFactory> Acknowledge(this HandlerFactory> hf) => - hf.Map, ConsumeContext>(next => - async (context, ct) => - { - await next(context, ct).ConfigureAwait(false); - context.Acknowledge(); - }); + public static HandlerManagerFactory> Acknowledge( + this HandlerManagerFactory> hmf) => + hmf.TouchHandler(next => async (context, ct) => + { + await next(context, ct).ConfigureAwait(false); + context.StoreOffset(); + }); - #region Deserialize() - - public static HandlerFactory> Deserialize( - this HandlerFactory> hf, Func, T> deserialize) => - hf.Map, ConsumeContext>(next => async (context, ct) => - await next(context.Transform(deserialize), ct).ConfigureAwait(false)); - - public static HandlerFactory> Deserialize( - this HandlerFactory> hf, Func, Task> deserialize) => - hf.Map, ConsumeContext>(next => async (context, ct) => - await next(await context.Transform(deserialize).ConfigureAwait(false), ct).ConfigureAwait(false)); + /// + /// Manually acknowledge every message (store offset). + /// + /// Works only when EnableAutoOffsetStore is false! + /// + /// Message handler factory. + /// Message type. + /// Wrapped handler factory. 
+ public static HandlerManagerFactory>> Acknowledge( + this HandlerManagerFactory>> hmf) => + hmf.TouchHandler(next => async (batch, ct) => + { + await next(batch, ct).ConfigureAwait(false); + foreach (var context in batch) + context.StoreOffset(); + }); private static Func, Task> AsyncDeserializer(IAsyncDeserializer deserializer) => context => deserializer.DeserializeAsync(context.Payload, false, new SerializationContext( MessageComponentType.Value, context.Topic, context.Message.Headers)); - public static HandlerFactory> Deserialize( - this HandlerFactory> hf, IAsyncDeserializer deserializer) => - hf.Deserialize(AsyncDeserializer(deserializer)); - private static Func, T> Deserializer(IDeserializer deserializer) => context => deserializer.Deserialize(context.Payload, false, new SerializationContext( MessageComponentType.Value, context.Topic, context.Message.Headers)); - public static HandlerFactory> Deserialize( - this HandlerFactory> hf, IDeserializer deserializer) => - hf.Deserialize(Deserializer(deserializer)); + #region Deserialize() + + public static MessageHmf Deserialize(this HandlerManagerFactory> hmf, + Func, T> deserialize) => + hmf.MapHandler, ConsumeContext>(next => async (context, ct) => + await next(context.Transform(deserialize), ct).ConfigureAwait(false)); + + public static MessageHmf Deserialize(this HandlerManagerFactory> hmf, + Func, Task> deserialize) => + hmf.MapHandler, ConsumeContext>(next => async (context, ct) => + await next(await context.Transform(deserialize).ConfigureAwait(false), ct).ConfigureAwait(false)); + + public static MessageHmf Deserialize(this HandlerManagerFactory> hmf, + IAsyncDeserializer deserializer) => hmf.Deserialize(AsyncDeserializer(deserializer)); + + public static MessageHmf Deserialize(this HandlerManagerFactory> hmf, + IDeserializer deserializer) => hmf.Deserialize(Deserializer(deserializer)); + + + + public static MessagesHmf Deserialize( + this HandlerManagerFactory>> hmf, + Func, T> deserialize) => + 
hmf.MapHandler>, IReadOnlyCollection>>(next => + async (batch, ct) => + { + var modBatch = batch.Select(context => context.Transform(deserialize)).ToArray(); + await next(modBatch, ct).ConfigureAwait(false); + }); + + public static MessagesHmf Deserialize( + this HandlerManagerFactory>> hmf, + Func, Task> deserialize) => + hmf.MapHandler>, IReadOnlyCollection>>(next => + async (batch, ct) => + { + var modifications = batch.Select(context => context.Transform(deserialize)); + // Task.WhenAll() preserves the order + var modBatch = await Task.WhenAll(modifications).ConfigureAwait(false); + await next(modBatch, ct).ConfigureAwait(false); + }); + + public static MessagesHmf Deserialize( + this HandlerManagerFactory>> hmf, + IAsyncDeserializer deserializer) => hmf.Deserialize(AsyncDeserializer(deserializer)); + + public static MessagesHmf Deserialize( + this HandlerManagerFactory>> hmf, + IDeserializer deserializer) => hmf.Deserialize(Deserializer(deserializer)); #endregion - public static HandlerFactory> DeserializeJson( - this HandlerFactory> hf, JsonSerializerOptions? options = null) => - hf.Deserialize(context => JsonSerializer.Deserialize(context.Payload, options)!); + // public static HandlerFactory> DeserializeJson( + // this HandlerFactory> hf, JsonSerializerOptions? 
options = null) => + // hf.Deserialize(context => JsonSerializer.Deserialize(context.Payload, options)!); } diff --git a/src/LocalPost.KafkaConsumer/Tracing.cs b/src/LocalPost.KafkaConsumer/Tracing.cs index 8f2847a..17e6c49 100644 --- a/src/LocalPost.KafkaConsumer/Tracing.cs +++ b/src/LocalPost.KafkaConsumer/Tracing.cs @@ -68,8 +68,6 @@ public static void AcceptDistributedTracingFrom(this Activity acti activity?.SetTag("messaging.message.body.size", context.Message.Value.Length); - // TODO messaging.operation.type - // Skip, as we always ignore the key on consumption // activity.SetTag("messaging.kafka.message.key", context.Message.Key); @@ -78,6 +76,15 @@ public static void AcceptDistributedTracingFrom(this Activity acti return activity; } + public static Activity? SetTagsFor(this Activity? activity, IReadOnlyCollection> batch) + { + activity?.SetTag("messaging.batch.message_count", batch.Count); + if (batch.Count > 0) + activity?.SetTag("messaging.destination.name", batch.First().Topic); + + return activity; + } + // public static Activity? SetTagsFor(this Activity? activity, IReadOnlyCollection> batch) => // activity?.SetTag("messaging.batch.message_count", batch.Count); } @@ -94,10 +101,25 @@ internal static class Tracing static Tracing() { - // See https://stackoverflow.com/a/909583/322079 var assembly = Assembly.GetExecutingAssembly(); - var version = assembly.GetName().Version; - Source = new ActivitySource(assembly.FullName, version.ToString()); + var version = assembly.GetName().Version?.ToString() ?? "0.0.0"; + Source = new ActivitySource("LocalPost.KafkaConsumer", version); + } + + public static Activity? 
StartProcessing(IReadOnlyCollection> batch) + { + Debug.Assert(batch.Count > 0); + var activity = Source.CreateActivity($"{batch.First().Topic} process", ActivityKind.Consumer); + if (activity is { IsAllDataRequested: true }) + { + activity.SetTag("messaging.operation.type", "process"); + activity.SetDefaultTags(batch.First().Client); + activity.SetTagsFor(batch); + } + + activity?.Start(); + + return activity; } public static Activity? StartProcessing(ConsumeContext context) @@ -105,6 +127,7 @@ static Tracing() var activity = Source.CreateActivity($"{context.Topic} process", ActivityKind.Consumer); if (activity is { IsAllDataRequested: true }) { + activity.SetTag("messaging.operation.type", "process"); activity.SetDefaultTags(context.Client); activity.SetTagsFor(context); activity.AcceptDistributedTracingFrom(context.Message); diff --git a/src/LocalPost.SqsConsumer/ConsumeContext.cs b/src/LocalPost.SqsConsumer/ConsumeContext.cs index a2b082b..5dc8e58 100644 --- a/src/LocalPost.SqsConsumer/ConsumeContext.cs +++ b/src/LocalPost.SqsConsumer/ConsumeContext.cs @@ -18,7 +18,6 @@ internal ConsumeContext(QueueClient client, Message message, T payload) Message = message; } - // TODO Headers instead of the message public void Deconstruct(out T payload, out Message message) { payload = Payload; @@ -44,7 +43,7 @@ public ConsumeContext Transform(TOut payload) => public ConsumeContext Transform(Func, TOut> transform) => Transform(transform(this)); public async Task> Transform(Func, Task> transform) => - Transform(await transform(this)); + Transform(await transform(this).ConfigureAwait(false)); public static implicit operator T(ConsumeContext context) => context.Payload; diff --git a/src/LocalPost.SqsConsumer/Consumer.cs b/src/LocalPost.SqsConsumer/Consumer.cs index 487862e..7ee6752 100644 --- a/src/LocalPost.SqsConsumer/Consumer.cs +++ b/src/LocalPost.SqsConsumer/Consumer.cs @@ -8,7 +8,7 @@ namespace LocalPost.SqsConsumer; internal sealed class Consumer(string name, ILogger 
logger, IAmazonSQS sqs, - ConsumerOptions settings, IHandlerManager> handler) + ConsumerOptions settings, IHandlerManager> hm) : IHostedService, IHealthAwareService, IDisposable { private CancellationTokenSource? _execTokenSource; @@ -29,7 +29,8 @@ internal sealed class Consumer(string name, ILogger logger, IAmazonSQS public IHealthCheck ReadinessCheck => HealthChecks.From(() => Ready); - private async Task RunConsumerAsync(QueueClient client, CancellationToken execToken) + private async Task RunConsumerAsync( + QueueClient client, Handler> handler, CancellationToken execToken) { // (Optionally) wait for app start @@ -40,7 +41,7 @@ private async Task RunConsumerAsync(QueueClient client, CancellationToken execTo var messages = await client.PullMessages(execToken).ConfigureAwait(false); await Task.WhenAll(messages .Select(message => new ConsumeContext(client, message, message.Body)) - .Select(context => handler.Handle(context, CancellationToken.None).AsTask())) + .Select(context => handler(context, CancellationToken.None).AsTask())) .ConfigureAwait(false); } } @@ -74,7 +75,7 @@ public async Task StartAsync(CancellationToken ct) var client = new QueueClient(logger, sqs, settings); await client.Connect(ct).ConfigureAwait(false); - await handler.Start(ct).ConfigureAwait(false); + var handler = await hm.Start(ct).ConfigureAwait(false); _exec = ObserveExecution(); return; @@ -85,14 +86,14 @@ async Task ObserveExecution() { var execution = settings.Consumers switch { - 1 => RunConsumerAsync(client, execTokenSource.Token), + 1 => RunConsumerAsync(client, handler, execTokenSource.Token), _ => Task.WhenAll(Enumerable .Range(0, settings.Consumers) - .Select(_ => RunConsumerAsync(client, execTokenSource.Token))) + .Select(_ => RunConsumerAsync(client, handler, execTokenSource.Token))) }; await execution.ConfigureAwait(false); - await handler.Stop(_execException, _completionToken).ConfigureAwait(false); + await hm.Stop(_execException, _completionToken).ConfigureAwait(false); } 
finally { diff --git a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs index f2d8a05..6668476 100644 --- a/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs +++ b/src/LocalPost.SqsConsumer/DependencyInjection/SqsBuilder.cs @@ -14,33 +14,24 @@ public sealed class SqsBuilder(IServiceCollection services) /// /// Add an SQS consumer with a custom message handler. /// - /// Message handler factory. + /// Message handler factory. /// Consumer options builder. - public OptionsBuilder AddConsumer(HandlerFactory> hf) => - AddConsumer(Options.DefaultName, hf); + public OptionsBuilder AddConsumer(HandlerManagerFactory> hmf) => + AddConsumer(Options.DefaultName, hmf); /// /// Add an SQS consumer with a custom message handler. /// /// Consumer name (should be unique in the application). Also, the default queue name. - /// Message handler factory. + /// Message handler factory. /// Consumer options builder. - public OptionsBuilder AddConsumer(string name, HandlerFactory> hf) => - AddConsumer(name, hf.AsHandlerManager()); - - /// - /// Add an SQS consumer with a custom message handler. - /// - /// Consumer name (should be unique in the application). Also, the default queue name. - /// Message handler factory. - /// Consumer options builder. 
- public OptionsBuilder AddConsumer(string name, HandlerManagerFactory> hf) + public OptionsBuilder AddConsumer(string name, HandlerManagerFactory> hmf) { var added = services.TryAddKeyedSingleton(name, (provider, _) => new Consumer(name, provider.GetLoggerFor(), provider.GetRequiredService(), provider.GetOptions(name), - hf(provider) + hmf(provider) )); if (!added) diff --git a/src/LocalPost.SqsConsumer/HandlerStackEx.cs b/src/LocalPost.SqsConsumer/HandlerStackEx.cs index 36c896b..2a83086 100644 --- a/src/LocalPost.SqsConsumer/HandlerStackEx.cs +++ b/src/LocalPost.SqsConsumer/HandlerStackEx.cs @@ -2,20 +2,23 @@ namespace LocalPost.SqsConsumer; +using MessageHmf = HandlerManagerFactory>; +using MessagesHmf = HandlerManagerFactory>>; + [PublicAPI] public static class HandlerStackEx { - public static HandlerFactory> UseSqsPayload(this HandlerFactory hf) => - hf.Map, T>(next => async (context, ct) => + public static HandlerManagerFactory> UseSqsPayload(this HandlerManagerFactory hmf) => + hmf.MapHandler, T>(next => async (context, ct) => await next(context.Payload, ct).ConfigureAwait(false)); - public static HandlerFactory>> UseSqsPayload( - this HandlerFactory> hf) => - hf.Map>, IEnumerable>(next => async (batch, ct) => - await next(batch.Select(context => context.Payload), ct).ConfigureAwait(false)); + public static HandlerManagerFactory>> UseSqsPayload( + this HandlerManagerFactory> hmf) => + hmf.MapHandler>, IReadOnlyCollection>(next => async (batch, ct) => + await next(batch.Select(context => context.Payload).ToArray(), ct).ConfigureAwait(false)); - public static HandlerFactory> Trace(this HandlerFactory> hf) => - hf.Touch(next => async (context, ct) => + public static HandlerManagerFactory> Trace(this HandlerManagerFactory> hmf) => + hmf.TouchHandler(next => async (context, ct) => { using var activity = Tracing.StartProcessing(context); try @@ -30,19 +33,72 @@ public static HandlerFactory> Trace(this HandlerFactory> Acknowledge(this HandlerFactory> hf) => - 
hf.Touch(next => async (context, ct) => + public static HandlerManagerFactory>> Trace( + this HandlerManagerFactory>> hmf) => + hmf.TouchHandler(next => async (batch, ct) => + { + using var activity = Tracing.StartProcessing(batch); + try + { + await next(batch, ct).ConfigureAwait(false); + activity?.Success(); + } + catch (Exception e) + { + activity?.Error(e); + throw; + } + }); + + public static HandlerManagerFactory> Acknowledge( + this HandlerManagerFactory> hmf) => hmf.TouchHandler(next => + async (context, ct) => { await next(context, ct).ConfigureAwait(false); - await context.Client.DeleteMessage(context, ct).ConfigureAwait(false); + // await context.Client.DeleteMessage(context, ct).ConfigureAwait(false); + await context.DeleteMessage(ct).ConfigureAwait(false); }); - public static HandlerFactory> Deserialize( - this HandlerFactory> hf, Func, T> deserialize) => - hf.Map, ConsumeContext>(next => + public static HandlerManagerFactory>> Acknowledge( + this HandlerManagerFactory>> hmf) => hmf.TouchHandler(next => + async (batch, ct) => + { + await next(batch, ct).ConfigureAwait(false); + if (batch.Count > 0) + await batch.First().Client.DeleteMessages(batch, ct).ConfigureAwait(false); + }); + + public static MessageHmf Deserialize( + this HandlerManagerFactory> hmf, + Func, T>> df) => provider => + { + var handler = hmf(provider); + var deserialize = df(provider); + return handler.Map, ConsumeContext>(next => async (context, ct) => await next(context.Transform(deserialize), ct).ConfigureAwait(false)); + }; + + public static MessagesHmf Deserialize( + this HandlerManagerFactory>> hmf, + Func, T>> df) => provider => + { + var handler = hmf(provider); + var deserialize = df(provider); + return handler.Map>, IReadOnlyCollection>>(next => + async (batch, ct) => + { + var modBatch = batch.Select(context => context.Transform(deserialize)).ToArray(); + await next(modBatch, ct).ConfigureAwait(false); + }); + }; + + public static MessageHmf DeserializeJson( + this 
HandlerManagerFactory> hmf, + JsonSerializerOptions? options = null) => + hmf.Deserialize(_ => context => JsonSerializer.Deserialize(context.Payload, options)!); - public static HandlerFactory> DeserializeJson( - this HandlerFactory> hf, JsonSerializerOptions? options = null) => - hf.Deserialize(context => JsonSerializer.Deserialize(context.Payload, options)!); + public static MessagesHmf DeserializeJson( + this HandlerManagerFactory>> hmf, + JsonSerializerOptions? options = null) => + hmf.Deserialize(_ => context => JsonSerializer.Deserialize(context.Payload, options)!); } diff --git a/src/LocalPost.SqsConsumer/QueueClient.cs b/src/LocalPost.SqsConsumer/QueueClient.cs index df4b41e..220b3ce 100644 --- a/src/LocalPost.SqsConsumer/QueueClient.cs +++ b/src/LocalPost.SqsConsumer/QueueClient.cs @@ -107,4 +107,11 @@ public async Task DeleteMessage(ConsumeContext context, CancellationToken // TODO Log failures?.. } + + public async Task DeleteMessages(IEnumerable> batch, CancellationToken ct = default) + { + // TODO DeleteMessageBatch + foreach (var context in batch) + await sqs.DeleteMessageAsync(QueueUrl, context.ReceiptHandle, ct).ConfigureAwait(false); + } } diff --git a/src/LocalPost.SqsConsumer/README.md b/src/LocalPost.SqsConsumer/README.md index 3924a11..7b1b69c 100644 --- a/src/LocalPost.SqsConsumer/README.md +++ b/src/LocalPost.SqsConsumer/README.md @@ -2,8 +2,15 @@ ## IAM Permissions -To operate on a queue, below [permissions](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-api-permissions-reference.html) are required: -- `sqs:GetQueueUrl` -- `sqs:GetQueueAttributes` -- `sqs:ReceiveMessage` -- `sqs:ChangeMessageVisibility` +Only `sqs:ReceiveMessage` is required to run a queue consumer. 
To use additional features also require: +- `sqs:GetQueueUrl` (to use queue names instead of the full URLs) +- `sqs:GetQueueAttributes` (to fetch queue attributes on startup) + +## Batching + +The first thing to note when using batch processing with SQS: make sure that the queue +[visibility timeout](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/sqs-visibility-timeout.html) +is greater than the batch window. Otherwise, the messages will be re-queued before the batch is processed. + +Most of other [AWS Lambda recommendations](https://docs.aws.amazon.com/lambda/latest/dg/services-sqs-configure.html) +also apply. diff --git a/src/LocalPost.SqsConsumer/Tracing.cs b/src/LocalPost.SqsConsumer/Tracing.cs index 3852e6f..81e02f6 100644 --- a/src/LocalPost.SqsConsumer/Tracing.cs +++ b/src/LocalPost.SqsConsumer/Tracing.cs @@ -44,7 +44,7 @@ public static void AcceptDistributedTracingFrom(this Activity activity, Message public static void SetDefaultTags(this Activity? activity, QueueClient client) { // See https://opentelemetry.io/docs/specs/semconv/messaging/messaging-spans/#messaging-attributes - activity?.SetTag("messaging.system", "sqs"); + activity?.SetTag("messaging.system", "aws_sqs"); activity?.SetTag("messaging.destination.name", client.QueueName); @@ -56,8 +56,8 @@ public static void SetDefaultTags(this Activity? activity, QueueClient client) public static Activity? SetTagsFor(this Activity? activity, ConsumeContext context) => activity?.SetTag("messaging.message.id", context.MessageId); - public static Activity? SetTagsFor(this Activity? activity, IReadOnlyCollection> context) => - activity?.SetTag("messaging.batch.message_count", context.Count); + public static Activity? SetTagsFor(this Activity? activity, IReadOnlyCollection> batch) => + activity?.SetTag("messaging.batch.message_count", batch.Count); public static Activity? SetTagsFor(this Activity? 
activity, ReceiveMessageResponse response) => activity?.SetTag("messaging.batch.message_count", response.Messages.Count); @@ -75,10 +75,27 @@ internal static class Tracing static Tracing() { - // See https://stackoverflow.com/a/909583/322079 var assembly = Assembly.GetExecutingAssembly(); - var version = assembly.GetName().Version; - Source = new ActivitySource(assembly.FullName, version.ToString()); + var version = assembly.GetName().Version?.ToString() ?? "0.0.0"; + Source = new ActivitySource("LocalPost.SqsConsumer", version); + } + + public static Activity? StartProcessing(IReadOnlyCollection> batch) + { + Debug.Assert(batch.Count > 0); + var client = batch.First().Client; + var activity = Source.CreateActivity($"{client.QueueName} process", ActivityKind.Consumer); + if (activity is { IsAllDataRequested: true }) + { + activity?.SetTag("messaging.operation.type", "process"); + activity.SetDefaultTags(client); + activity.SetTagsFor(batch); + // TODO Distributed tracing (OTEL links) + } + + activity?.Start(); + + return activity; } public static Activity? 
StartProcessing(ConsumeContext context) @@ -86,6 +103,7 @@ static Tracing() var activity = Source.CreateActivity($"{context.Client.QueueName} process", ActivityKind.Consumer); if (activity is { IsAllDataRequested: true }) { + activity.SetTag("messaging.operation.type", "process"); activity.SetDefaultTags(context.Client); activity.SetTagsFor(context); activity.AcceptDistributedTracingFrom(context.Message); @@ -102,6 +120,7 @@ static Tracing() if (activity is not { IsAllDataRequested: true }) return activity; + activity.SetTag("messaging.operation.type", "settle"); activity.SetDefaultTags(context.Client); activity.SetTag("messaging.message.id", context.MessageId); @@ -114,6 +133,7 @@ static Tracing() if (activity is not { IsAllDataRequested: true }) return activity; + activity.SetTag("messaging.operation.type", "receive"); activity.SetDefaultTags(client); return activity; diff --git a/src/LocalPost/BackgroundQueue/BackgroundQueue.cs b/src/LocalPost/BackgroundQueue/BackgroundQueue.cs index 25e11be..0eafbed 100644 --- a/src/LocalPost/BackgroundQueue/BackgroundQueue.cs +++ b/src/LocalPost/BackgroundQueue/BackgroundQueue.cs @@ -7,22 +7,45 @@ namespace LocalPost.BackgroundQueue; internal sealed class BackgroundQueue(ILogger> logger, QueueOptions settings, - Channel> queue, ChannelRunner, ConsumeContext> runner) + Channel> channel, IHandlerManager> hm) : IBackgroundQueue, IHostedService, IHealthAwareService, IDisposable { - public IHealthCheck ReadinessCheck => HealthChecks.From(() => runner.Ready); + public IHealthCheck ReadinessCheck => HealthChecks.From(() => _runner switch + { + not null => _runner.Ready, + _ => HealthCheckResult.Unhealthy("Background queue has not started yet"), + }); + + public ValueTask Enqueue(T payload, CancellationToken ct = default) => channel.Writer.WriteAsync(payload, ct); - public ValueTask Enqueue(T payload, CancellationToken ct = default) => queue.Writer.WriteAsync(payload, ct); + public ChannelWriter> Writer => channel.Writer; - public 
ChannelWriter> Writer => queue.Writer; + private ChannelRunner, ConsumeContext>? _runner; + + private ChannelRunner, ConsumeContext> CreateRunner(Handler> handler) + { + return new ChannelRunner, ConsumeContext>(channel, Consume, hm) + { Consumers = settings.MaxConcurrency }; + + async Task Consume(CancellationToken execToken) + { + await foreach (var message in channel.Reader.ReadAllAsync(execToken).ConfigureAwait(false)) + await handler(message, CancellationToken.None).ConfigureAwait(false); + } + } public async Task StartAsync(CancellationToken ct) { - await runner.Start(ct).ConfigureAwait(false); + var handler = await hm.Start(ct).ConfigureAwait(false); + _runner = CreateRunner(handler); + await _runner.Start(ct).ConfigureAwait(false); } public async Task StopAsync(CancellationToken forceShutdownToken) { + if (_runner is null) + return; + logger.LogInformation("Shutting down background queue"); try { @@ -31,12 +54,12 @@ public async Task StopAsync(CancellationToken forceShutdownToken) } finally { - await runner.Stop(null, forceShutdownToken).ConfigureAwait(false); + await _runner.Stop(null, forceShutdownToken).ConfigureAwait(false); } } public void Dispose() { - runner.Dispose(); + _runner?.Dispose(); } } diff --git a/src/LocalPost/BackgroundQueue/ConsumeContext.cs b/src/LocalPost/BackgroundQueue/ConsumeContext.cs index 8cd77f0..c9864e7 100644 --- a/src/LocalPost/BackgroundQueue/ConsumeContext.cs +++ b/src/LocalPost/BackgroundQueue/ConsumeContext.cs @@ -4,7 +4,7 @@ namespace LocalPost.BackgroundQueue; [PublicAPI] -public readonly record struct ConsumeContext // TODO Rename +public readonly record struct ConsumeContext // Better name?.. { public readonly ActivityContext? 
ActivityContext; public readonly T Payload; diff --git a/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs b/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs index 23aaeef..1f5b675 100644 --- a/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs +++ b/src/LocalPost/BackgroundQueue/DependencyInjection/BackgroundQueuesBuilder.cs @@ -18,19 +18,23 @@ public OptionsBuilder> AddDefaultJobQueue() => AddJo ); // TODO Open later - internal OptionsBuilder> AddJobQueue(HandlerFactory> hf) + internal OptionsBuilder> AddJobQueue( + HandlerManagerFactory> hmf) { services.TryAddSingleton(); services.TryAddSingletonAlias(); - return AddQueue(hf); + return AddQueue(hmf); } - public OptionsBuilder> AddQueue(HandlerFactory> hf) => - AddQueue(Options.DefaultName, hf); + // public OptionsBuilder> AddQueue(HandlerFactory> hf) => + // AddQueue(Options.DefaultName, hf); + // + // public OptionsBuilder> AddQueue(string name, HandlerFactory> hf) => + // AddQueue(name, provider => new HandlerManager(hf(provider))); - public OptionsBuilder> AddQueue(string name, HandlerFactory> hf) => - AddQueue(name, hf.AsHandlerManager()); + public OptionsBuilder> AddQueue(HandlerManagerFactory> hmf) => + AddQueue(Options.DefaultName, hmf); public OptionsBuilder> AddQueue(string name, HandlerManagerFactory> hmf) { @@ -44,7 +48,7 @@ public OptionsBuilder> AddQueue(string name, HandlerManagerFa return QueueFor(name); - BackgroundQueue CreateQueue(IServiceProvider provider, object? key) + BackgroundQueue CreateQueue(IServiceProvider provider, object? _) { var settings = provider.GetOptions>(name); var channel = settings.Capacity switch @@ -62,9 +66,8 @@ BackgroundQueue CreateQueue(IServiceProvider provider, object? 
key) }) }; var hm = hmf(provider); - var runner = ChannelRunner.Create(channel, hm, settings.MaxConcurrency, settings.ProcessLeftovers); - return new BackgroundQueue(provider.GetLoggerFor>(), settings, channel, runner); + return new BackgroundQueue(provider.GetLoggerFor>(), settings, channel, hm); } } diff --git a/src/LocalPost/BackgroundQueue/HandlerStackEx.cs b/src/LocalPost/BackgroundQueue/HandlerStackEx.cs index 604bed2..77105bb 100644 --- a/src/LocalPost/BackgroundQueue/HandlerStackEx.cs +++ b/src/LocalPost/BackgroundQueue/HandlerStackEx.cs @@ -5,15 +5,15 @@ namespace LocalPost.BackgroundQueue; [PublicAPI] public static class HandlerStackEx { - public static HandlerFactory> UseMessagePayload(this HandlerFactory hf) => - hf.Map, T>(next => async (context, ct) => + public static HandlerManagerFactory> UseMessagePayload(this HandlerManagerFactory hmf) => + hmf.MapHandler, T>(next => async (context, ct) => await next(context.Payload, ct).ConfigureAwait(false)); - public static HandlerFactory> Trace(this HandlerFactory> hf) + public static HandlerManagerFactory> Trace(this HandlerManagerFactory> hmf) { var typeName = Reflection.FriendlyNameOf(); var transactionName = $"{typeName} process"; - return hf.Map, ConsumeContext>(next => async (context, ct) => + return hmf.TouchHandler(next => async (context, ct) => { using var activity = context.ActivityContext.HasValue ? 
Tracing.Source.StartActivity(transactionName, ActivityKind.Consumer, diff --git a/src/LocalPost/BackgroundQueue/Tracing.cs b/src/LocalPost/BackgroundQueue/Tracing.cs index 45f6ba7..03e720e 100644 --- a/src/LocalPost/BackgroundQueue/Tracing.cs +++ b/src/LocalPost/BackgroundQueue/Tracing.cs @@ -11,9 +11,8 @@ internal static class Tracing static Tracing() { - // See https://stackoverflow.com/a/909583/322079 var assembly = Assembly.GetExecutingAssembly(); - var version = assembly.GetName().Version; - Source = new ActivitySource(assembly.FullName, version.ToString()); + var version = assembly.GetName().Version?.ToString() ?? "0.0.0"; + Source = new ActivitySource("LocalPost.BackgroundQueue", version); } } diff --git a/src/LocalPost/Flow/Event.cs b/src/LocalPost/Flow/Event.cs deleted file mode 100644 index bc71e75..0000000 --- a/src/LocalPost/Flow/Event.cs +++ /dev/null @@ -1,38 +0,0 @@ -namespace LocalPost.Flow; - -public delegate IHandlerManager HandlerManagerFactory(IServiceProvider provider); - -public interface IHandlerManager -{ - ValueTask Start(CancellationToken ct); - - ValueTask Handle(T payload, CancellationToken ct); // Handler - - ValueTask Stop(Exception? error, CancellationToken ct); -} - -internal sealed class HandlerManager(Handler handler) : IHandlerManager -{ - public ValueTask Start(CancellationToken ct) => ValueTask.CompletedTask; - - public ValueTask Handle(T payload, CancellationToken ct) => handler(payload, ct); - - public ValueTask Stop(Exception? error, CancellationToken ct) => ValueTask.CompletedTask; -} - -// public enum EventType : byte -// { -// Message, // With required payload -// Begin, // Empty -// End, // With an optional error -// } -// -// [PublicAPI] -// public readonly record struct Event(EventType Type, T Payload = default!, Exception? 
Error = null) -// { -// public static Event Begin => new(EventType.Begin); -// public static Event End => new(EventType.End); -// public static Event Fail(Exception e) => new(EventType.End, Error: e); -// -// public static implicit operator Event(T payload) => new(EventType.Message, payload); -// } diff --git a/src/LocalPost/Flow/HandlerStackEx.cs b/src/LocalPost/Flow/HandlerStackEx.cs index 9a50561..fd027cc 100644 --- a/src/LocalPost/Flow/HandlerStackEx.cs +++ b/src/LocalPost/Flow/HandlerStackEx.cs @@ -4,28 +4,9 @@ namespace LocalPost.Flow; -[PublicAPI] -public static partial class HandlerStackEx -{ - // Keep it internal for now, until it's clear that this generic transformation is useful - internal static HandlerManagerFactory AsHandlerManager(this HandlerFactory hf) => provider => - { - var handler = hf(provider); - return new HandlerManager(handler); - }; - - public static HandlerManagerFactory Buffer(this HandlerFactory hf, - int capacity, int consumers = 1, bool singleProducer = false) => - hf.AsHandlerManager().Buffer(capacity, consumers, singleProducer); - - public static HandlerManagerFactory Batch(this HandlerFactory> hf, - int size, TimeSpan window, - int capacity = 1, bool singleProducer = false) => - hf.AsHandlerManager().Batch(size, window, capacity, singleProducer); -} [PublicAPI] -public static partial class HandlerManagerStackEx +public static partial class HandlerStackEx { public static HandlerManagerFactory Buffer(this HandlerManagerFactory hmf, int capacity, int consumers = 1, bool singleProducer = false) @@ -42,71 +23,78 @@ public static HandlerManagerFactory Buffer(this HandlerManagerFactory h private static HandlerManagerFactory Buffer(this HandlerManagerFactory hmf, Channel channel, int consumers = 1) => provider => - { - var next = hmf(provider); - var buffer = new ChannelRunner(channel, Consume, next) { Consumers = consumers }; - return new BufferHandlerManager(channel, buffer); - - async Task Consume(CancellationToken execToken) - { - 
await foreach (var message in channel.Reader.ReadAllAsync(execToken).ConfigureAwait(false)) - await next.Handle(message, CancellationToken.None).ConfigureAwait(false); - }; - }; + new BufferHandlerManager(hmf(provider), channel, consumers); - public static HandlerManagerFactory Batch(this HandlerManagerFactory> hmf, + public static HandlerManagerFactory Batch(this HandlerManagerFactory> hmf, int size, TimeSpan window, int capacity = 1, bool singleProducer = false) => provider => { + var channel = Channel.CreateBounded(new BoundedChannelOptions(capacity) + { + FullMode = BoundedChannelFullMode.Wait, + SingleReader = true, + SingleWriter = singleProducer, + }); var next = hmf(provider); - return BatchHandlerManager.Create(next, size, window, capacity, singleProducer); + return new BatchHandlerManager(next, channel, size, window); }; } -internal sealed class BufferHandlerManager(Channel channel, - ChannelRunner runner) : IHandlerManager +internal sealed class BufferHandlerManager(IHandlerManager next, Channel channel, + int consumers) : IHandlerManager { - public ValueTask Start(CancellationToken ct) => runner.Start(ct); + private ChannelRunner? _runner; + + private ChannelRunner CreateRunner(Handler handler) + { + return new ChannelRunner(channel, Consume, next) { Consumers = consumers }; + + async Task Consume(CancellationToken execToken) + { + await foreach (var message in channel.Reader.ReadAllAsync(execToken).ConfigureAwait(false)) + await handler(message, CancellationToken.None).ConfigureAwait(false); + } + } - public ValueTask Handle(T payload, CancellationToken ct) => channel.Writer.WriteAsync(payload, ct); + public async ValueTask> Start(CancellationToken ct) + { + var handler = await next.Start(ct).ConfigureAwait(false); + _runner = CreateRunner(handler); + await _runner.Start(ct).ConfigureAwait(false); + return channel.Writer.WriteAsync; + } - public ValueTask Stop(Exception? 
error, CancellationToken ct) => runner.Stop(error, ct); + public async ValueTask Stop(Exception? error, CancellationToken ct) + { + if (_runner is not null) + await _runner.Stop(error, ct).ConfigureAwait(false); + await next.Stop(error, ct).ConfigureAwait(false); + } } -internal sealed class BatchHandlerManager(Channel channel, - ChannelRunner> runner) : IHandlerManager +internal sealed class BatchHandlerManager(IHandlerManager> next, Channel channel, + int size, TimeSpan window) : IHandlerManager { - public static BatchHandlerManager Create(IHandlerManager> next, - int size, TimeSpan window, - int capacity = 1, bool singleProducer = false) + private ChannelRunner>? _runner; + + private ChannelRunner> CreateRunner(Handler> handler) { - var channel = Channel.CreateBounded(new BoundedChannelOptions(capacity) - { - FullMode = BoundedChannelFullMode.Wait, - SingleReader = true, - SingleWriter = singleProducer, - }); - var buffer = new ChannelRunner>(channel, Consume, next) { Consumers = 1 }; - var hm = new BatchHandlerManager(channel, buffer); - return hm; + return new ChannelRunner>(channel, Consume, next) { Consumers = 1 }; async Task Consume(CancellationToken execToken) { - var reader = channel.Reader; - var completed = false; - var batchBuilder = ImmutableArray.CreateBuilder(size); - while (!completed) { + var batch = new List(size); using var timeWindowCts = CancellationTokenSource.CreateLinkedTokenSource(execToken); timeWindowCts.CancelAfter(window); try { - while (batchBuilder.Count < size) + while (batch.Count < size) { - var item = await reader.ReadAsync(timeWindowCts.Token).ConfigureAwait(false); - batchBuilder.Add(item); + var item = await channel.Reader.ReadAsync(timeWindowCts.Token).ConfigureAwait(false); + batch.Add(item); } } catch (OperationCanceledException) when (!execToken.IsCancellationRequested) @@ -118,39 +106,27 @@ async Task Consume(CancellationToken execToken) completed = true; } - if (batchBuilder.Count == 0) + if (batch.Count == 0) continue; 
- // If Capacity equals Count, the internal array will be extracted without copying the contents. - // Otherwise, the contents will be copied into a new array. The internal buffer will then be set to a - // zero length array. - var batch = batchBuilder.DrainToImmutable(); - - await next.Handle(batch, CancellationToken.None).ConfigureAwait(false); + await handler(batch, CancellationToken.None).ConfigureAwait(false); } - }; + } } - public ValueTask Start(CancellationToken ct) => runner.Start(ct); - - public ValueTask Handle(T payload, CancellationToken ct) => channel.Writer.WriteAsync(payload, ct); - - public ValueTask Stop(Exception? error, CancellationToken ct) => runner.Stop(error, ct); -} - -internal static class ChannelRunner -{ - public static ChannelRunner Create(Channel channel, IHandlerManager handler, - int consumers = 1, bool processLeftovers = true) + public async ValueTask> Start(CancellationToken ct) { - return new ChannelRunner(channel, Consume, handler) - { Consumers = consumers, ProcessLeftovers = processLeftovers }; + var handler = await next.Start(ct).ConfigureAwait(false); + _runner = CreateRunner(handler); + await _runner.Start(ct).ConfigureAwait(false); + return channel.Writer.WriteAsync; + } - async Task Consume(CancellationToken execToken) - { - await foreach (var message in channel.Reader.ReadAllAsync(execToken).ConfigureAwait(false)) - await handler.Handle(message, CancellationToken.None).ConfigureAwait(false); - } + public async ValueTask Stop(Exception? 
error, CancellationToken ct) + { + if (_runner is not null) + await _runner.Stop(error, ct).ConfigureAwait(false); + await next.Stop(error, ct).ConfigureAwait(false); } } diff --git a/src/LocalPost/Handler.cs b/src/LocalPost/Handler.cs index 7071d06..517362b 100644 --- a/src/LocalPost/Handler.cs +++ b/src/LocalPost/Handler.cs @@ -4,7 +4,6 @@ namespace LocalPost; public delegate Handler HandlerFactory(IServiceProvider provider); - public delegate Handler HandlerMiddleware(Handler next); // Too narrow use case @@ -17,3 +16,35 @@ public interface IHandler { ValueTask InvokeAsync(TOut payload, CancellationToken ct); } + + + +public delegate IHandlerManager HandlerManagerFactory(IServiceProvider provider); + +public delegate IHandlerManager HandlerManagerMiddleware(IHandlerManager next); + +public interface IHandlerManager +{ + ValueTask> Start(CancellationToken ct); + + ValueTask Stop(Exception? error, CancellationToken ct); +} + +internal sealed class HandlerManager(Handler handler) : IHandlerManager +{ + public ValueTask> Start(CancellationToken ct) => ValueTask.FromResult(handler); + + public ValueTask Stop(Exception? error, CancellationToken ct) => ValueTask.CompletedTask; +} + +internal sealed class HandlerDecorator( + IHandlerManager next, HandlerMiddleware middleware) : IHandlerManager +{ + public async ValueTask> Start(CancellationToken ct) + { + var nextHandler = await next.Start(ct).ConfigureAwait(false); + return middleware(nextHandler); + } + + public ValueTask Stop(Exception? 
error, CancellationToken ct) => next.Stop(error, ct); +} diff --git a/src/LocalPost/HandlerStack.cs b/src/LocalPost/HandlerStack.cs index 7903dd5..487777a 100644 --- a/src/LocalPost/HandlerStack.cs +++ b/src/LocalPost/HandlerStack.cs @@ -2,23 +2,38 @@ namespace LocalPost; -[PublicAPI] -public static class HandlerStack -{ - public static readonly HandlerFactory Empty = _ => (_, _) => default; -} +// [PublicAPI] +// public static class HandlerStack +// { +// public static readonly HandlerFactory Empty = _ => (_, _) => default; +// } + +// [PublicAPI] +// public static class HandlerStack +// { +// public static HandlerFactory For(Action syncHandler) => For((payload, _) => +// { +// syncHandler(payload); +// return ValueTask.CompletedTask; +// }); +// +// public static HandlerFactory For(Handler handler) => _ => handler; +// +// public static HandlerFactory From() where THandler : IHandler => +// provider => provider.GetRequiredService().InvokeAsync; +// } [PublicAPI] public static class HandlerStack { - public static HandlerFactory For(Action syncHandler) => For((payload, _) => + public static HandlerManagerFactory For(Action syncHandler) => For((payload, _) => { syncHandler(payload); return ValueTask.CompletedTask; }); - public static HandlerFactory For(Handler handler) => _ => handler; + public static HandlerManagerFactory For(Handler handler) => _ => new HandlerManager(handler); - public static HandlerFactory From() where THandler : IHandler => - provider => provider.GetRequiredService().InvokeAsync; + public static HandlerManagerFactory From() where THandler : IHandler => + provider => new HandlerManager(provider.GetRequiredService().InvokeAsync); } diff --git a/src/LocalPost/HandlerStackOps.cs b/src/LocalPost/HandlerStackOps.cs index 7d234e5..6711942 100644 --- a/src/LocalPost/HandlerStackOps.cs +++ b/src/LocalPost/HandlerStackOps.cs @@ -1,74 +1,29 @@ namespace LocalPost; + [PublicAPI] -public static class HandlerStackOps +public static partial class 
HandlerStackOps { - // Just resolve it manually, it's one line longer, same cognitive load or even less, and one additional type less - // public static HandlerFactory Map(this HandlerFactory hf, - // HandlerMiddlewareFactory middlewareFactory) => provider => - // { - // var h = hf(provider); - // var m = middlewareFactory(provider); - // - // return m(h); - // }; + public static IHandlerManager Map(this IHandlerManager hm, + HandlerMiddleware middleware) => new HandlerDecorator(hm, middleware); - // Too narrow use case, and makes Map() inconvenient to use - // public static HandlerFactory Map(this HandlerFactory hf, - // Func> middlewareFactory) => hf.Map(provider => - // middlewareFactory(provider).Invoke); - // public static HandlerFactory Map(this HandlerFactory hf, - // Func> middlewareFactory) => provider => - // { - // var handler = hf(provider); - // return middlewareFactory(provider).Invoke(handler); - // }; - // public static HandlerFactory Map(this HandlerFactory hf, - // where T : IHandlerMiddleware => hf.Map(provider => - // ActivatorUtilities.CreateInstance(provider).Invoke); + public static IHandlerManager Touch(this IHandlerManager hm, + HandlerMiddleware middleware) => hm.Map(middleware); - public static HandlerFactory Map(this HandlerFactory hf, - HandlerMiddleware middleware) => provider => + public static HandlerManagerFactory Map(this HandlerManagerFactory hmf, + HandlerManagerMiddleware middleware) => provider => { - var handler = hf(provider); + var handler = hmf(provider); return middleware(handler); }; - public static HandlerFactory Touch(this HandlerFactory hf, - HandlerMiddleware middleware) => hf.Map(middleware); - - public static HandlerFactory Dispose(this HandlerFactory hf) where T : IDisposable => - hf.Touch(next => async (context, ct) => - { - try - { - await next(context, ct).ConfigureAwait(false); - } - finally - { - context.Dispose(); - } - }); - - public static HandlerFactory DisposeAsync(this HandlerFactory hf) where T : 
IAsyncDisposable => - hf.Touch(next => async (context, ct) => - { - try - { - await next(context, ct).ConfigureAwait(false); - } - finally - { - await context.DisposeAsync().ConfigureAwait(false); - } - }); + public static HandlerManagerFactory Touch(this HandlerManagerFactory hf, + HandlerManagerMiddleware middleware) => hf.Map(middleware); - public static HandlerFactory SkipWhen(this HandlerFactory hf, Func pred) => - hf.Touch(next => async (context, ct) => - { - if (pred(context)) - return; + public static HandlerManagerFactory MapHandler(this HandlerManagerFactory hmf, + HandlerMiddleware middleware) => + hmf.Map(next => next.Map(middleware)); - await next(context, ct).ConfigureAwait(false); - }); + public static HandlerManagerFactory TouchHandler(this HandlerManagerFactory hmf, + HandlerMiddleware middleware) => hmf.MapHandler(middleware); } diff --git a/src/LocalPost/Middlewares.cs b/src/LocalPost/Middlewares.cs index b14edca..180b11e 100644 --- a/src/LocalPost/Middlewares.cs +++ b/src/LocalPost/Middlewares.cs @@ -6,92 +6,115 @@ namespace LocalPost; public static partial class Middlewares { - public static HandlerFactory DecodeString(this HandlerFactory hf) => - DecodeString(hf, Encoding.UTF8); + public static HandlerManagerFactory DecodeString(this HandlerManagerFactory hmf) => + DecodeString(hmf, Encoding.UTF8); - public static HandlerFactory DecodeString(this HandlerFactory hf, Encoding encoding) => hf.Map(next => async (payload, ct) => - { - var s = encoding.GetString(payload); - await next(s, ct).ConfigureAwait(false); - }); + public static HandlerManagerFactory DecodeString(this HandlerManagerFactory hmf, + Encoding encoding) => hmf.MapHandler(next => + async (payload, ct) => + { + var s = encoding.GetString(payload); + await next(s, ct).ConfigureAwait(false); + }); /// /// Handle exceptions and log them, to not break the consumer loop. /// - /// Handler factory to wrap. + /// Handler factory to wrap. /// Handler's payload type. 
/// Wrapped handler factory. - public static HandlerFactory LogExceptions(this HandlerFactory hf) => provider => + public static HandlerManagerFactory LogExceptions(this HandlerManagerFactory hmf) => provider => { var logger = provider.GetRequiredService>(); - var next = hf(provider); - - return async (context, ct) => - { - try + return hmf(provider).Touch(next => async (context, ct) => { - await next(context, ct).ConfigureAwait(false); + try + { + await next(context, ct).ConfigureAwait(false); + } + catch (OperationCanceledException e) when (e.CancellationToken == ct) + { + throw; + } + catch (Exception e) + { + logger.LogError(e, "Unhandled exception while processing a message"); + } } - catch (OperationCanceledException e) when (e.CancellationToken == ct) - { - throw; - } - catch (Exception e) - { - logger.LogError(e, "Unhandled exception while processing a message"); - } - }; + ); }; /// /// Create a DI scope for every message and resolve the handler from it. /// - /// Handler factory to wrap. + /// Handler factory to wrap. /// Handler's payload type. /// Wrapped handler factory. - public static HandlerFactory Scoped(this HandlerFactory hf) => provider => - new ScopedHandler(provider.GetRequiredService(), hf).InvokeAsync; + public static HandlerManagerFactory Scoped(this HandlerManagerFactory hmf) => provider => + new ScopedHandlerManager(provider.GetRequiredService(), hmf); /// /// Shutdown the whole app on error. /// - /// Handler factory to wrap. + /// Handler factory to wrap. /// Process exit code. /// Handler's payload type. /// Wrapped handler factory. 
- public static HandlerFactory ShutdownOnError(this HandlerFactory hf, int exitCode = 1) => provider => - { - var appLifetime = provider.GetRequiredService(); - var next = hf(provider); - - return async (context, ct) => + public static HandlerManagerFactory ShutdownOnError(this HandlerManagerFactory hmf, int exitCode = 1) => + provider => { - try - { - await next(context, ct).ConfigureAwait(false); - } - catch (OperationCanceledException e) when (e.CancellationToken == ct) + var appLifetime = provider.GetRequiredService(); + return hmf(provider).Touch(next => async (context, ct) => { - throw; - } - catch - { - appLifetime.StopApplication(); - Environment.ExitCode = exitCode; - } + try + { + await next(context, ct).ConfigureAwait(false); + } + catch (OperationCanceledException e) when (e.CancellationToken == ct) + { + throw; + } + catch + { + appLifetime.StopApplication(); + Environment.ExitCode = exitCode; + } + }); }; - }; } -internal sealed class ScopedHandler(IServiceScopeFactory sf, HandlerFactory hf) : IHandler + +internal sealed class ScopedHandlerManager(IServiceScopeFactory sf, HandlerManagerFactory hmf) + : IHandlerManager { - public async ValueTask InvokeAsync(T payload, CancellationToken ct) + private async ValueTask Handle(T payload, CancellationToken ct) { // See https://andrewlock.net/exploring-dotnet-6-part-10-new-dependency-injection-features-in-dotnet-6/#handling-iasyncdisposable-services-with-iservicescope // And also https://devblogs.microsoft.com/dotnet/announcing-net-6/#microsoft-extensions-dependencyinjection-createasyncscope-apis await using var scope = sf.CreateAsyncScope(); - var handler = hf(scope.ServiceProvider); - await handler(payload, ct).ConfigureAwait(false); + var hm = hmf(scope.ServiceProvider); + var handler = await hm.Start(ct).ConfigureAwait(false); + try + { + await handler(payload, ct).ConfigureAwait(false); + await hm.Stop(null, ct).ConfigureAwait(false); + } + catch (OperationCanceledException e) when (e.CancellationToken 
== ct) + { + throw; + } + catch (Exception e) + { + await hm.Stop(e, ct).ConfigureAwait(false); + } + } + + public ValueTask> Start(CancellationToken ct) + { + Handler handler = Handle; + return ValueTask.FromResult(handler); } + + public ValueTask Stop(Exception? error, CancellationToken ct) => ValueTask.CompletedTask; } diff --git a/src/LocalPost/Resilience/HandlerStackEx.cs b/src/LocalPost/Resilience/HandlerStackEx.cs index 41e2cf4..101a8db 100644 --- a/src/LocalPost/Resilience/HandlerStackEx.cs +++ b/src/LocalPost/Resilience/HandlerStackEx.cs @@ -5,19 +5,16 @@ namespace LocalPost.Resilience; [PublicAPI] public static class HandlerStackEx { - public static HandlerFactory UsePollyPipeline(this HandlerFactory hf, - ResiliencePipeline pipeline) => hf.Touch(next => - async (context, ct) => - { - await pipeline.ExecuteAsync(execCt => next(context, execCt), ct); - }); + public static HandlerManagerFactory UsePollyPipeline(this HandlerManagerFactory hmf, + ResiliencePipeline pipeline) => hmf.TouchHandler(next => (context, ct) => + pipeline.ExecuteAsync(execCt => next(context, execCt), ct)); - public static HandlerFactory UsePollyPipeline(this HandlerFactory hf, + public static HandlerManagerFactory UsePollyPipeline(this HandlerManagerFactory hmf, Action configure) { var builder = new ResiliencePipelineBuilder(); configure(builder); - return hf.UsePollyPipeline(builder.Build()); + return hmf.UsePollyPipeline(builder.Build()); } } diff --git a/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs b/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs index c0e83c2..44e93fb 100644 --- a/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs +++ b/tests/LocalPost.SqsConsumer.Tests/ConsumerTests.cs @@ -1,6 +1,7 @@ using Amazon.Extensions.NETCore.Setup; using Amazon.Runtime; using Amazon.SQS; +using LocalPost.Flow; using LocalPost.SqsConsumer.DependencyInjection; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Hosting; @@ -73,4 +74,46 @@ public async Task 
handles_messages() await host.StopAsync(); } + + [Fact] + public async Task handles_batches() + { + var hostBuilder = Host.CreateApplicationBuilder(); + + var received = new List>(); + + hostBuilder.Services + .AddDefaultAWSOptions(new AWSOptions() + { + DefaultClientConfig = { ServiceURL = _container.GetConnectionString() }, + Credentials = _credentials, + }) + .AddAWSService() + .AddSqsConsumers(sqs => sqs.AddConsumer(QueueName, + HandlerStack.For>(payload => received.Add(payload)) + .Scoped() + .UseSqsPayload() + .Trace() + .LogExceptions() + .Acknowledge() // Will acknowledge in any case, as we already caught all the exceptions before + .Batch(10, TimeSpan.FromSeconds(1)) + )); + + var host = hostBuilder.Build(); + + await host.StartAsync(); + + var sqs = CreateClient(); + await sqs.SendMessageAsync(_queueUrl, "It will be rainy in London tomorrow"); + await sqs.SendMessageAsync(_queueUrl, "It will be sunny in Paris tomorrow"); + + await Task.Delay(3_000); // "App is working" + + received.Should().HaveCount(1); + received[0].Should().BeEquivalentTo( + "It will be rainy in London tomorrow", + "It will be sunny in Paris tomorrow"); + + await host.StopAsync(); + } } From 0587d0e136a5c140100c098e804903085eb9b68b Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Mon, 24 Feb 2025 16:19:59 +0000 Subject: [PATCH 28/33] fix: OTEL and other refinements --- .editorconfig | 7 +++-- .github/dependabot.yml | 19 +----------- .github/workflows/publish.yaml | 6 ++-- .github/workflows/qa.yml | 3 +- README.md | 3 ++ sonar-scan.sh | 13 ++++---- src/Directory.Build.props | 4 +-- src/LocalPost.KafkaConsumer/Tracing.cs | 41 ++++++++++++-------------- src/LocalPost.SqsConsumer/Tracing.cs | 17 ++++++----- src/LocalPost/LocalPost.csproj | 3 -- 10 files changed, 50 insertions(+), 66 deletions(-) diff --git a/.editorconfig b/.editorconfig index 6201ef8..fb75f21 100644 --- a/.editorconfig +++ b/.editorconfig @@ -8,8 +8,11 @@ root = true [*] indent_style = space +[*.sln] +indent_style = tab 
+ # Code files -[*.{cs,csx,vb,vbx}] +[*.{cs,csx,vb,vbx,fs,fsx,fsi}] indent_size = 4 max_line_length = 120 insert_final_newline = true @@ -35,7 +38,7 @@ trim_trailing_whitespace = true insert_final_newline = true # See https://github.com/dotnet/aspnetcore/blob/main/.editorconfig -[src/**/*.{cs,vb}] +[src/**/*.{cs,csx,vb,vbx,fs,fsx,fsi}] # See https://www.jetbrains.com/help/resharper/ConfigureAwait_Analysis.html configure_await_analysis_mode = library diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 91610e9..ce9df8b 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -3,21 +3,4 @@ updates: - package-ecosystem: "github-actions" directory: "/" schedule: - interval: weekly - - package-ecosystem: nuget - directory: "/" - groups: - xunit: - patterns: - - xunit* - schedule: - interval: weekly - ignore: - # Ignore the libraries which are pinned - - dependency-name: "Microsoft.Bcl.AsyncInterfaces" - - dependency-name: "Microsoft.Extensions.Logging" - - dependency-name: "Microsoft.Extensions.Logging.Abstractions" - - dependency-name: "Microsoft.Extensions.Hosting.Abstractions" - - dependency-name: "Microsoft.Extensions.Options" - - dependency-name: "System.Diagnostics.DiagnosticSource" - - dependency-name: "System.Threading.Channels" + interval: monthly diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml index fb42371..c7724b7 100644 --- a/.github/workflows/publish.yaml +++ b/.github/workflows/publish.yaml @@ -1,8 +1,10 @@ +--- name: Publish on: release: types: [ published ] + jobs: test: runs-on: ubuntu-latest @@ -16,7 +18,6 @@ jobs: - uses: actions/setup-dotnet@v4 with: dotnet-version: | - 6.0.x 8.0.x 9.0.x - run: dotnet restore @@ -34,7 +35,8 @@ jobs: fetch-depth: 0 - uses: actions/setup-dotnet@v4 with: - dotnet-version: 9.0.x + dotnet-version: | + 9.0.x - run: dotnet pack -c Release - name: Publish run: | diff --git a/.github/workflows/qa.yml b/.github/workflows/qa.yml index 05fbdd6..69deab1 100644 --- 
a/.github/workflows/qa.yml +++ b/.github/workflows/qa.yml @@ -1,3 +1,4 @@ +--- name: QA on: @@ -5,6 +6,7 @@ on: branches: [ master, main ] pull_request: branches: [ master, main ] + jobs: lint: runs-on: ubuntu-latest @@ -40,7 +42,6 @@ jobs: - uses: actions/setup-dotnet@v4 with: dotnet-version: | - 6.0.x 8.0.x 9.0.x - run: dotnet tool restore diff --git a/README.md b/README.md index e21f553..c5c254b 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,8 @@ # LocalPost +[![NuGet package](https://img.shields.io/nuget/dt/LocalPost)](https://www.nuget.org/packages/LocalPost/) +[![Code coverage](https://img.shields.io/sonar/coverage/alexeyshockov_LocalPost.NET?server=https%3A%2F%2Fsonarcloud.io)](https://sonarcloud.io/project/overview?id=alexeyshockov_LocalPost.NET) + Simple .NET in-memory background queue ([System.Threading.Channels](https://learn.microsoft.com/de-de/dotnet/api/system.threading.channels?view=net-6.0) based). ## Background tasks diff --git a/sonar-scan.sh b/sonar-scan.sh index 33a9c09..6eac297 100755 --- a/sonar-scan.sh +++ b/sonar-scan.sh @@ -1,22 +1,19 @@ #!/usr/bin/env bash -# Print a command before actually executing it -set -x -# Break the script if one of the command fails (returns non-zero status code) -set -e +set -o xtrace,errexit # $SONAR_TOKEN must be defined # $GitVersion_FullSemVer can be used to specify the current version (see GitVersion) -VERSION="dev" +VERSION="" if [ -n "$GitVersion_FullSemVer" ]; then VERSION="/v:"$GitVersion_FullSemVer fi dotnet build-server shutdown dotnet sonarscanner begin \ - /d:sonar.host.url="https://sonarcloud.io" /d:sonar.login="$SONAR_TOKEN" \ - /o:"alexeyshockov" /k:"alexeyshockov_LocalPost" "$VERSION" \ + /d:sonar.host.url="https://sonarcloud.io" /d:sonar.token="$SONAR_TOKEN" \ + /o:"alexeyshockov" /k:"alexeyshockov_LocalPost.NET" "$VERSION" \ /d:sonar.dotnet.excludeTestProjects=true \ /d:sonar.cs.opencover.reportsPaths="tests/*/TestResults/*/coverage.opencover.xml" \ 
/d:sonar.cs.vstest.reportsPaths="tests/*/TestResults/*.trx" @@ -25,4 +22,4 @@ dotnet sonarscanner begin \ dotnet build dotnet test --no-build --collect:"XPlat Code Coverage" --settings coverlet.runsettings --logger=trx -dotnet sonarscanner end /d:sonar.login="$SONAR_TOKEN" +dotnet sonarscanner end /d:sonar.token="$SONAR_TOKEN" diff --git a/src/Directory.Build.props b/src/Directory.Build.props index dc110be..6320993 100644 --- a/src/Directory.Build.props +++ b/src/Directory.Build.props @@ -11,9 +11,9 @@ false Alexey Shokov - https://github.com/alexeyshockov/LocalPost/v$(Version) + https://github.com/alexeyshockov/LocalPost.NET/releases/tag/v$(Version) MIT - https://github.com/alexeyshockov/LocalPost + https://github.com/alexeyshockov/LocalPost.NET git true diff --git a/src/LocalPost.KafkaConsumer/Tracing.cs b/src/LocalPost.KafkaConsumer/Tracing.cs index 17e6c49..e67f1a3 100644 --- a/src/LocalPost.KafkaConsumer/Tracing.cs +++ b/src/LocalPost.KafkaConsumer/Tracing.cs @@ -89,10 +89,11 @@ public static void AcceptDistributedTracingFrom(this Activity acti // activity?.SetTag("messaging.batch.message_count", batch.Count); } -// Npgsql as an inspiration: -// - https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlActivitySource.cs#LL61C31-L61C49 -// - https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlCommand.cs#L1639-L1644 -// Also OTEL semantic convention: https://opentelemetry.io/docs/specs/semconv/messaging/messaging-spans/ +// Based on Semantic Conventions 1.30.0, see +// https://opentelemetry.io/docs/specs/semconv/messaging/messaging-spans/ +// Also Npgsql as an inspiration: +// - https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlActivitySource.cs +// - https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlCommand.cs internal static class Tracing { private static readonly ActivitySource Source; @@ -109,31 +110,27 @@ static Tracing() public static Activity? 
StartProcessing(IReadOnlyCollection> batch) { Debug.Assert(batch.Count > 0); - var activity = Source.CreateActivity($"{batch.First().Topic} process", ActivityKind.Consumer); - if (activity is { IsAllDataRequested: true }) - { - activity.SetTag("messaging.operation.type", "process"); - activity.SetDefaultTags(batch.First().Client); - activity.SetTagsFor(batch); - } + var activity = Source.StartActivity($"process {batch.First().Topic}", ActivityKind.Consumer); + if (activity is not { IsAllDataRequested: true }) + return activity; - activity?.Start(); + activity.SetTag("messaging.operation.type", "process"); + activity.SetDefaultTags(batch.First().Client); + activity.SetTagsFor(batch); return activity; } public static Activity? StartProcessing(ConsumeContext context) { - var activity = Source.CreateActivity($"{context.Topic} process", ActivityKind.Consumer); - if (activity is { IsAllDataRequested: true }) - { - activity.SetTag("messaging.operation.type", "process"); - activity.SetDefaultTags(context.Client); - activity.SetTagsFor(context); - activity.AcceptDistributedTracingFrom(context.Message); - } - - activity?.Start(); + var activity = Source.StartActivity($"process {context.Topic}", ActivityKind.Consumer); + if (activity is not { IsAllDataRequested: true }) + return activity; + + activity.SetTag("messaging.operation.type", "process"); + activity.SetDefaultTags(context.Client); + activity.SetTagsFor(context); + activity.AcceptDistributedTracingFrom(context.Message); return activity; } diff --git a/src/LocalPost.SqsConsumer/Tracing.cs b/src/LocalPost.SqsConsumer/Tracing.cs index 81e02f6..1a68bfb 100644 --- a/src/LocalPost.SqsConsumer/Tracing.cs +++ b/src/LocalPost.SqsConsumer/Tracing.cs @@ -63,10 +63,11 @@ public static void SetDefaultTags(this Activity? 
activity, QueueClient client) activity?.SetTag("messaging.batch.message_count", response.Messages.Count); } -// Npgsql as an inspiration: -// - https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlActivitySource.cs#LL61C31-L61C49 -// - https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlCommand.cs#L1639-L1644 -// Also OTEL semantic convention: https://opentelemetry.io/docs/specs/semconv/messaging/messaging-spans/ +// Based on Semantic Conventions 1.30.0, see +// https://opentelemetry.io/docs/specs/semconv/messaging/messaging-spans/ +// Also Npgsql as an inspiration: +// - https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlActivitySource.cs +// - https://github.com/npgsql/npgsql/blob/main/src/Npgsql/NpgsqlCommand.cs internal static class Tracing { private static readonly ActivitySource Source; @@ -84,7 +85,7 @@ static Tracing() { Debug.Assert(batch.Count > 0); var client = batch.First().Client; - var activity = Source.CreateActivity($"{client.QueueName} process", ActivityKind.Consumer); + var activity = Source.StartActivity($"process {client.QueueName}", ActivityKind.Consumer); if (activity is { IsAllDataRequested: true }) { activity?.SetTag("messaging.operation.type", "process"); @@ -100,7 +101,7 @@ static Tracing() public static Activity? StartProcessing(ConsumeContext context) { - var activity = Source.CreateActivity($"{context.Client.QueueName} process", ActivityKind.Consumer); + var activity = Source.StartActivity($"process {context.Client.QueueName}", ActivityKind.Consumer); if (activity is { IsAllDataRequested: true }) { activity.SetTag("messaging.operation.type", "process"); @@ -116,7 +117,7 @@ static Tracing() public static Activity? 
StartSettling(ConsumeContext context) { - var activity = Source.StartActivity($"{context.Client.QueueName} settle", ActivityKind.Consumer); + var activity = Source.StartActivity($"settle {context.Client.QueueName}", ActivityKind.Consumer); if (activity is not { IsAllDataRequested: true }) return activity; @@ -129,7 +130,7 @@ static Tracing() public static Activity? StartReceiving(QueueClient client) { - var activity = Source.StartActivity($"{client.QueueName} receive", ActivityKind.Consumer); + var activity = Source.StartActivity($"receive {client.QueueName}", ActivityKind.Consumer); if (activity is not { IsAllDataRequested: true }) return activity; diff --git a/src/LocalPost/LocalPost.csproj b/src/LocalPost/LocalPost.csproj index 4c5ff33..3e90087 100644 --- a/src/LocalPost/LocalPost.csproj +++ b/src/LocalPost/LocalPost.csproj @@ -48,9 +48,6 @@ - - <_Parameter1>$(MSBuildProjectName).Flow - <_Parameter1>$(MSBuildProjectName).SqsConsumer From 8dedbaca694bcb0f83de7814e91fc028d62901be Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Mon, 24 Feb 2025 17:47:48 +0000 Subject: [PATCH 29/33] chore: restructuring --- LocalPost.sln | 6 +++--- .../BackgroundQueueApp/BackgroundQueueApp.csproj | 0 .../Controllers/WeatherForecastController.cs | 0 {samples => examples}/BackgroundQueueApp/Program.cs | 0 .../BackgroundQueueApp/Properties/launchSettings.json | 0 {samples => examples}/BackgroundQueueApp/WeatherForecast.cs | 0 .../BackgroundQueueApp/appsettings.Development.json | 0 {samples => examples}/BackgroundQueueApp/appsettings.json | 0 {samples => examples}/Directory.Build.props | 0 .../KafkaConsumerApp/KafkaConsumerApp.csproj | 0 {samples => examples}/KafkaConsumerApp/Program.cs | 0 .../KafkaConsumerApp/Properties/launchSettings.json | 0 {samples => examples}/KafkaConsumerApp/README.md | 0 .../KafkaConsumerApp/appsettings.Development.json | 0 {samples => examples}/KafkaConsumerApp/appsettings.json | 0 {samples => examples}/SqsConsumerApp/Program.cs | 0 
.../SqsConsumerApp/Properties/launchSettings.json | 0 {samples => examples}/SqsConsumerApp/README.md | 0 {samples => examples}/SqsConsumerApp/SqsConsumerApp.csproj | 0 .../SqsConsumerApp/appsettings.Development.json | 0 {samples => examples}/SqsConsumerApp/appsettings.json | 0 21 files changed, 3 insertions(+), 3 deletions(-) rename {samples => examples}/BackgroundQueueApp/BackgroundQueueApp.csproj (100%) rename {samples => examples}/BackgroundQueueApp/Controllers/WeatherForecastController.cs (100%) rename {samples => examples}/BackgroundQueueApp/Program.cs (100%) rename {samples => examples}/BackgroundQueueApp/Properties/launchSettings.json (100%) rename {samples => examples}/BackgroundQueueApp/WeatherForecast.cs (100%) rename {samples => examples}/BackgroundQueueApp/appsettings.Development.json (100%) rename {samples => examples}/BackgroundQueueApp/appsettings.json (100%) rename {samples => examples}/Directory.Build.props (100%) rename {samples => examples}/KafkaConsumerApp/KafkaConsumerApp.csproj (100%) rename {samples => examples}/KafkaConsumerApp/Program.cs (100%) rename {samples => examples}/KafkaConsumerApp/Properties/launchSettings.json (100%) rename {samples => examples}/KafkaConsumerApp/README.md (100%) rename {samples => examples}/KafkaConsumerApp/appsettings.Development.json (100%) rename {samples => examples}/KafkaConsumerApp/appsettings.json (100%) rename {samples => examples}/SqsConsumerApp/Program.cs (100%) rename {samples => examples}/SqsConsumerApp/Properties/launchSettings.json (100%) rename {samples => examples}/SqsConsumerApp/README.md (100%) rename {samples => examples}/SqsConsumerApp/SqsConsumerApp.csproj (100%) rename {samples => examples}/SqsConsumerApp/appsettings.Development.json (100%) rename {samples => examples}/SqsConsumerApp/appsettings.json (100%) diff --git a/LocalPost.sln b/LocalPost.sln index 4b447ab..76f0263 100644 --- a/LocalPost.sln +++ b/LocalPost.sln @@ -2,7 +2,7 @@ Microsoft Visual Studio Solution File, Format Version 12.00 
Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost", "src\LocalPost\LocalPost.csproj", "{474D2C1A-5557-4ED9-AF20-FE195D4C1AF7}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "BackgroundQueueApp", "samples\BackgroundQueueApp\BackgroundQueueApp.csproj", "{46FC61E6-D0FB-4D7D-A81B-20EF8D8D1F4E}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "BackgroundQueueApp", "examples\BackgroundQueueApp\BackgroundQueueApp.csproj", "{46FC61E6-D0FB-4D7D-A81B-20EF8D8D1F4E}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.Tests", "tests\LocalPost.Tests\LocalPost.Tests.csproj", "{0E69A423-5F70-4BA7-8015-0AB0BC4B6FD2}" EndProject @@ -14,9 +14,9 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.KafkaConsumer", " EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Examples", "Examples", "{405721DC-F290-4191-B638-9907D5EB042B}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "KafkaConsumerApp", "samples\KafkaConsumerApp\KafkaConsumerApp.csproj", "{C310487A-B976-4D3E-80AF-4ADBE1C63139}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "KafkaConsumerApp", "examples\KafkaConsumerApp\KafkaConsumerApp.csproj", "{C310487A-B976-4D3E-80AF-4ADBE1C63139}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SqsConsumerApp", "samples\SqsConsumerApp\SqsConsumerApp.csproj", "{2778AEBD-0345-4F79-9E93-73AFAB6C7BCD}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "SqsConsumerApp", "examples\SqsConsumerApp\SqsConsumerApp.csproj", "{2778AEBD-0345-4F79-9E93-73AFAB6C7BCD}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "LocalPost.KafkaConsumer.Tests", "tests\LocalPost.KafkaConsumer.Tests\LocalPost.KafkaConsumer.Tests.csproj", "{734C9C76-B3D8-4AD7-8E76-B14539C3CB4D}" EndProject diff --git a/samples/BackgroundQueueApp/BackgroundQueueApp.csproj b/examples/BackgroundQueueApp/BackgroundQueueApp.csproj similarity index 100% rename from 
samples/BackgroundQueueApp/BackgroundQueueApp.csproj rename to examples/BackgroundQueueApp/BackgroundQueueApp.csproj diff --git a/samples/BackgroundQueueApp/Controllers/WeatherForecastController.cs b/examples/BackgroundQueueApp/Controllers/WeatherForecastController.cs similarity index 100% rename from samples/BackgroundQueueApp/Controllers/WeatherForecastController.cs rename to examples/BackgroundQueueApp/Controllers/WeatherForecastController.cs diff --git a/samples/BackgroundQueueApp/Program.cs b/examples/BackgroundQueueApp/Program.cs similarity index 100% rename from samples/BackgroundQueueApp/Program.cs rename to examples/BackgroundQueueApp/Program.cs diff --git a/samples/BackgroundQueueApp/Properties/launchSettings.json b/examples/BackgroundQueueApp/Properties/launchSettings.json similarity index 100% rename from samples/BackgroundQueueApp/Properties/launchSettings.json rename to examples/BackgroundQueueApp/Properties/launchSettings.json diff --git a/samples/BackgroundQueueApp/WeatherForecast.cs b/examples/BackgroundQueueApp/WeatherForecast.cs similarity index 100% rename from samples/BackgroundQueueApp/WeatherForecast.cs rename to examples/BackgroundQueueApp/WeatherForecast.cs diff --git a/samples/BackgroundQueueApp/appsettings.Development.json b/examples/BackgroundQueueApp/appsettings.Development.json similarity index 100% rename from samples/BackgroundQueueApp/appsettings.Development.json rename to examples/BackgroundQueueApp/appsettings.Development.json diff --git a/samples/BackgroundQueueApp/appsettings.json b/examples/BackgroundQueueApp/appsettings.json similarity index 100% rename from samples/BackgroundQueueApp/appsettings.json rename to examples/BackgroundQueueApp/appsettings.json diff --git a/samples/Directory.Build.props b/examples/Directory.Build.props similarity index 100% rename from samples/Directory.Build.props rename to examples/Directory.Build.props diff --git a/samples/KafkaConsumerApp/KafkaConsumerApp.csproj 
b/examples/KafkaConsumerApp/KafkaConsumerApp.csproj similarity index 100% rename from samples/KafkaConsumerApp/KafkaConsumerApp.csproj rename to examples/KafkaConsumerApp/KafkaConsumerApp.csproj diff --git a/samples/KafkaConsumerApp/Program.cs b/examples/KafkaConsumerApp/Program.cs similarity index 100% rename from samples/KafkaConsumerApp/Program.cs rename to examples/KafkaConsumerApp/Program.cs diff --git a/samples/KafkaConsumerApp/Properties/launchSettings.json b/examples/KafkaConsumerApp/Properties/launchSettings.json similarity index 100% rename from samples/KafkaConsumerApp/Properties/launchSettings.json rename to examples/KafkaConsumerApp/Properties/launchSettings.json diff --git a/samples/KafkaConsumerApp/README.md b/examples/KafkaConsumerApp/README.md similarity index 100% rename from samples/KafkaConsumerApp/README.md rename to examples/KafkaConsumerApp/README.md diff --git a/samples/KafkaConsumerApp/appsettings.Development.json b/examples/KafkaConsumerApp/appsettings.Development.json similarity index 100% rename from samples/KafkaConsumerApp/appsettings.Development.json rename to examples/KafkaConsumerApp/appsettings.Development.json diff --git a/samples/KafkaConsumerApp/appsettings.json b/examples/KafkaConsumerApp/appsettings.json similarity index 100% rename from samples/KafkaConsumerApp/appsettings.json rename to examples/KafkaConsumerApp/appsettings.json diff --git a/samples/SqsConsumerApp/Program.cs b/examples/SqsConsumerApp/Program.cs similarity index 100% rename from samples/SqsConsumerApp/Program.cs rename to examples/SqsConsumerApp/Program.cs diff --git a/samples/SqsConsumerApp/Properties/launchSettings.json b/examples/SqsConsumerApp/Properties/launchSettings.json similarity index 100% rename from samples/SqsConsumerApp/Properties/launchSettings.json rename to examples/SqsConsumerApp/Properties/launchSettings.json diff --git a/samples/SqsConsumerApp/README.md b/examples/SqsConsumerApp/README.md similarity index 100% rename from 
samples/SqsConsumerApp/README.md rename to examples/SqsConsumerApp/README.md diff --git a/samples/SqsConsumerApp/SqsConsumerApp.csproj b/examples/SqsConsumerApp/SqsConsumerApp.csproj similarity index 100% rename from samples/SqsConsumerApp/SqsConsumerApp.csproj rename to examples/SqsConsumerApp/SqsConsumerApp.csproj diff --git a/samples/SqsConsumerApp/appsettings.Development.json b/examples/SqsConsumerApp/appsettings.Development.json similarity index 100% rename from samples/SqsConsumerApp/appsettings.Development.json rename to examples/SqsConsumerApp/appsettings.Development.json diff --git a/samples/SqsConsumerApp/appsettings.json b/examples/SqsConsumerApp/appsettings.json similarity index 100% rename from samples/SqsConsumerApp/appsettings.json rename to examples/SqsConsumerApp/appsettings.json From 0235ccdb1c0745d67bdb1de615892697fc6a9f1d Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Thu, 27 Feb 2025 14:42:25 +0000 Subject: [PATCH 30/33] WIP --- ARTICLES.md | 3 --- CHANGELOG.md | 19 +++++++++++-------- README.md | 5 +++-- sonar-scan.sh | 1 + 4 files changed, 15 insertions(+), 13 deletions(-) delete mode 100644 ARTICLES.md diff --git a/ARTICLES.md b/ARTICLES.md deleted file mode 100644 index d11b015..0000000 --- a/ARTICLES.md +++ /dev/null @@ -1,3 +0,0 @@ -# Existing tutorials - -https://blog.elmah.io/async-processing-of-long-running-tasks-in-asp-net-core/ diff --git a/CHANGELOG.md b/CHANGELOG.md index 93cc0d6..f72c8ca 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,26 +2,29 @@ All notable changes to this project will be documented in this file. -The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
## [Unreleased] ### Added -### Fixed +### Changed + +### Removed ## [0.2.0] - 2023-01-23 -### Added +### Changed -- Kafka consumer +- Whole new design -### Changed +### Removed -- MessageHandler → Handler -- +- LocalPost.SnsPublisher -## [1.0.0] - 2017-06-20 +## [0.1.0] - 2023-01-01 ### Added + +- Initial release diff --git a/README.md b/README.md index c5c254b..b7c09c9 100644 --- a/README.md +++ b/README.md @@ -19,9 +19,10 @@ For the core library: dotnet add package LocalPost ``` -AWS SQS, Kafka and other integrations are provided as separate packages: +AWS SQS, Kafka and other integrations are provided as separate packages, like: ```shell +dotnet add package LocalPost.SqsConsumer dotnet add package LocalPost.KafkaConsumer ``` @@ -49,4 +50,4 @@ Service bus (for bigger solutions): ## Inspiration -- [FastStream](https://github.com/airtai/faststream) — Python framework with similar goals +- [FastStream](https://github.com/airtai/faststream) diff --git a/sonar-scan.sh b/sonar-scan.sh index 6eac297..7296c1f 100755 --- a/sonar-scan.sh +++ b/sonar-scan.sh @@ -15,6 +15,7 @@ dotnet sonarscanner begin \ /d:sonar.host.url="https://sonarcloud.io" /d:sonar.token="$SONAR_TOKEN" \ /o:"alexeyshockov" /k:"alexeyshockov_LocalPost.NET" "$VERSION" \ /d:sonar.dotnet.excludeTestProjects=true \ + /d:sonar.coverage.exclusions="**/examples/**" \ /d:sonar.cs.opencover.reportsPaths="tests/*/TestResults/*/coverage.opencover.xml" \ /d:sonar.cs.vstest.reportsPaths="tests/*/TestResults/*.trx" From ed0c4f4862d659cf76efbd4a8a0c3048936987c4 Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Thu, 27 Feb 2025 14:50:09 +0000 Subject: [PATCH 31/33] chore: changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f72c8ca..2b0d033 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Removed -## [0.2.0] - 2023-01-23 +## [0.2.0] - 2025-02-27 
### Changed From bd88c1025c081ffc033273ee30eb74ca9233db74 Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Fri, 28 Feb 2025 18:35:23 +0000 Subject: [PATCH 32/33] WIP --- CHANGELOG.md | 2 +- src/LocalPost.KafkaConsumer/Options.cs | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 2b0d033..8f4ce4c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Removed -## [0.2.0] - 2025-02-27 +## [0.2.0] - 2025-02-28 ### Changed diff --git a/src/LocalPost.KafkaConsumer/Options.cs b/src/LocalPost.KafkaConsumer/Options.cs index 2469232..368d500 100644 --- a/src/LocalPost.KafkaConsumer/Options.cs +++ b/src/LocalPost.KafkaConsumer/Options.cs @@ -3,6 +3,7 @@ namespace LocalPost.KafkaConsumer; +[UsedImplicitly] public sealed record ConsumerOptions { public ConsumerConfig ClientConfig { get; set; } = new(); From 4b97f103f7e33eaeab45ba0a45c4e209b552e2f5 Mon Sep 17 00:00:00 2001 From: Alexey Shokov Date: Wed, 12 Mar 2025 19:56:36 +0000 Subject: [PATCH 33/33] chore: minor cleanup --- .github/dependabot.yml | 2 +- .github/workflows/publish.yaml | 1 - .github/workflows/qa.yml | 7 +++---- CHANGELOG.md | 6 ++++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/dependabot.yml b/.github/dependabot.yml index ce9df8b..6fff16c 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,6 +1,6 @@ version: 2 updates: - - package-ecosystem: "github-actions" + - package-ecosystem: github-actions directory: "/" schedule: interval: monthly diff --git a/.github/workflows/publish.yaml b/.github/workflows/publish.yaml index c7724b7..d646d95 100644 --- a/.github/workflows/publish.yaml +++ b/.github/workflows/publish.yaml @@ -1,4 +1,3 @@ ---- name: Publish on: diff --git a/.github/workflows/qa.yml b/.github/workflows/qa.yml index 69deab1..37815a1 100644 --- a/.github/workflows/qa.yml +++ b/.github/workflows/qa.yml @@ -1,4 +1,3 @@ 
---- name: QA on: @@ -19,9 +18,9 @@ jobs: env: DEFAULT_BRANCH: main GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - VALIDATE_ALL_CODEBASE: false # Only changed files + VALIDATE_ALL_CODEBASE: false # Only changed files VALIDATE_EDITORCONFIG: true - VALIDATE_CSHARP: true + VALIDATE_CSHARP: false # Checked by SonarQube VALIDATE_JSON: true VALIDATE_MARKDOWN: true VALIDATE_YAML: true @@ -33,7 +32,7 @@ jobs: steps: - uses: actions/checkout@v4 with: - # Disabling shallow clone is recommended by SonarCloud for improving relevancy of reporting + # Disabling shallow clone is recommended by SonarQube for improving relevancy of reporting fetch-depth: 0 - uses: actions/setup-java@v4 with: diff --git a/CHANGELOG.md b/CHANGELOG.md index 8f4ce4c..dffd79e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,13 +7,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Fixed + ### Added ### Changed ### Removed -## [0.2.0] - 2025-02-28 +## [0.2.0] - 2025-03-12 ### Changed @@ -21,7 +23,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Removed -- LocalPost.SnsPublisher +- LocalPost.SnsPublisher package ## [0.1.0] - 2023-01-01