diff --git a/README.md b/README.md index b2881d1..611ee24 100644 --- a/README.md +++ b/README.md @@ -21,12 +21,12 @@ Atomizer is a modern, high-performance job scheduling and queueing framework for - πŸ›‘ **Graceful Shutdown** β€” Ensure in-flight jobs finish and pending batched jobs are safely released for re-processing during shutdowns. - πŸ“¦ **Batch Processing** β€” Tune throughput with batch size and parallelism settings per queue. - ⏳ **Visibility Timeout** β€” Prevent job duplication by locking jobs during processing. +- πŸ•’ **FIFO Partitioned Processing** β€” Guarantee strict in-order, one-at-a-time execution per partition key (e.g. per customer, per entity). - πŸ§ͺ **In-Memory Driver** β€” Perfect for local development and testing; spin up queues instantly with zero setup. - πŸ”” **ASP.NET Core Integration** β€” Works with DI, logging, and modern C# idioms. ## Planned Features - πŸ“ˆ **Dashboard** β€” Live monitoring, retry/dead-letter management, and operational insights. -- πŸ•’ **FIFO Processing** β€” Guarantee jobs are processed in strict order, without overlap. - ⚑ **Redis Driver** β€” Lightning-fast, distributed, in-memory queues for massive scale. ## Quick Start @@ -130,7 +130,20 @@ app.MapPost( ); ``` -### 5. Schedule Recurring Jobs +### 5. FIFO Processing (Partitioned Jobs) +To guarantee jobs for the same entity execute one-at-a-time in enqueue order, assign a `PartitionKey`: + +```csharp +// All stock events for the same product are processed in strict FIFO order. +await atomizerClient.EnqueueAsync( + new StockEvent(productId, "restock", delta: 50), + options => options.PartitionKey = new PartitionKey(productId.ToString()) +); +``` + +Jobs sharing the same `PartitionKey` and queue are serialized: the next job in the partition only starts after the previous one completes (or fails and is rescheduled). Unpartitioned jobs in the same queue are unaffected and continue to process in parallel. + +### 6. 
Schedule Recurring Jobs in Program.cs: ```csharp ... diff --git a/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/20250827145643_Initial.Designer.cs b/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/20250827145643_Initial.Designer.cs deleted file mode 100644 index ea93656..0000000 --- a/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/20250827145643_Initial.Designer.cs +++ /dev/null @@ -1,240 +0,0 @@ -ο»Ώ// -using System; -using Atomizer.EFCore.Example.Data.MySql; -using Microsoft.EntityFrameworkCore; -using Microsoft.EntityFrameworkCore.Infrastructure; -using Microsoft.EntityFrameworkCore.Metadata; -using Microsoft.EntityFrameworkCore.Migrations; -using Microsoft.EntityFrameworkCore.Storage.ValueConversion; - -#nullable disable - -namespace Atomizer.EFCore.Example.Data.MySql.Migrations -{ - [DbContext(typeof(ExampleMySqlContext))] - [Migration("20250827145643_Initial")] - partial class Initial - { - /// - protected override void BuildTargetModel(ModelBuilder modelBuilder) - { -#pragma warning disable 612, 618 - modelBuilder - .HasAnnotation("ProductVersion", "9.0.8") - .HasAnnotation("Relational:MaxIdentifierLength", 64); - - MySqlModelBuilderExtensions.AutoIncrementColumns(modelBuilder); - - modelBuilder.Entity("Atomizer.EFCore.Example.Entities.Product", b => - { - b.Property("Id") - .ValueGeneratedOnAdd() - .HasColumnType("char(36)"); - - b.Property("CreatedAt") - .HasColumnType("datetime(6)"); - - b.Property("Name") - .IsRequired() - .HasColumnType("longtext"); - - b.Property("Price") - .HasColumnType("decimal(65,30)"); - - b.Property("Quantity") - .HasColumnType("int"); - - b.HasKey("Id"); - - b.ToTable("Products"); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerJobEntity", b => - { - b.Property("Id") - .ValueGeneratedOnAdd() - .HasColumnType("char(36)"); - - b.Property("Attempts") - .HasColumnType("int"); - - b.Property("CompletedAt") - .HasColumnType("datetime(6)"); - - b.Property("CreatedAt") - 
.HasColumnType("datetime(6)"); - - b.Property("FailedAt") - .HasColumnType("datetime(6)"); - - b.Property("IdempotencyKey") - .HasMaxLength(512) - .HasColumnType("varchar(512)"); - - b.Property("LeaseToken") - .HasMaxLength(512) - .HasColumnType("varchar(512)"); - - b.Property("Payload") - .IsRequired() - .HasColumnType("longtext"); - - b.Property("PayloadType") - .IsRequired() - .HasMaxLength(1024) - .HasColumnType("varchar(1024)"); - - b.Property("QueueKey") - .IsRequired() - .HasMaxLength(512) - .HasColumnType("varchar(512)"); - - b.Property("RetryIntervals") - .IsRequired() - .HasMaxLength(4096) - .HasColumnType("varchar(4096)"); - - b.Property("ScheduleJobKey") - .HasMaxLength(512) - .HasColumnType("varchar(512)"); - - b.Property("ScheduledAt") - .HasColumnType("datetime(6)"); - - b.Property("Status") - .HasColumnType("int"); - - b.Property("UpdatedAt") - .HasColumnType("datetime(6)"); - - b.Property("VisibleAt") - .HasColumnType("datetime(6)"); - - b.HasKey("Id"); - - b.ToTable("AtomizerJobs", (string)null); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerJobErrorEntity", b => - { - b.Property("Id") - .ValueGeneratedOnAdd() - .HasColumnType("char(36)"); - - b.Property("Attempt") - .HasColumnType("int"); - - b.Property("CreatedAt") - .HasColumnType("datetime(6)"); - - b.Property("ErrorMessage") - .HasMaxLength(2048) - .HasColumnType("varchar(2048)"); - - b.Property("ExceptionType") - .HasMaxLength(1024) - .HasColumnType("varchar(1024)"); - - b.Property("JobId") - .HasColumnType("char(36)"); - - b.Property("RuntimeIdentity") - .HasMaxLength(255) - .HasColumnType("varchar(255)"); - - b.Property("StackTrace") - .HasMaxLength(5120) - .HasColumnType("varchar(5120)"); - - b.HasKey("Id"); - - b.HasIndex("JobId"); - - b.ToTable("AtomizerJobErrors", (string)null); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerScheduleEntity", b => - { - b.Property("Id") - .ValueGeneratedOnAdd() - .HasColumnType("char(36)"); - 
- b.Property("CreatedAt") - .HasColumnType("datetime(6)"); - - b.Property("Enabled") - .HasColumnType("tinyint(1)"); - - b.Property("JobKey") - .IsRequired() - .HasMaxLength(512) - .HasColumnType("varchar(512)"); - - b.Property("LastEnqueueAt") - .HasColumnType("datetime(6)"); - - b.Property("MaxCatchUp") - .HasColumnType("int"); - - b.Property("MisfirePolicy") - .HasColumnType("int"); - - b.Property("NextRunAt") - .HasColumnType("datetime(6)"); - - b.Property("Payload") - .IsRequired() - .HasColumnType("longtext"); - - b.Property("PayloadType") - .IsRequired() - .HasMaxLength(1024) - .HasColumnType("varchar(1024)"); - - b.Property("QueueKey") - .IsRequired() - .HasMaxLength(512) - .HasColumnType("varchar(512)"); - - b.Property("RetryIntervals") - .IsRequired() - .HasMaxLength(4096) - .HasColumnType("varchar(4096)"); - - b.Property("Schedule") - .IsRequired() - .HasMaxLength(1024) - .HasColumnType("varchar(1024)"); - - b.Property("TimeZone") - .IsRequired() - .HasMaxLength(64) - .HasColumnType("varchar(64)"); - - b.Property("UpdatedAt") - .HasColumnType("datetime(6)"); - - b.HasKey("Id"); - - b.ToTable("AtomizerSchedules", (string)null); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerJobErrorEntity", b => - { - b.HasOne("Atomizer.EntityFrameworkCore.Entities.AtomizerJobEntity", "Job") - .WithMany("Errors") - .HasForeignKey("JobId") - .OnDelete(DeleteBehavior.Cascade) - .IsRequired(); - - b.Navigation("Job"); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerJobEntity", b => - { - b.Navigation("Errors"); - }); -#pragma warning restore 612, 618 - } - } -} diff --git a/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/20250827145643_Initial.cs b/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/20250827145643_Initial.cs deleted file mode 100644 index c768c0d..0000000 --- a/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/20250827145643_Initial.cs +++ /dev/null @@ -1,176 +0,0 @@ -ο»Ώusing System; 
-using Microsoft.EntityFrameworkCore.Migrations; - -#nullable disable - -namespace Atomizer.EFCore.Example.Data.MySql.Migrations -{ - /// - public partial class Initial : Migration - { - /// - protected override void Up(MigrationBuilder migrationBuilder) - { - migrationBuilder.AlterDatabase().Annotation("MySql:CharSet", "utf8mb4"); - - migrationBuilder - .CreateTable( - name: "AtomizerJobs", - columns: table => new - { - Id = table.Column(type: "char(36)", nullable: false, collation: "ascii_general_ci"), - QueueKey = table - .Column(type: "varchar(512)", maxLength: 512, nullable: false) - .Annotation("MySql:CharSet", "utf8mb4"), - PayloadType = table - .Column(type: "varchar(1024)", maxLength: 1024, nullable: false) - .Annotation("MySql:CharSet", "utf8mb4"), - Payload = table - .Column(type: "longtext", nullable: false) - .Annotation("MySql:CharSet", "utf8mb4"), - ScheduledAt = table.Column(type: "datetime(6)", nullable: false), - VisibleAt = table.Column(type: "datetime(6)", nullable: true), - Status = table.Column(type: "int", nullable: false), - Attempts = table.Column(type: "int", nullable: false), - RetryIntervals = table - .Column(type: "varchar(4096)", maxLength: 4096, nullable: false) - .Annotation("MySql:CharSet", "utf8mb4"), - CreatedAt = table.Column(type: "datetime(6)", nullable: false), - UpdatedAt = table.Column(type: "datetime(6)", nullable: false), - CompletedAt = table.Column(type: "datetime(6)", nullable: true), - FailedAt = table.Column(type: "datetime(6)", nullable: true), - LeaseToken = table - .Column(type: "varchar(512)", maxLength: 512, nullable: true) - .Annotation("MySql:CharSet", "utf8mb4"), - ScheduleJobKey = table - .Column(type: "varchar(512)", maxLength: 512, nullable: true) - .Annotation("MySql:CharSet", "utf8mb4"), - IdempotencyKey = table - .Column(type: "varchar(512)", maxLength: 512, nullable: true) - .Annotation("MySql:CharSet", "utf8mb4"), - }, - constraints: table => - { - table.PrimaryKey("PK_AtomizerJobs", x => x.Id); - } - 
) - .Annotation("MySql:CharSet", "utf8mb4"); - - migrationBuilder - .CreateTable( - name: "AtomizerSchedules", - columns: table => new - { - Id = table.Column(type: "char(36)", nullable: false, collation: "ascii_general_ci"), - JobKey = table - .Column(type: "varchar(512)", maxLength: 512, nullable: false) - .Annotation("MySql:CharSet", "utf8mb4"), - QueueKey = table - .Column(type: "varchar(512)", maxLength: 512, nullable: false) - .Annotation("MySql:CharSet", "utf8mb4"), - PayloadType = table - .Column(type: "varchar(1024)", maxLength: 1024, nullable: false) - .Annotation("MySql:CharSet", "utf8mb4"), - Payload = table - .Column(type: "longtext", nullable: false) - .Annotation("MySql:CharSet", "utf8mb4"), - Schedule = table - .Column(type: "varchar(1024)", maxLength: 1024, nullable: false) - .Annotation("MySql:CharSet", "utf8mb4"), - TimeZone = table - .Column(type: "varchar(64)", maxLength: 64, nullable: false) - .Annotation("MySql:CharSet", "utf8mb4"), - MisfirePolicy = table.Column(type: "int", nullable: false), - MaxCatchUp = table.Column(type: "int", nullable: false), - Enabled = table.Column(type: "tinyint(1)", nullable: false), - RetryIntervals = table - .Column(type: "varchar(4096)", maxLength: 4096, nullable: false) - .Annotation("MySql:CharSet", "utf8mb4"), - NextRunAt = table.Column(type: "datetime(6)", nullable: false), - LastEnqueueAt = table.Column(type: "datetime(6)", nullable: true), - CreatedAt = table.Column(type: "datetime(6)", nullable: false), - UpdatedAt = table.Column(type: "datetime(6)", nullable: false), - }, - constraints: table => - { - table.PrimaryKey("PK_AtomizerSchedules", x => x.Id); - } - ) - .Annotation("MySql:CharSet", "utf8mb4"); - - migrationBuilder - .CreateTable( - name: "Products", - columns: table => new - { - Id = table.Column(type: "char(36)", nullable: false, collation: "ascii_general_ci"), - Name = table - .Column(type: "longtext", nullable: false) - .Annotation("MySql:CharSet", "utf8mb4"), - Price = table.Column(type: 
"decimal(65,30)", nullable: false), - CreatedAt = table.Column(type: "datetime(6)", nullable: false), - Quantity = table.Column(type: "int", nullable: false), - }, - constraints: table => - { - table.PrimaryKey("PK_Products", x => x.Id); - } - ) - .Annotation("MySql:CharSet", "utf8mb4"); - - migrationBuilder - .CreateTable( - name: "AtomizerJobErrors", - columns: table => new - { - Id = table.Column(type: "char(36)", nullable: false, collation: "ascii_general_ci"), - JobId = table.Column(type: "char(36)", nullable: false, collation: "ascii_general_ci"), - ErrorMessage = table - .Column(type: "varchar(2048)", maxLength: 2048, nullable: true) - .Annotation("MySql:CharSet", "utf8mb4"), - StackTrace = table - .Column(type: "varchar(5120)", maxLength: 5120, nullable: true) - .Annotation("MySql:CharSet", "utf8mb4"), - ExceptionType = table - .Column(type: "varchar(1024)", maxLength: 1024, nullable: true) - .Annotation("MySql:CharSet", "utf8mb4"), - CreatedAt = table.Column(type: "datetime(6)", nullable: false), - Attempt = table.Column(type: "int", nullable: false), - RuntimeIdentity = table - .Column(type: "varchar(255)", maxLength: 255, nullable: true) - .Annotation("MySql:CharSet", "utf8mb4"), - }, - constraints: table => - { - table.PrimaryKey("PK_AtomizerJobErrors", x => x.Id); - table.ForeignKey( - name: "FK_AtomizerJobErrors_AtomizerJobs_JobId", - column: x => x.JobId, - principalTable: "AtomizerJobs", - principalColumn: "Id", - onDelete: ReferentialAction.Cascade - ); - } - ) - .Annotation("MySql:CharSet", "utf8mb4"); - - migrationBuilder.CreateIndex( - name: "IX_AtomizerJobErrors_JobId", - table: "AtomizerJobErrors", - column: "JobId" - ); - } - - /// - protected override void Down(MigrationBuilder migrationBuilder) - { - migrationBuilder.DropTable(name: "AtomizerJobErrors"); - - migrationBuilder.DropTable(name: "AtomizerSchedules"); - - migrationBuilder.DropTable(name: "Products"); - - migrationBuilder.DropTable(name: "AtomizerJobs"); - } - } -} diff --git 
a/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/20260503193358_AddIndexForJobKeyOnSchedulesTable.cs b/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/20260503193358_AddIndexForJobKeyOnSchedulesTable.cs deleted file mode 100644 index cd10b04..0000000 --- a/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/20260503193358_AddIndexForJobKeyOnSchedulesTable.cs +++ /dev/null @@ -1,76 +0,0 @@ -ο»Ώusing Microsoft.EntityFrameworkCore.Migrations; - -#nullable disable - -namespace Atomizer.EFCore.Example.Data.MySql.Migrations -{ - /// - public partial class AddIndexForJobKeyOnSchedulesTable : Migration - { - /// - protected override void Up(MigrationBuilder migrationBuilder) - { - migrationBuilder.AlterColumn( - name: "QueueKey", - table: "AtomizerSchedules", - type: "varchar(100)", - maxLength: 100, - nullable: false, - oldClrType: typeof(string), - oldType: "varchar(512)", - oldMaxLength: 512) - .Annotation("MySql:CharSet", "utf8mb4") - .OldAnnotation("MySql:CharSet", "utf8mb4"); - - migrationBuilder.AlterColumn( - name: "JobKey", - table: "AtomizerSchedules", - type: "varchar(255)", - maxLength: 255, - nullable: false, - oldClrType: typeof(string), - oldType: "varchar(512)", - oldMaxLength: 512) - .Annotation("MySql:CharSet", "utf8mb4") - .OldAnnotation("MySql:CharSet", "utf8mb4"); - - migrationBuilder.CreateIndex( - name: "IX_AtomizerSchedules_JobKey", - table: "AtomizerSchedules", - column: "JobKey", - unique: true); - } - - /// - protected override void Down(MigrationBuilder migrationBuilder) - { - migrationBuilder.DropIndex( - name: "IX_AtomizerSchedules_JobKey", - table: "AtomizerSchedules"); - - migrationBuilder.AlterColumn( - name: "QueueKey", - table: "AtomizerSchedules", - type: "varchar(512)", - maxLength: 512, - nullable: false, - oldClrType: typeof(string), - oldType: "varchar(100)", - oldMaxLength: 100) - .Annotation("MySql:CharSet", "utf8mb4") - .OldAnnotation("MySql:CharSet", "utf8mb4"); - - migrationBuilder.AlterColumn( - name: "JobKey", - 
table: "AtomizerSchedules", - type: "varchar(512)", - maxLength: 512, - nullable: false, - oldClrType: typeof(string), - oldType: "varchar(255)", - oldMaxLength: 255) - .Annotation("MySql:CharSet", "utf8mb4") - .OldAnnotation("MySql:CharSet", "utf8mb4"); - } - } -} diff --git a/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/20260503193358_AddIndexForJobKeyOnSchedulesTable.Designer.cs b/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/20260504193620_Initial.Designer.cs similarity index 94% rename from samples/Atomizer.EFCore.Example/Data/MySql/Migrations/20260503193358_AddIndexForJobKeyOnSchedulesTable.Designer.cs rename to samples/Atomizer.EFCore.Example/Data/MySql/Migrations/20260504193620_Initial.Designer.cs index 24c4a77..1d93d78 100644 --- a/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/20260503193358_AddIndexForJobKeyOnSchedulesTable.Designer.cs +++ b/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/20260504193620_Initial.Designer.cs @@ -12,8 +12,8 @@ namespace Atomizer.EFCore.Example.Data.MySql.Migrations { [DbContext(typeof(ExampleMySqlContext))] - [Migration("20260503193358_AddIndexForJobKeyOnSchedulesTable")] - partial class AddIndexForJobKeyOnSchedulesTable + [Migration("20260504193620_Initial")] + partial class Initial { /// protected override void BuildTargetModel(ModelBuilder modelBuilder) @@ -75,6 +75,10 @@ protected override void BuildTargetModel(ModelBuilder modelBuilder) .HasMaxLength(512) .HasColumnType("varchar(512)"); + b.Property("PartitionKey") + .HasMaxLength(255) + .HasColumnType("varchar(255)"); + b.Property("Payload") .IsRequired() .HasColumnType("longtext"); @@ -86,8 +90,8 @@ protected override void BuildTargetModel(ModelBuilder modelBuilder) b.Property("QueueKey") .IsRequired() - .HasMaxLength(512) - .HasColumnType("varchar(512)"); + .HasMaxLength(100) + .HasColumnType("varchar(100)"); b.Property("RetryIntervals") .IsRequired() @@ -95,12 +99,15 @@ protected override void BuildTargetModel(ModelBuilder 
modelBuilder) .HasColumnType("varchar(4096)"); b.Property("ScheduleJobKey") - .HasMaxLength(512) - .HasColumnType("varchar(512)"); + .HasMaxLength(255) + .HasColumnType("varchar(255)"); b.Property("ScheduledAt") .HasColumnType("datetime(6)"); + b.Property("SequenceNumber") + .HasColumnType("bigint"); + b.Property("Status") .HasColumnType("int"); diff --git a/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/20260504193620_Initial.cs b/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/20260504193620_Initial.cs new file mode 100644 index 0000000..14ccf33 --- /dev/null +++ b/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/20260504193620_Initial.cs @@ -0,0 +1,161 @@ +ο»Ώusing System; +using Microsoft.EntityFrameworkCore.Migrations; + +#nullable disable + +namespace Atomizer.EFCore.Example.Data.MySql.Migrations +{ + /// + public partial class Initial : Migration + { + /// + protected override void Up(MigrationBuilder migrationBuilder) + { + migrationBuilder.AlterDatabase() + .Annotation("MySql:CharSet", "utf8mb4"); + + migrationBuilder.CreateTable( + name: "AtomizerJobs", + columns: table => new + { + Id = table.Column(type: "char(36)", nullable: false, collation: "ascii_general_ci"), + QueueKey = table.Column(type: "varchar(100)", maxLength: 100, nullable: false) + .Annotation("MySql:CharSet", "utf8mb4"), + PayloadType = table.Column(type: "varchar(1024)", maxLength: 1024, nullable: false) + .Annotation("MySql:CharSet", "utf8mb4"), + Payload = table.Column(type: "longtext", nullable: false) + .Annotation("MySql:CharSet", "utf8mb4"), + ScheduledAt = table.Column(type: "datetime(6)", nullable: false), + VisibleAt = table.Column(type: "datetime(6)", nullable: true), + Status = table.Column(type: "int", nullable: false), + Attempts = table.Column(type: "int", nullable: false), + RetryIntervals = table.Column(type: "varchar(4096)", maxLength: 4096, nullable: false) + .Annotation("MySql:CharSet", "utf8mb4"), + CreatedAt = table.Column(type: "datetime(6)", 
nullable: false), + UpdatedAt = table.Column(type: "datetime(6)", nullable: false), + CompletedAt = table.Column(type: "datetime(6)", nullable: true), + FailedAt = table.Column(type: "datetime(6)", nullable: true), + LeaseToken = table.Column(type: "varchar(512)", maxLength: 512, nullable: true) + .Annotation("MySql:CharSet", "utf8mb4"), + ScheduleJobKey = table.Column(type: "varchar(255)", maxLength: 255, nullable: true) + .Annotation("MySql:CharSet", "utf8mb4"), + IdempotencyKey = table.Column(type: "varchar(512)", maxLength: 512, nullable: true) + .Annotation("MySql:CharSet", "utf8mb4"), + PartitionKey = table.Column(type: "varchar(255)", maxLength: 255, nullable: true) + .Annotation("MySql:CharSet", "utf8mb4"), + SequenceNumber = table.Column(type: "bigint", nullable: true) + }, + constraints: table => + { + table.PrimaryKey("PK_AtomizerJobs", x => x.Id); + }) + .Annotation("MySql:CharSet", "utf8mb4"); + + migrationBuilder.CreateTable( + name: "AtomizerSchedules", + columns: table => new + { + Id = table.Column(type: "char(36)", nullable: false, collation: "ascii_general_ci"), + JobKey = table.Column(type: "varchar(255)", maxLength: 255, nullable: false) + .Annotation("MySql:CharSet", "utf8mb4"), + QueueKey = table.Column(type: "varchar(100)", maxLength: 100, nullable: false) + .Annotation("MySql:CharSet", "utf8mb4"), + PayloadType = table.Column(type: "varchar(1024)", maxLength: 1024, nullable: false) + .Annotation("MySql:CharSet", "utf8mb4"), + Payload = table.Column(type: "longtext", nullable: false) + .Annotation("MySql:CharSet", "utf8mb4"), + Schedule = table.Column(type: "varchar(1024)", maxLength: 1024, nullable: false) + .Annotation("MySql:CharSet", "utf8mb4"), + TimeZone = table.Column(type: "varchar(64)", maxLength: 64, nullable: false) + .Annotation("MySql:CharSet", "utf8mb4"), + MisfirePolicy = table.Column(type: "int", nullable: false), + MaxCatchUp = table.Column(type: "int", nullable: false), + Enabled = table.Column(type: "tinyint(1)", nullable: 
false), + RetryIntervals = table.Column(type: "varchar(4096)", maxLength: 4096, nullable: false) + .Annotation("MySql:CharSet", "utf8mb4"), + NextRunAt = table.Column(type: "datetime(6)", nullable: false), + LastEnqueueAt = table.Column(type: "datetime(6)", nullable: true), + CreatedAt = table.Column(type: "datetime(6)", nullable: false), + UpdatedAt = table.Column(type: "datetime(6)", nullable: false) + }, + constraints: table => + { + table.PrimaryKey("PK_AtomizerSchedules", x => x.Id); + }) + .Annotation("MySql:CharSet", "utf8mb4"); + + migrationBuilder.CreateTable( + name: "Products", + columns: table => new + { + Id = table.Column(type: "char(36)", nullable: false, collation: "ascii_general_ci"), + Name = table.Column(type: "longtext", nullable: false) + .Annotation("MySql:CharSet", "utf8mb4"), + Price = table.Column(type: "decimal(65,30)", nullable: false), + CreatedAt = table.Column(type: "datetime(6)", nullable: false), + Quantity = table.Column(type: "int", nullable: false) + }, + constraints: table => + { + table.PrimaryKey("PK_Products", x => x.Id); + }) + .Annotation("MySql:CharSet", "utf8mb4"); + + migrationBuilder.CreateTable( + name: "AtomizerJobErrors", + columns: table => new + { + Id = table.Column(type: "char(36)", nullable: false, collation: "ascii_general_ci"), + JobId = table.Column(type: "char(36)", nullable: false, collation: "ascii_general_ci"), + ErrorMessage = table.Column(type: "varchar(2048)", maxLength: 2048, nullable: true) + .Annotation("MySql:CharSet", "utf8mb4"), + StackTrace = table.Column(type: "varchar(5120)", maxLength: 5120, nullable: true) + .Annotation("MySql:CharSet", "utf8mb4"), + ExceptionType = table.Column(type: "varchar(1024)", maxLength: 1024, nullable: true) + .Annotation("MySql:CharSet", "utf8mb4"), + CreatedAt = table.Column(type: "datetime(6)", nullable: false), + Attempt = table.Column(type: "int", nullable: false), + RuntimeIdentity = table.Column(type: "varchar(255)", maxLength: 255, nullable: true) + 
.Annotation("MySql:CharSet", "utf8mb4") + }, + constraints: table => + { + table.PrimaryKey("PK_AtomizerJobErrors", x => x.Id); + table.ForeignKey( + name: "FK_AtomizerJobErrors_AtomizerJobs_JobId", + column: x => x.JobId, + principalTable: "AtomizerJobs", + principalColumn: "Id", + onDelete: ReferentialAction.Cascade); + }) + .Annotation("MySql:CharSet", "utf8mb4"); + + migrationBuilder.CreateIndex( + name: "IX_AtomizerJobErrors_JobId", + table: "AtomizerJobErrors", + column: "JobId"); + + migrationBuilder.CreateIndex( + name: "IX_AtomizerSchedules_JobKey", + table: "AtomizerSchedules", + column: "JobKey", + unique: true); + } + + /// + protected override void Down(MigrationBuilder migrationBuilder) + { + migrationBuilder.DropTable( + name: "AtomizerJobErrors"); + + migrationBuilder.DropTable( + name: "AtomizerSchedules"); + + migrationBuilder.DropTable( + name: "Products"); + + migrationBuilder.DropTable( + name: "AtomizerJobs"); + } + } +} diff --git a/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/ExampleMySqlContextModelSnapshot.cs b/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/ExampleMySqlContextModelSnapshot.cs index 1840d7b..7a03282 100644 --- a/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/ExampleMySqlContextModelSnapshot.cs +++ b/samples/Atomizer.EFCore.Example/Data/MySql/Migrations/ExampleMySqlContextModelSnapshot.cs @@ -72,6 +72,10 @@ protected override void BuildModel(ModelBuilder modelBuilder) .HasMaxLength(512) .HasColumnType("varchar(512)"); + b.Property("PartitionKey") + .HasMaxLength(255) + .HasColumnType("varchar(255)"); + b.Property("Payload") .IsRequired() .HasColumnType("longtext"); @@ -83,8 +87,8 @@ protected override void BuildModel(ModelBuilder modelBuilder) b.Property("QueueKey") .IsRequired() - .HasMaxLength(512) - .HasColumnType("varchar(512)"); + .HasMaxLength(100) + .HasColumnType("varchar(100)"); b.Property("RetryIntervals") .IsRequired() @@ -92,12 +96,15 @@ protected override void BuildModel(ModelBuilder 
modelBuilder) .HasColumnType("varchar(4096)"); b.Property("ScheduleJobKey") - .HasMaxLength(512) - .HasColumnType("varchar(512)"); + .HasMaxLength(255) + .HasColumnType("varchar(255)"); b.Property("ScheduledAt") .HasColumnType("datetime(6)"); + b.Property("SequenceNumber") + .HasColumnType("bigint"); + b.Property("Status") .HasColumnType("int"); diff --git a/samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/20250827145628_Initial.Designer.cs b/samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/20250827145628_Initial.Designer.cs deleted file mode 100644 index f4bcd5b..0000000 --- a/samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/20250827145628_Initial.Designer.cs +++ /dev/null @@ -1,240 +0,0 @@ -ο»Ώ// -using System; -using Atomizer.EFCore.Example.Data.Postgres; -using Microsoft.EntityFrameworkCore; -using Microsoft.EntityFrameworkCore.Infrastructure; -using Microsoft.EntityFrameworkCore.Migrations; -using Microsoft.EntityFrameworkCore.Storage.ValueConversion; -using Npgsql.EntityFrameworkCore.PostgreSQL.Metadata; - -#nullable disable - -namespace Atomizer.EFCore.Example.Data.Postgres.Migrations -{ - [DbContext(typeof(ExamplePostgresContext))] - [Migration("20250827145628_Initial")] - partial class Initial - { - /// - protected override void BuildTargetModel(ModelBuilder modelBuilder) - { -#pragma warning disable 612, 618 - modelBuilder - .HasAnnotation("ProductVersion", "9.0.8") - .HasAnnotation("Relational:MaxIdentifierLength", 63); - - NpgsqlModelBuilderExtensions.UseIdentityByDefaultColumns(modelBuilder); - - modelBuilder.Entity("Atomizer.EFCore.Example.Entities.Product", b => - { - b.Property("Id") - .ValueGeneratedOnAdd() - .HasColumnType("uuid"); - - b.Property("CreatedAt") - .HasColumnType("timestamp with time zone"); - - b.Property("Name") - .IsRequired() - .HasColumnType("text"); - - b.Property("Price") - .HasColumnType("numeric"); - - b.Property("Quantity") - .HasColumnType("integer"); - - b.HasKey("Id"); - - b.ToTable("Products"); - 
}); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerJobEntity", b => - { - b.Property("Id") - .ValueGeneratedOnAdd() - .HasColumnType("uuid"); - - b.Property("Attempts") - .HasColumnType("integer"); - - b.Property("CompletedAt") - .HasColumnType("timestamp with time zone"); - - b.Property("CreatedAt") - .HasColumnType("timestamp with time zone"); - - b.Property("FailedAt") - .HasColumnType("timestamp with time zone"); - - b.Property("IdempotencyKey") - .HasMaxLength(512) - .HasColumnType("character varying(512)"); - - b.Property("LeaseToken") - .HasMaxLength(512) - .HasColumnType("character varying(512)"); - - b.Property("Payload") - .IsRequired() - .HasColumnType("text"); - - b.Property("PayloadType") - .IsRequired() - .HasMaxLength(1024) - .HasColumnType("character varying(1024)"); - - b.Property("QueueKey") - .IsRequired() - .HasMaxLength(512) - .HasColumnType("character varying(512)"); - - b.Property("RetryIntervals") - .IsRequired() - .HasMaxLength(4096) - .HasColumnType("character varying(4096)"); - - b.Property("ScheduleJobKey") - .HasMaxLength(512) - .HasColumnType("character varying(512)"); - - b.Property("ScheduledAt") - .HasColumnType("timestamp with time zone"); - - b.Property("Status") - .HasColumnType("integer"); - - b.Property("UpdatedAt") - .HasColumnType("timestamp with time zone"); - - b.Property("VisibleAt") - .HasColumnType("timestamp with time zone"); - - b.HasKey("Id"); - - b.ToTable("AtomizerJobs", "Atomizer"); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerJobErrorEntity", b => - { - b.Property("Id") - .ValueGeneratedOnAdd() - .HasColumnType("uuid"); - - b.Property("Attempt") - .HasColumnType("integer"); - - b.Property("CreatedAt") - .HasColumnType("timestamp with time zone"); - - b.Property("ErrorMessage") - .HasMaxLength(2048) - .HasColumnType("character varying(2048)"); - - b.Property("ExceptionType") - .HasMaxLength(1024) - .HasColumnType("character varying(1024)"); - - 
b.Property("JobId") - .HasColumnType("uuid"); - - b.Property("RuntimeIdentity") - .HasMaxLength(255) - .HasColumnType("character varying(255)"); - - b.Property("StackTrace") - .HasMaxLength(5120) - .HasColumnType("character varying(5120)"); - - b.HasKey("Id"); - - b.HasIndex("JobId"); - - b.ToTable("AtomizerJobErrors", "Atomizer"); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerScheduleEntity", b => - { - b.Property("Id") - .ValueGeneratedOnAdd() - .HasColumnType("uuid"); - - b.Property("CreatedAt") - .HasColumnType("timestamp with time zone"); - - b.Property("Enabled") - .HasColumnType("boolean"); - - b.Property("JobKey") - .IsRequired() - .HasMaxLength(512) - .HasColumnType("character varying(512)"); - - b.Property("LastEnqueueAt") - .HasColumnType("timestamp with time zone"); - - b.Property("MaxCatchUp") - .HasColumnType("integer"); - - b.Property("MisfirePolicy") - .HasColumnType("integer"); - - b.Property("NextRunAt") - .HasColumnType("timestamp with time zone"); - - b.Property("Payload") - .IsRequired() - .HasColumnType("text"); - - b.Property("PayloadType") - .IsRequired() - .HasMaxLength(1024) - .HasColumnType("character varying(1024)"); - - b.Property("QueueKey") - .IsRequired() - .HasMaxLength(512) - .HasColumnType("character varying(512)"); - - b.Property("RetryIntervals") - .IsRequired() - .HasMaxLength(4096) - .HasColumnType("character varying(4096)"); - - b.Property("Schedule") - .IsRequired() - .HasMaxLength(1024) - .HasColumnType("character varying(1024)"); - - b.Property("TimeZone") - .IsRequired() - .HasMaxLength(64) - .HasColumnType("character varying(64)"); - - b.Property("UpdatedAt") - .HasColumnType("timestamp with time zone"); - - b.HasKey("Id"); - - b.ToTable("AtomizerSchedules", "Atomizer"); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerJobErrorEntity", b => - { - b.HasOne("Atomizer.EntityFrameworkCore.Entities.AtomizerJobEntity", "Job") - .WithMany("Errors") - 
.HasForeignKey("JobId") - .OnDelete(DeleteBehavior.Cascade) - .IsRequired(); - - b.Navigation("Job"); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerJobEntity", b => - { - b.Navigation("Errors"); - }); -#pragma warning restore 612, 618 - } - } -} diff --git a/samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/20260503193345_AddIndexForJobKeyOnSchedulesTable.cs b/samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/20260503193345_AddIndexForJobKeyOnSchedulesTable.cs deleted file mode 100644 index 85bbb0d..0000000 --- a/samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/20260503193345_AddIndexForJobKeyOnSchedulesTable.cs +++ /dev/null @@ -1,74 +0,0 @@ -ο»Ώusing Microsoft.EntityFrameworkCore.Migrations; - -#nullable disable - -namespace Atomizer.EFCore.Example.Data.Postgres.Migrations -{ - /// - public partial class AddIndexForJobKeyOnSchedulesTable : Migration - { - /// - protected override void Up(MigrationBuilder migrationBuilder) - { - migrationBuilder.AlterColumn( - name: "QueueKey", - schema: "Atomizer", - table: "AtomizerSchedules", - type: "character varying(100)", - maxLength: 100, - nullable: false, - oldClrType: typeof(string), - oldType: "character varying(512)", - oldMaxLength: 512); - - migrationBuilder.AlterColumn( - name: "JobKey", - schema: "Atomizer", - table: "AtomizerSchedules", - type: "character varying(255)", - maxLength: 255, - nullable: false, - oldClrType: typeof(string), - oldType: "character varying(512)", - oldMaxLength: 512); - - migrationBuilder.CreateIndex( - name: "IX_AtomizerSchedules_JobKey", - schema: "Atomizer", - table: "AtomizerSchedules", - column: "JobKey", - unique: true); - } - - /// - protected override void Down(MigrationBuilder migrationBuilder) - { - migrationBuilder.DropIndex( - name: "IX_AtomizerSchedules_JobKey", - schema: "Atomizer", - table: "AtomizerSchedules"); - - migrationBuilder.AlterColumn( - name: "QueueKey", - schema: "Atomizer", - table: "AtomizerSchedules", - 
type: "character varying(512)", - maxLength: 512, - nullable: false, - oldClrType: typeof(string), - oldType: "character varying(100)", - oldMaxLength: 100); - - migrationBuilder.AlterColumn( - name: "JobKey", - schema: "Atomizer", - table: "AtomizerSchedules", - type: "character varying(512)", - maxLength: 512, - nullable: false, - oldClrType: typeof(string), - oldType: "character varying(255)", - oldMaxLength: 255); - } - } -} diff --git a/samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/20260503193345_AddIndexForJobKeyOnSchedulesTable.Designer.cs b/samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/20260504193605_Initial.Designer.cs similarity index 94% rename from samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/20260503193345_AddIndexForJobKeyOnSchedulesTable.Designer.cs rename to samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/20260504193605_Initial.Designer.cs index 667ace1..2b18890 100644 --- a/samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/20260503193345_AddIndexForJobKeyOnSchedulesTable.Designer.cs +++ b/samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/20260504193605_Initial.Designer.cs @@ -12,8 +12,8 @@ namespace Atomizer.EFCore.Example.Data.Postgres.Migrations { [DbContext(typeof(ExamplePostgresContext))] - [Migration("20260503193345_AddIndexForJobKeyOnSchedulesTable")] - partial class AddIndexForJobKeyOnSchedulesTable + [Migration("20260504193605_Initial")] + partial class Initial { /// protected override void BuildTargetModel(ModelBuilder modelBuilder) @@ -75,6 +75,10 @@ protected override void BuildTargetModel(ModelBuilder modelBuilder) .HasMaxLength(512) .HasColumnType("character varying(512)"); + b.Property("PartitionKey") + .HasMaxLength(255) + .HasColumnType("character varying(255)"); + b.Property("Payload") .IsRequired() .HasColumnType("text"); @@ -86,8 +90,8 @@ protected override void BuildTargetModel(ModelBuilder modelBuilder) b.Property("QueueKey") .IsRequired() - .HasMaxLength(512) - 
.HasColumnType("character varying(512)"); + .HasMaxLength(100) + .HasColumnType("character varying(100)"); b.Property("RetryIntervals") .IsRequired() @@ -95,12 +99,15 @@ protected override void BuildTargetModel(ModelBuilder modelBuilder) .HasColumnType("character varying(4096)"); b.Property("ScheduleJobKey") - .HasMaxLength(512) - .HasColumnType("character varying(512)"); + .HasMaxLength(255) + .HasColumnType("character varying(255)"); b.Property("ScheduledAt") .HasColumnType("timestamp with time zone"); + b.Property("SequenceNumber") + .HasColumnType("bigint"); + b.Property("Status") .HasColumnType("integer"); diff --git a/samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/20250827145628_Initial.cs b/samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/20260504193605_Initial.cs similarity index 65% rename from samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/20250827145628_Initial.cs rename to samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/20260504193605_Initial.cs index 93ba47b..2503b82 100644 --- a/samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/20250827145628_Initial.cs +++ b/samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/20260504193605_Initial.cs @@ -11,7 +11,8 @@ public partial class Initial : Migration /// protected override void Up(MigrationBuilder migrationBuilder) { - migrationBuilder.EnsureSchema(name: "Atomizer"); + migrationBuilder.EnsureSchema( + name: "Atomizer"); migrationBuilder.CreateTable( name: "AtomizerJobs", @@ -19,43 +20,28 @@ protected override void Up(MigrationBuilder migrationBuilder) columns: table => new { Id = table.Column(type: "uuid", nullable: false), - QueueKey = table.Column(type: "character varying(512)", maxLength: 512, nullable: false), - PayloadType = table.Column( - type: "character varying(1024)", - maxLength: 1024, - nullable: false - ), + QueueKey = table.Column(type: "character varying(100)", maxLength: 100, nullable: false), + PayloadType = table.Column(type: "character 
varying(1024)", maxLength: 1024, nullable: false), Payload = table.Column(type: "text", nullable: false), ScheduledAt = table.Column(type: "timestamp with time zone", nullable: false), VisibleAt = table.Column(type: "timestamp with time zone", nullable: true), Status = table.Column(type: "integer", nullable: false), Attempts = table.Column(type: "integer", nullable: false), - RetryIntervals = table.Column( - type: "character varying(4096)", - maxLength: 4096, - nullable: false - ), + RetryIntervals = table.Column(type: "character varying(4096)", maxLength: 4096, nullable: false), CreatedAt = table.Column(type: "timestamp with time zone", nullable: false), UpdatedAt = table.Column(type: "timestamp with time zone", nullable: false), CompletedAt = table.Column(type: "timestamp with time zone", nullable: true), FailedAt = table.Column(type: "timestamp with time zone", nullable: true), LeaseToken = table.Column(type: "character varying(512)", maxLength: 512, nullable: true), - ScheduleJobKey = table.Column( - type: "character varying(512)", - maxLength: 512, - nullable: true - ), - IdempotencyKey = table.Column( - type: "character varying(512)", - maxLength: 512, - nullable: true - ), + ScheduleJobKey = table.Column(type: "character varying(255)", maxLength: 255, nullable: true), + IdempotencyKey = table.Column(type: "character varying(512)", maxLength: 512, nullable: true), + PartitionKey = table.Column(type: "character varying(255)", maxLength: 255, nullable: true), + SequenceNumber = table.Column(type: "bigint", nullable: true) }, constraints: table => { table.PrimaryKey("PK_AtomizerJobs", x => x.Id); - } - ); + }); migrationBuilder.CreateTable( name: "AtomizerSchedules", @@ -63,34 +49,25 @@ protected override void Up(MigrationBuilder migrationBuilder) columns: table => new { Id = table.Column(type: "uuid", nullable: false), - JobKey = table.Column(type: "character varying(512)", maxLength: 512, nullable: false), - QueueKey = table.Column(type: "character 
varying(512)", maxLength: 512, nullable: false), - PayloadType = table.Column( - type: "character varying(1024)", - maxLength: 1024, - nullable: false - ), + JobKey = table.Column(type: "character varying(255)", maxLength: 255, nullable: false), + QueueKey = table.Column(type: "character varying(100)", maxLength: 100, nullable: false), + PayloadType = table.Column(type: "character varying(1024)", maxLength: 1024, nullable: false), Payload = table.Column(type: "text", nullable: false), Schedule = table.Column(type: "character varying(1024)", maxLength: 1024, nullable: false), TimeZone = table.Column(type: "character varying(64)", maxLength: 64, nullable: false), MisfirePolicy = table.Column(type: "integer", nullable: false), MaxCatchUp = table.Column(type: "integer", nullable: false), Enabled = table.Column(type: "boolean", nullable: false), - RetryIntervals = table.Column( - type: "character varying(4096)", - maxLength: 4096, - nullable: false - ), + RetryIntervals = table.Column(type: "character varying(4096)", maxLength: 4096, nullable: false), NextRunAt = table.Column(type: "timestamp with time zone", nullable: false), LastEnqueueAt = table.Column(type: "timestamp with time zone", nullable: true), CreatedAt = table.Column(type: "timestamp with time zone", nullable: false), - UpdatedAt = table.Column(type: "timestamp with time zone", nullable: false), + UpdatedAt = table.Column(type: "timestamp with time zone", nullable: false) }, constraints: table => { table.PrimaryKey("PK_AtomizerSchedules", x => x.Id); - } - ); + }); migrationBuilder.CreateTable( name: "Products", @@ -100,13 +77,12 @@ protected override void Up(MigrationBuilder migrationBuilder) Name = table.Column(type: "text", nullable: false), Price = table.Column(type: "numeric", nullable: false), CreatedAt = table.Column(type: "timestamp with time zone", nullable: false), - Quantity = table.Column(type: "integer", nullable: false), + Quantity = table.Column(type: "integer", nullable: false) }, 
constraints: table => { table.PrimaryKey("PK_Products", x => x.Id); - } - ); + }); migrationBuilder.CreateTable( name: "AtomizerJobErrors", @@ -115,24 +91,12 @@ protected override void Up(MigrationBuilder migrationBuilder) { Id = table.Column(type: "uuid", nullable: false), JobId = table.Column(type: "uuid", nullable: false), - ErrorMessage = table.Column( - type: "character varying(2048)", - maxLength: 2048, - nullable: true - ), + ErrorMessage = table.Column(type: "character varying(2048)", maxLength: 2048, nullable: true), StackTrace = table.Column(type: "character varying(5120)", maxLength: 5120, nullable: true), - ExceptionType = table.Column( - type: "character varying(1024)", - maxLength: 1024, - nullable: true - ), + ExceptionType = table.Column(type: "character varying(1024)", maxLength: 1024, nullable: true), CreatedAt = table.Column(type: "timestamp with time zone", nullable: false), Attempt = table.Column(type: "integer", nullable: false), - RuntimeIdentity = table.Column( - type: "character varying(255)", - maxLength: 255, - nullable: true - ), + RuntimeIdentity = table.Column(type: "character varying(255)", maxLength: 255, nullable: true) }, constraints: table => { @@ -143,29 +107,40 @@ protected override void Up(MigrationBuilder migrationBuilder) principalSchema: "Atomizer", principalTable: "AtomizerJobs", principalColumn: "Id", - onDelete: ReferentialAction.Cascade - ); - } - ); + onDelete: ReferentialAction.Cascade); + }); migrationBuilder.CreateIndex( name: "IX_AtomizerJobErrors_JobId", schema: "Atomizer", table: "AtomizerJobErrors", - column: "JobId" - ); + column: "JobId"); + + migrationBuilder.CreateIndex( + name: "IX_AtomizerSchedules_JobKey", + schema: "Atomizer", + table: "AtomizerSchedules", + column: "JobKey", + unique: true); } /// protected override void Down(MigrationBuilder migrationBuilder) { - migrationBuilder.DropTable(name: "AtomizerJobErrors", schema: "Atomizer"); + migrationBuilder.DropTable( + name: "AtomizerJobErrors", + 
schema: "Atomizer"); - migrationBuilder.DropTable(name: "AtomizerSchedules", schema: "Atomizer"); + migrationBuilder.DropTable( + name: "AtomizerSchedules", + schema: "Atomizer"); - migrationBuilder.DropTable(name: "Products"); + migrationBuilder.DropTable( + name: "Products"); - migrationBuilder.DropTable(name: "AtomizerJobs", schema: "Atomizer"); + migrationBuilder.DropTable( + name: "AtomizerJobs", + schema: "Atomizer"); } } } diff --git a/samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/ExamplePostgresContextModelSnapshot.cs b/samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/ExamplePostgresContextModelSnapshot.cs index 82b44a8..fc49076 100644 --- a/samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/ExamplePostgresContextModelSnapshot.cs +++ b/samples/Atomizer.EFCore.Example/Data/Postgres/Migrations/ExamplePostgresContextModelSnapshot.cs @@ -72,6 +72,10 @@ protected override void BuildModel(ModelBuilder modelBuilder) .HasMaxLength(512) .HasColumnType("character varying(512)"); + b.Property("PartitionKey") + .HasMaxLength(255) + .HasColumnType("character varying(255)"); + b.Property("Payload") .IsRequired() .HasColumnType("text"); @@ -83,8 +87,8 @@ protected override void BuildModel(ModelBuilder modelBuilder) b.Property("QueueKey") .IsRequired() - .HasMaxLength(512) - .HasColumnType("character varying(512)"); + .HasMaxLength(100) + .HasColumnType("character varying(100)"); b.Property("RetryIntervals") .IsRequired() @@ -92,12 +96,15 @@ protected override void BuildModel(ModelBuilder modelBuilder) .HasColumnType("character varying(4096)"); b.Property("ScheduleJobKey") - .HasMaxLength(512) - .HasColumnType("character varying(512)"); + .HasMaxLength(255) + .HasColumnType("character varying(255)"); b.Property("ScheduledAt") .HasColumnType("timestamp with time zone"); + b.Property("SequenceNumber") + .HasColumnType("bigint"); + b.Property("Status") .HasColumnType("integer"); diff --git 
a/samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/20250827145633_Initial.Designer.cs b/samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/20250827145633_Initial.Designer.cs deleted file mode 100644 index 47ca2f9..0000000 --- a/samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/20250827145633_Initial.Designer.cs +++ /dev/null @@ -1,240 +0,0 @@ -ο»Ώ// -using System; -using Atomizer.EFCore.Example.Data.SqlServer; -using Microsoft.EntityFrameworkCore; -using Microsoft.EntityFrameworkCore.Infrastructure; -using Microsoft.EntityFrameworkCore.Metadata; -using Microsoft.EntityFrameworkCore.Migrations; -using Microsoft.EntityFrameworkCore.Storage.ValueConversion; - -#nullable disable - -namespace Atomizer.EFCore.Example.Data.SqlServer.Migrations -{ - [DbContext(typeof(ExampleSqlServerContext))] - [Migration("20250827145633_Initial")] - partial class Initial - { - /// - protected override void BuildTargetModel(ModelBuilder modelBuilder) - { -#pragma warning disable 612, 618 - modelBuilder - .HasAnnotation("ProductVersion", "9.0.8") - .HasAnnotation("Relational:MaxIdentifierLength", 128); - - SqlServerModelBuilderExtensions.UseIdentityColumns(modelBuilder); - - modelBuilder.Entity("Atomizer.EFCore.Example.Entities.Product", b => - { - b.Property("Id") - .ValueGeneratedOnAdd() - .HasColumnType("uniqueidentifier"); - - b.Property("CreatedAt") - .HasColumnType("datetime2"); - - b.Property("Name") - .IsRequired() - .HasColumnType("nvarchar(max)"); - - b.Property("Price") - .HasColumnType("decimal(18,2)"); - - b.Property("Quantity") - .HasColumnType("int"); - - b.HasKey("Id"); - - b.ToTable("Products"); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerJobEntity", b => - { - b.Property("Id") - .ValueGeneratedOnAdd() - .HasColumnType("uniqueidentifier"); - - b.Property("Attempts") - .HasColumnType("int"); - - b.Property("CompletedAt") - .HasColumnType("datetimeoffset"); - - b.Property("CreatedAt") - 
.HasColumnType("datetimeoffset"); - - b.Property("FailedAt") - .HasColumnType("datetimeoffset"); - - b.Property("IdempotencyKey") - .HasMaxLength(512) - .HasColumnType("nvarchar(512)"); - - b.Property("LeaseToken") - .HasMaxLength(512) - .HasColumnType("nvarchar(512)"); - - b.Property("Payload") - .IsRequired() - .HasColumnType("nvarchar(max)"); - - b.Property("PayloadType") - .IsRequired() - .HasMaxLength(1024) - .HasColumnType("nvarchar(1024)"); - - b.Property("QueueKey") - .IsRequired() - .HasMaxLength(512) - .HasColumnType("nvarchar(512)"); - - b.Property("RetryIntervals") - .IsRequired() - .HasMaxLength(4096) - .HasColumnType("nvarchar(max)"); - - b.Property("ScheduleJobKey") - .HasMaxLength(512) - .HasColumnType("nvarchar(512)"); - - b.Property("ScheduledAt") - .HasColumnType("datetimeoffset"); - - b.Property("Status") - .HasColumnType("int"); - - b.Property("UpdatedAt") - .HasColumnType("datetimeoffset"); - - b.Property("VisibleAt") - .HasColumnType("datetimeoffset"); - - b.HasKey("Id"); - - b.ToTable("AtomizerJobs", "Atomizer"); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerJobErrorEntity", b => - { - b.Property("Id") - .ValueGeneratedOnAdd() - .HasColumnType("uniqueidentifier"); - - b.Property("Attempt") - .HasColumnType("int"); - - b.Property("CreatedAt") - .HasColumnType("datetimeoffset"); - - b.Property("ErrorMessage") - .HasMaxLength(2048) - .HasColumnType("nvarchar(2048)"); - - b.Property("ExceptionType") - .HasMaxLength(1024) - .HasColumnType("nvarchar(1024)"); - - b.Property("JobId") - .HasColumnType("uniqueidentifier"); - - b.Property("RuntimeIdentity") - .HasMaxLength(255) - .HasColumnType("nvarchar(255)"); - - b.Property("StackTrace") - .HasMaxLength(5120) - .HasColumnType("nvarchar(max)"); - - b.HasKey("Id"); - - b.HasIndex("JobId"); - - b.ToTable("AtomizerJobErrors", "Atomizer"); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerScheduleEntity", b => - { - b.Property("Id") - 
.ValueGeneratedOnAdd() - .HasColumnType("uniqueidentifier"); - - b.Property("CreatedAt") - .HasColumnType("datetimeoffset"); - - b.Property("Enabled") - .HasColumnType("bit"); - - b.Property("JobKey") - .IsRequired() - .HasMaxLength(512) - .HasColumnType("nvarchar(512)"); - - b.Property("LastEnqueueAt") - .HasColumnType("datetimeoffset"); - - b.Property("MaxCatchUp") - .HasColumnType("int"); - - b.Property("MisfirePolicy") - .HasColumnType("int"); - - b.Property("NextRunAt") - .HasColumnType("datetimeoffset"); - - b.Property("Payload") - .IsRequired() - .HasColumnType("nvarchar(max)"); - - b.Property("PayloadType") - .IsRequired() - .HasMaxLength(1024) - .HasColumnType("nvarchar(1024)"); - - b.Property("QueueKey") - .IsRequired() - .HasMaxLength(512) - .HasColumnType("nvarchar(512)"); - - b.Property("RetryIntervals") - .IsRequired() - .HasMaxLength(4096) - .HasColumnType("nvarchar(max)"); - - b.Property("Schedule") - .IsRequired() - .HasMaxLength(1024) - .HasColumnType("nvarchar(1024)"); - - b.Property("TimeZone") - .IsRequired() - .HasMaxLength(64) - .HasColumnType("nvarchar(64)"); - - b.Property("UpdatedAt") - .HasColumnType("datetimeoffset"); - - b.HasKey("Id"); - - b.ToTable("AtomizerSchedules", "Atomizer"); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerJobErrorEntity", b => - { - b.HasOne("Atomizer.EntityFrameworkCore.Entities.AtomizerJobEntity", "Job") - .WithMany("Errors") - .HasForeignKey("JobId") - .OnDelete(DeleteBehavior.Cascade) - .IsRequired(); - - b.Navigation("Job"); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerJobEntity", b => - { - b.Navigation("Errors"); - }); -#pragma warning restore 612, 618 - } - } -} diff --git a/samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/20260503193351_AddIndexForJobKeyOnSchedulesTable.cs b/samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/20260503193351_AddIndexForJobKeyOnSchedulesTable.cs deleted file mode 100644 index d0f0558..0000000 --- 
a/samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/20260503193351_AddIndexForJobKeyOnSchedulesTable.cs +++ /dev/null @@ -1,74 +0,0 @@ -ο»Ώusing Microsoft.EntityFrameworkCore.Migrations; - -#nullable disable - -namespace Atomizer.EFCore.Example.Data.SqlServer.Migrations -{ - /// - public partial class AddIndexForJobKeyOnSchedulesTable : Migration - { - /// - protected override void Up(MigrationBuilder migrationBuilder) - { - migrationBuilder.AlterColumn( - name: "QueueKey", - schema: "Atomizer", - table: "AtomizerSchedules", - type: "nvarchar(100)", - maxLength: 100, - nullable: false, - oldClrType: typeof(string), - oldType: "nvarchar(512)", - oldMaxLength: 512); - - migrationBuilder.AlterColumn( - name: "JobKey", - schema: "Atomizer", - table: "AtomizerSchedules", - type: "nvarchar(255)", - maxLength: 255, - nullable: false, - oldClrType: typeof(string), - oldType: "nvarchar(512)", - oldMaxLength: 512); - - migrationBuilder.CreateIndex( - name: "IX_AtomizerSchedules_JobKey", - schema: "Atomizer", - table: "AtomizerSchedules", - column: "JobKey", - unique: true); - } - - /// - protected override void Down(MigrationBuilder migrationBuilder) - { - migrationBuilder.DropIndex( - name: "IX_AtomizerSchedules_JobKey", - schema: "Atomizer", - table: "AtomizerSchedules"); - - migrationBuilder.AlterColumn( - name: "QueueKey", - schema: "Atomizer", - table: "AtomizerSchedules", - type: "nvarchar(512)", - maxLength: 512, - nullable: false, - oldClrType: typeof(string), - oldType: "nvarchar(100)", - oldMaxLength: 100); - - migrationBuilder.AlterColumn( - name: "JobKey", - schema: "Atomizer", - table: "AtomizerSchedules", - type: "nvarchar(512)", - maxLength: 512, - nullable: false, - oldClrType: typeof(string), - oldType: "nvarchar(255)", - oldMaxLength: 255); - } - } -} diff --git a/samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/20260503193351_AddIndexForJobKeyOnSchedulesTable.Designer.cs 
b/samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/20260504193610_Initial.Designer.cs similarity index 94% rename from samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/20260503193351_AddIndexForJobKeyOnSchedulesTable.Designer.cs rename to samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/20260504193610_Initial.Designer.cs index 92cc882..44caae1 100644 --- a/samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/20260503193351_AddIndexForJobKeyOnSchedulesTable.Designer.cs +++ b/samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/20260504193610_Initial.Designer.cs @@ -12,8 +12,8 @@ namespace Atomizer.EFCore.Example.Data.SqlServer.Migrations { [DbContext(typeof(ExampleSqlServerContext))] - [Migration("20260503193351_AddIndexForJobKeyOnSchedulesTable")] - partial class AddIndexForJobKeyOnSchedulesTable + [Migration("20260504193610_Initial")] + partial class Initial { /// protected override void BuildTargetModel(ModelBuilder modelBuilder) @@ -75,6 +75,10 @@ protected override void BuildTargetModel(ModelBuilder modelBuilder) .HasMaxLength(512) .HasColumnType("nvarchar(512)"); + b.Property("PartitionKey") + .HasMaxLength(255) + .HasColumnType("nvarchar(255)"); + b.Property("Payload") .IsRequired() .HasColumnType("nvarchar(max)"); @@ -86,8 +90,8 @@ protected override void BuildTargetModel(ModelBuilder modelBuilder) b.Property("QueueKey") .IsRequired() - .HasMaxLength(512) - .HasColumnType("nvarchar(512)"); + .HasMaxLength(100) + .HasColumnType("nvarchar(100)"); b.Property("RetryIntervals") .IsRequired() @@ -95,12 +99,15 @@ protected override void BuildTargetModel(ModelBuilder modelBuilder) .HasColumnType("nvarchar(max)"); b.Property("ScheduleJobKey") - .HasMaxLength(512) - .HasColumnType("nvarchar(512)"); + .HasMaxLength(255) + .HasColumnType("nvarchar(255)"); b.Property("ScheduledAt") .HasColumnType("datetimeoffset"); + b.Property("SequenceNumber") + .HasColumnType("bigint"); + b.Property("Status") .HasColumnType("int"); diff --git 
a/samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/20250827145633_Initial.cs b/samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/20260504193610_Initial.cs similarity index 81% rename from samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/20250827145633_Initial.cs rename to samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/20260504193610_Initial.cs index 35aa82b..2c1b131 100644 --- a/samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/20250827145633_Initial.cs +++ b/samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/20260504193610_Initial.cs @@ -11,7 +11,8 @@ public partial class Initial : Migration /// protected override void Up(MigrationBuilder migrationBuilder) { - migrationBuilder.EnsureSchema(name: "Atomizer"); + migrationBuilder.EnsureSchema( + name: "Atomizer"); migrationBuilder.CreateTable( name: "AtomizerJobs", @@ -19,7 +20,7 @@ protected override void Up(MigrationBuilder migrationBuilder) columns: table => new { Id = table.Column(type: "uniqueidentifier", nullable: false), - QueueKey = table.Column(type: "nvarchar(512)", maxLength: 512, nullable: false), + QueueKey = table.Column(type: "nvarchar(100)", maxLength: 100, nullable: false), PayloadType = table.Column(type: "nvarchar(1024)", maxLength: 1024, nullable: false), Payload = table.Column(type: "nvarchar(max)", nullable: false), ScheduledAt = table.Column(type: "datetimeoffset", nullable: false), @@ -32,14 +33,15 @@ protected override void Up(MigrationBuilder migrationBuilder) CompletedAt = table.Column(type: "datetimeoffset", nullable: true), FailedAt = table.Column(type: "datetimeoffset", nullable: true), LeaseToken = table.Column(type: "nvarchar(512)", maxLength: 512, nullable: true), - ScheduleJobKey = table.Column(type: "nvarchar(512)", maxLength: 512, nullable: true), + ScheduleJobKey = table.Column(type: "nvarchar(255)", maxLength: 255, nullable: true), IdempotencyKey = table.Column(type: "nvarchar(512)", maxLength: 512, nullable: true), + 
PartitionKey = table.Column(type: "nvarchar(255)", maxLength: 255, nullable: true), + SequenceNumber = table.Column(type: "bigint", nullable: true) }, constraints: table => { table.PrimaryKey("PK_AtomizerJobs", x => x.Id); - } - ); + }); migrationBuilder.CreateTable( name: "AtomizerSchedules", @@ -47,8 +49,8 @@ protected override void Up(MigrationBuilder migrationBuilder) columns: table => new { Id = table.Column(type: "uniqueidentifier", nullable: false), - JobKey = table.Column(type: "nvarchar(512)", maxLength: 512, nullable: false), - QueueKey = table.Column(type: "nvarchar(512)", maxLength: 512, nullable: false), + JobKey = table.Column(type: "nvarchar(255)", maxLength: 255, nullable: false), + QueueKey = table.Column(type: "nvarchar(100)", maxLength: 100, nullable: false), PayloadType = table.Column(type: "nvarchar(1024)", maxLength: 1024, nullable: false), Payload = table.Column(type: "nvarchar(max)", nullable: false), Schedule = table.Column(type: "nvarchar(1024)", maxLength: 1024, nullable: false), @@ -60,13 +62,12 @@ protected override void Up(MigrationBuilder migrationBuilder) NextRunAt = table.Column(type: "datetimeoffset", nullable: false), LastEnqueueAt = table.Column(type: "datetimeoffset", nullable: true), CreatedAt = table.Column(type: "datetimeoffset", nullable: false), - UpdatedAt = table.Column(type: "datetimeoffset", nullable: false), + UpdatedAt = table.Column(type: "datetimeoffset", nullable: false) }, constraints: table => { table.PrimaryKey("PK_AtomizerSchedules", x => x.Id); - } - ); + }); migrationBuilder.CreateTable( name: "Products", @@ -76,13 +77,12 @@ protected override void Up(MigrationBuilder migrationBuilder) Name = table.Column(type: "nvarchar(max)", nullable: false), Price = table.Column(type: "decimal(18,2)", nullable: false), CreatedAt = table.Column(type: "datetime2", nullable: false), - Quantity = table.Column(type: "int", nullable: false), + Quantity = table.Column(type: "int", nullable: false) }, constraints: table => { 
table.PrimaryKey("PK_Products", x => x.Id); - } - ); + }); migrationBuilder.CreateTable( name: "AtomizerJobErrors", @@ -96,7 +96,7 @@ protected override void Up(MigrationBuilder migrationBuilder) ExceptionType = table.Column(type: "nvarchar(1024)", maxLength: 1024, nullable: true), CreatedAt = table.Column(type: "datetimeoffset", nullable: false), Attempt = table.Column(type: "int", nullable: false), - RuntimeIdentity = table.Column(type: "nvarchar(255)", maxLength: 255, nullable: true), + RuntimeIdentity = table.Column(type: "nvarchar(255)", maxLength: 255, nullable: true) }, constraints: table => { @@ -107,29 +107,40 @@ protected override void Up(MigrationBuilder migrationBuilder) principalSchema: "Atomizer", principalTable: "AtomizerJobs", principalColumn: "Id", - onDelete: ReferentialAction.Cascade - ); - } - ); + onDelete: ReferentialAction.Cascade); + }); migrationBuilder.CreateIndex( name: "IX_AtomizerJobErrors_JobId", schema: "Atomizer", table: "AtomizerJobErrors", - column: "JobId" - ); + column: "JobId"); + + migrationBuilder.CreateIndex( + name: "IX_AtomizerSchedules_JobKey", + schema: "Atomizer", + table: "AtomizerSchedules", + column: "JobKey", + unique: true); } /// protected override void Down(MigrationBuilder migrationBuilder) { - migrationBuilder.DropTable(name: "AtomizerJobErrors", schema: "Atomizer"); + migrationBuilder.DropTable( + name: "AtomizerJobErrors", + schema: "Atomizer"); - migrationBuilder.DropTable(name: "AtomizerSchedules", schema: "Atomizer"); + migrationBuilder.DropTable( + name: "AtomizerSchedules", + schema: "Atomizer"); - migrationBuilder.DropTable(name: "Products"); + migrationBuilder.DropTable( + name: "Products"); - migrationBuilder.DropTable(name: "AtomizerJobs", schema: "Atomizer"); + migrationBuilder.DropTable( + name: "AtomizerJobs", + schema: "Atomizer"); } } } diff --git a/samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/ExampleSqlServerContextModelSnapshot.cs 
b/samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/ExampleSqlServerContextModelSnapshot.cs index 0d4e211..48ffccb 100644 --- a/samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/ExampleSqlServerContextModelSnapshot.cs +++ b/samples/Atomizer.EFCore.Example/Data/SqlServer/Migrations/ExampleSqlServerContextModelSnapshot.cs @@ -72,6 +72,10 @@ protected override void BuildModel(ModelBuilder modelBuilder) .HasMaxLength(512) .HasColumnType("nvarchar(512)"); + b.Property("PartitionKey") + .HasMaxLength(255) + .HasColumnType("nvarchar(255)"); + b.Property("Payload") .IsRequired() .HasColumnType("nvarchar(max)"); @@ -83,8 +87,8 @@ protected override void BuildModel(ModelBuilder modelBuilder) b.Property("QueueKey") .IsRequired() - .HasMaxLength(512) - .HasColumnType("nvarchar(512)"); + .HasMaxLength(100) + .HasColumnType("nvarchar(100)"); b.Property("RetryIntervals") .IsRequired() @@ -92,12 +96,15 @@ protected override void BuildModel(ModelBuilder modelBuilder) .HasColumnType("nvarchar(max)"); b.Property("ScheduleJobKey") - .HasMaxLength(512) - .HasColumnType("nvarchar(512)"); + .HasMaxLength(255) + .HasColumnType("nvarchar(255)"); b.Property("ScheduledAt") .HasColumnType("datetimeoffset"); + b.Property("SequenceNumber") + .HasColumnType("bigint"); + b.Property("Status") .HasColumnType("int"); diff --git a/samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/20250827145638_Initial.Designer.cs b/samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/20250827145638_Initial.Designer.cs deleted file mode 100644 index d70f5c9..0000000 --- a/samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/20250827145638_Initial.Designer.cs +++ /dev/null @@ -1,235 +0,0 @@ -ο»Ώ// -using System; -using Atomizer.EFCore.Example.Data.Sqlite; -using Microsoft.EntityFrameworkCore; -using Microsoft.EntityFrameworkCore.Infrastructure; -using Microsoft.EntityFrameworkCore.Migrations; -using Microsoft.EntityFrameworkCore.Storage.ValueConversion; - -#nullable disable - -namespace 
Atomizer.EFCore.Example.Data.Sqlite.Migrations -{ - [DbContext(typeof(ExampleSqliteContext))] - [Migration("20250827145638_Initial")] - partial class Initial - { - /// - protected override void BuildTargetModel(ModelBuilder modelBuilder) - { -#pragma warning disable 612, 618 - modelBuilder.HasAnnotation("ProductVersion", "9.0.8"); - - modelBuilder.Entity("Atomizer.EFCore.Example.Entities.Product", b => - { - b.Property("Id") - .ValueGeneratedOnAdd() - .HasColumnType("TEXT"); - - b.Property("CreatedAt") - .HasColumnType("TEXT"); - - b.Property("Name") - .IsRequired() - .HasColumnType("TEXT"); - - b.Property("Price") - .HasColumnType("TEXT"); - - b.Property("Quantity") - .HasColumnType("INTEGER"); - - b.HasKey("Id"); - - b.ToTable("Products"); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerJobEntity", b => - { - b.Property("Id") - .ValueGeneratedOnAdd() - .HasColumnType("TEXT"); - - b.Property("Attempts") - .HasColumnType("INTEGER"); - - b.Property("CompletedAt") - .HasColumnType("TEXT"); - - b.Property("CreatedAt") - .HasColumnType("TEXT"); - - b.Property("FailedAt") - .HasColumnType("TEXT"); - - b.Property("IdempotencyKey") - .HasMaxLength(512) - .HasColumnType("TEXT"); - - b.Property("LeaseToken") - .HasMaxLength(512) - .HasColumnType("TEXT"); - - b.Property("Payload") - .IsRequired() - .HasColumnType("TEXT"); - - b.Property("PayloadType") - .IsRequired() - .HasMaxLength(1024) - .HasColumnType("TEXT"); - - b.Property("QueueKey") - .IsRequired() - .HasMaxLength(512) - .HasColumnType("TEXT"); - - b.Property("RetryIntervals") - .IsRequired() - .HasMaxLength(4096) - .HasColumnType("TEXT"); - - b.Property("ScheduleJobKey") - .HasMaxLength(512) - .HasColumnType("TEXT"); - - b.Property("ScheduledAt") - .HasColumnType("TEXT"); - - b.Property("Status") - .HasColumnType("INTEGER"); - - b.Property("UpdatedAt") - .HasColumnType("TEXT"); - - b.Property("VisibleAt") - .HasColumnType("TEXT"); - - b.HasKey("Id"); - - b.ToTable("AtomizerJobs", 
"Atomizer"); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerJobErrorEntity", b => - { - b.Property("Id") - .ValueGeneratedOnAdd() - .HasColumnType("TEXT"); - - b.Property("Attempt") - .HasColumnType("INTEGER"); - - b.Property("CreatedAt") - .HasColumnType("TEXT"); - - b.Property("ErrorMessage") - .HasMaxLength(2048) - .HasColumnType("TEXT"); - - b.Property("ExceptionType") - .HasMaxLength(1024) - .HasColumnType("TEXT"); - - b.Property("JobId") - .HasColumnType("TEXT"); - - b.Property("RuntimeIdentity") - .HasMaxLength(255) - .HasColumnType("TEXT"); - - b.Property("StackTrace") - .HasMaxLength(5120) - .HasColumnType("TEXT"); - - b.HasKey("Id"); - - b.HasIndex("JobId"); - - b.ToTable("AtomizerJobErrors", "Atomizer"); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerScheduleEntity", b => - { - b.Property("Id") - .ValueGeneratedOnAdd() - .HasColumnType("TEXT"); - - b.Property("CreatedAt") - .HasColumnType("TEXT"); - - b.Property("Enabled") - .HasColumnType("INTEGER"); - - b.Property("JobKey") - .IsRequired() - .HasMaxLength(512) - .HasColumnType("TEXT"); - - b.Property("LastEnqueueAt") - .HasColumnType("TEXT"); - - b.Property("MaxCatchUp") - .HasColumnType("INTEGER"); - - b.Property("MisfirePolicy") - .HasColumnType("INTEGER"); - - b.Property("NextRunAt") - .HasColumnType("TEXT"); - - b.Property("Payload") - .IsRequired() - .HasColumnType("TEXT"); - - b.Property("PayloadType") - .IsRequired() - .HasMaxLength(1024) - .HasColumnType("TEXT"); - - b.Property("QueueKey") - .IsRequired() - .HasMaxLength(512) - .HasColumnType("TEXT"); - - b.Property("RetryIntervals") - .IsRequired() - .HasMaxLength(4096) - .HasColumnType("TEXT"); - - b.Property("Schedule") - .IsRequired() - .HasMaxLength(1024) - .HasColumnType("TEXT"); - - b.Property("TimeZone") - .IsRequired() - .HasMaxLength(64) - .HasColumnType("TEXT"); - - b.Property("UpdatedAt") - .HasColumnType("TEXT"); - - b.HasKey("Id"); - - b.ToTable("AtomizerSchedules", 
"Atomizer"); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerJobErrorEntity", b => - { - b.HasOne("Atomizer.EntityFrameworkCore.Entities.AtomizerJobEntity", "Job") - .WithMany("Errors") - .HasForeignKey("JobId") - .OnDelete(DeleteBehavior.Cascade) - .IsRequired(); - - b.Navigation("Job"); - }); - - modelBuilder.Entity("Atomizer.EntityFrameworkCore.Entities.AtomizerJobEntity", b => - { - b.Navigation("Errors"); - }); -#pragma warning restore 612, 618 - } - } -} diff --git a/samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/20260503193354_AddIndexForJobKeyOnSchedulesTable.cs b/samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/20260503193354_AddIndexForJobKeyOnSchedulesTable.cs deleted file mode 100644 index 9ed54af..0000000 --- a/samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/20260503193354_AddIndexForJobKeyOnSchedulesTable.cs +++ /dev/null @@ -1,30 +0,0 @@ -ο»Ώusing Microsoft.EntityFrameworkCore.Migrations; - -#nullable disable - -namespace Atomizer.EFCore.Example.Data.Sqlite.Migrations -{ - /// - public partial class AddIndexForJobKeyOnSchedulesTable : Migration - { - /// - protected override void Up(MigrationBuilder migrationBuilder) - { - migrationBuilder.CreateIndex( - name: "IX_AtomizerSchedules_JobKey", - schema: "Atomizer", - table: "AtomizerSchedules", - column: "JobKey", - unique: true); - } - - /// - protected override void Down(MigrationBuilder migrationBuilder) - { - migrationBuilder.DropIndex( - name: "IX_AtomizerSchedules_JobKey", - schema: "Atomizer", - table: "AtomizerSchedules"); - } - } -} diff --git a/samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/20260503193354_AddIndexForJobKeyOnSchedulesTable.Designer.cs b/samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/20260504193615_Initial.Designer.cs similarity index 95% rename from samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/20260503193354_AddIndexForJobKeyOnSchedulesTable.Designer.cs rename to 
samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/20260504193615_Initial.Designer.cs index 2fcf0a0..75eb913 100644 --- a/samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/20260503193354_AddIndexForJobKeyOnSchedulesTable.Designer.cs +++ b/samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/20260504193615_Initial.Designer.cs @@ -11,8 +11,8 @@ namespace Atomizer.EFCore.Example.Data.Sqlite.Migrations { [DbContext(typeof(ExampleSqliteContext))] - [Migration("20260503193354_AddIndexForJobKeyOnSchedulesTable")] - partial class AddIndexForJobKeyOnSchedulesTable + [Migration("20260504193615_Initial")] + partial class Initial { /// protected override void BuildTargetModel(ModelBuilder modelBuilder) @@ -70,6 +70,10 @@ protected override void BuildTargetModel(ModelBuilder modelBuilder) .HasMaxLength(512) .HasColumnType("TEXT"); + b.Property("PartitionKey") + .HasMaxLength(255) + .HasColumnType("TEXT"); + b.Property("Payload") .IsRequired() .HasColumnType("TEXT"); @@ -81,7 +85,7 @@ protected override void BuildTargetModel(ModelBuilder modelBuilder) b.Property("QueueKey") .IsRequired() - .HasMaxLength(512) + .HasMaxLength(100) .HasColumnType("TEXT"); b.Property("RetryIntervals") @@ -90,12 +94,15 @@ protected override void BuildTargetModel(ModelBuilder modelBuilder) .HasColumnType("TEXT"); b.Property("ScheduleJobKey") - .HasMaxLength(512) + .HasMaxLength(255) .HasColumnType("TEXT"); b.Property("ScheduledAt") .HasColumnType("TEXT"); + b.Property("SequenceNumber") + .HasColumnType("INTEGER"); + b.Property("Status") .HasColumnType("INTEGER"); diff --git a/samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/20250827145638_Initial.cs b/samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/20260504193615_Initial.cs similarity index 81% rename from samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/20250827145638_Initial.cs rename to samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/20260504193615_Initial.cs index d79880b..6a85904 100644 --- 
a/samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/20250827145638_Initial.cs +++ b/samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/20260504193615_Initial.cs @@ -11,7 +11,8 @@ public partial class Initial : Migration /// protected override void Up(MigrationBuilder migrationBuilder) { - migrationBuilder.EnsureSchema(name: "Atomizer"); + migrationBuilder.EnsureSchema( + name: "Atomizer"); migrationBuilder.CreateTable( name: "AtomizerJobs", @@ -19,7 +20,7 @@ protected override void Up(MigrationBuilder migrationBuilder) columns: table => new { Id = table.Column(type: "TEXT", nullable: false), - QueueKey = table.Column(type: "TEXT", maxLength: 512, nullable: false), + QueueKey = table.Column(type: "TEXT", maxLength: 100, nullable: false), PayloadType = table.Column(type: "TEXT", maxLength: 1024, nullable: false), Payload = table.Column(type: "TEXT", nullable: false), ScheduledAt = table.Column(type: "TEXT", nullable: false), @@ -32,14 +33,15 @@ protected override void Up(MigrationBuilder migrationBuilder) CompletedAt = table.Column(type: "TEXT", nullable: true), FailedAt = table.Column(type: "TEXT", nullable: true), LeaseToken = table.Column(type: "TEXT", maxLength: 512, nullable: true), - ScheduleJobKey = table.Column(type: "TEXT", maxLength: 512, nullable: true), + ScheduleJobKey = table.Column(type: "TEXT", maxLength: 255, nullable: true), IdempotencyKey = table.Column(type: "TEXT", maxLength: 512, nullable: true), + PartitionKey = table.Column(type: "TEXT", maxLength: 255, nullable: true), + SequenceNumber = table.Column(type: "INTEGER", nullable: true) }, constraints: table => { table.PrimaryKey("PK_AtomizerJobs", x => x.Id); - } - ); + }); migrationBuilder.CreateTable( name: "AtomizerSchedules", @@ -47,8 +49,8 @@ protected override void Up(MigrationBuilder migrationBuilder) columns: table => new { Id = table.Column(type: "TEXT", nullable: false), - JobKey = table.Column(type: "TEXT", maxLength: 512, nullable: false), - QueueKey = table.Column(type: 
"TEXT", maxLength: 512, nullable: false), + JobKey = table.Column(type: "TEXT", maxLength: 255, nullable: false), + QueueKey = table.Column(type: "TEXT", maxLength: 100, nullable: false), PayloadType = table.Column(type: "TEXT", maxLength: 1024, nullable: false), Payload = table.Column(type: "TEXT", nullable: false), Schedule = table.Column(type: "TEXT", maxLength: 1024, nullable: false), @@ -60,13 +62,12 @@ protected override void Up(MigrationBuilder migrationBuilder) NextRunAt = table.Column(type: "TEXT", nullable: false), LastEnqueueAt = table.Column(type: "TEXT", nullable: true), CreatedAt = table.Column(type: "TEXT", nullable: false), - UpdatedAt = table.Column(type: "TEXT", nullable: false), + UpdatedAt = table.Column(type: "TEXT", nullable: false) }, constraints: table => { table.PrimaryKey("PK_AtomizerSchedules", x => x.Id); - } - ); + }); migrationBuilder.CreateTable( name: "Products", @@ -76,13 +77,12 @@ protected override void Up(MigrationBuilder migrationBuilder) Name = table.Column(type: "TEXT", nullable: false), Price = table.Column(type: "TEXT", nullable: false), CreatedAt = table.Column(type: "TEXT", nullable: false), - Quantity = table.Column(type: "INTEGER", nullable: false), + Quantity = table.Column(type: "INTEGER", nullable: false) }, constraints: table => { table.PrimaryKey("PK_Products", x => x.Id); - } - ); + }); migrationBuilder.CreateTable( name: "AtomizerJobErrors", @@ -96,7 +96,7 @@ protected override void Up(MigrationBuilder migrationBuilder) ExceptionType = table.Column(type: "TEXT", maxLength: 1024, nullable: true), CreatedAt = table.Column(type: "TEXT", nullable: false), Attempt = table.Column(type: "INTEGER", nullable: false), - RuntimeIdentity = table.Column(type: "TEXT", maxLength: 255, nullable: true), + RuntimeIdentity = table.Column(type: "TEXT", maxLength: 255, nullable: true) }, constraints: table => { @@ -107,29 +107,40 @@ protected override void Up(MigrationBuilder migrationBuilder) principalSchema: "Atomizer", 
principalTable: "AtomizerJobs", principalColumn: "Id", - onDelete: ReferentialAction.Cascade - ); - } - ); + onDelete: ReferentialAction.Cascade); + }); migrationBuilder.CreateIndex( name: "IX_AtomizerJobErrors_JobId", schema: "Atomizer", table: "AtomizerJobErrors", - column: "JobId" - ); + column: "JobId"); + + migrationBuilder.CreateIndex( + name: "IX_AtomizerSchedules_JobKey", + schema: "Atomizer", + table: "AtomizerSchedules", + column: "JobKey", + unique: true); } /// protected override void Down(MigrationBuilder migrationBuilder) { - migrationBuilder.DropTable(name: "AtomizerJobErrors", schema: "Atomizer"); + migrationBuilder.DropTable( + name: "AtomizerJobErrors", + schema: "Atomizer"); - migrationBuilder.DropTable(name: "AtomizerSchedules", schema: "Atomizer"); + migrationBuilder.DropTable( + name: "AtomizerSchedules", + schema: "Atomizer"); - migrationBuilder.DropTable(name: "Products"); + migrationBuilder.DropTable( + name: "Products"); - migrationBuilder.DropTable(name: "AtomizerJobs", schema: "Atomizer"); + migrationBuilder.DropTable( + name: "AtomizerJobs", + schema: "Atomizer"); } } } diff --git a/samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/ExampleSqliteContextModelSnapshot.cs b/samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/ExampleSqliteContextModelSnapshot.cs index 958e552..a11ed5c 100644 --- a/samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/ExampleSqliteContextModelSnapshot.cs +++ b/samples/Atomizer.EFCore.Example/Data/Sqlite/Migrations/ExampleSqliteContextModelSnapshot.cs @@ -67,6 +67,10 @@ protected override void BuildModel(ModelBuilder modelBuilder) .HasMaxLength(512) .HasColumnType("TEXT"); + b.Property("PartitionKey") + .HasMaxLength(255) + .HasColumnType("TEXT"); + b.Property("Payload") .IsRequired() .HasColumnType("TEXT"); @@ -78,7 +82,7 @@ protected override void BuildModel(ModelBuilder modelBuilder) b.Property("QueueKey") .IsRequired() - .HasMaxLength(512) + .HasMaxLength(100) .HasColumnType("TEXT"); 
b.Property("RetryIntervals") @@ -87,12 +91,15 @@ protected override void BuildModel(ModelBuilder modelBuilder) .HasColumnType("TEXT"); b.Property("ScheduleJobKey") - .HasMaxLength(512) + .HasMaxLength(255) .HasColumnType("TEXT"); b.Property("ScheduledAt") .HasColumnType("TEXT"); + b.Property("SequenceNumber") + .HasColumnType("INTEGER"); + b.Property("Status") .HasColumnType("INTEGER"); diff --git a/samples/Atomizer.EFCore.Example/Handlers/ProcessStockEventJob.cs b/samples/Atomizer.EFCore.Example/Handlers/ProcessStockEventJob.cs new file mode 100644 index 0000000..3726ce7 --- /dev/null +++ b/samples/Atomizer.EFCore.Example/Handlers/ProcessStockEventJob.cs @@ -0,0 +1,34 @@ +using Atomizer.EFCore.Example.Data.Postgres; + +namespace Atomizer.EFCore.Example.Handlers; + +public record StockEvent(Guid ProductId, string EventType, int Delta); + +public class ProcessStockEventJob(ExamplePostgresContext dbContext, ILogger logger) + : IAtomizerJob +{ + public async Task HandleAsync(StockEvent payload, JobContext context) + { + var product = await dbContext.Products.FindAsync( + [payload.ProductId], + context.CancellationToken + ); + + if (product == null) + { + logger.LogWarning("Product {ProductId} not found, skipping stock event", payload.ProductId); + return; + } + + product.Quantity += payload.Delta; + await dbContext.SaveChangesAsync(context.CancellationToken); + + logger.LogInformation( + "Stock event '{EventType}' applied to product {ProductId}: delta={Delta}, new quantity={Quantity}", + payload.EventType, + payload.ProductId, + payload.Delta, + product.Quantity + ); + } +} diff --git a/samples/Atomizer.EFCore.Example/Program.cs b/samples/Atomizer.EFCore.Example/Program.cs index a619e8f..a7ff760 100644 --- a/samples/Atomizer.EFCore.Example/Program.cs +++ b/samples/Atomizer.EFCore.Example/Program.cs @@ -155,4 +155,18 @@ await atomizer.ScheduleRecurringAsync( } ); +// FIFO example: stock events for the same product are partitioned by ProductId, +// guaranteeing they 
execute one-at-a-time in enqueue order. +app.MapPost( + "/stock-events", + async ([FromServices] IAtomizerClient atomizerClient, [FromBody] StockEvent stockEvent) => + { + var jobId = await atomizerClient.EnqueueAsync( + stockEvent, + options => options.PartitionKey = new PartitionKey(stockEvent.ProductId.ToString()) + ); + return Results.Accepted($"/jobs/{jobId}"); + } +); + app.Run(); diff --git a/src/Atomizer.EntityFrameworkCore/Configurations/AtomizerJobEntityConfiguration.cs b/src/Atomizer.EntityFrameworkCore/Configurations/AtomizerJobEntityConfiguration.cs index 62c41b3..628e6f1 100644 --- a/src/Atomizer.EntityFrameworkCore/Configurations/AtomizerJobEntityConfiguration.cs +++ b/src/Atomizer.EntityFrameworkCore/Configurations/AtomizerJobEntityConfiguration.cs @@ -30,7 +30,7 @@ public void Configure(EntityTypeBuilder builder) builder.ToTable("AtomizerJobs", _schema); builder.HasKey(job => job.Id); builder.Property(job => job.Id).ValueGeneratedOnAdd(); - builder.Property(job => job.QueueKey).IsRequired().HasMaxLength(512); + builder.Property(job => job.QueueKey).IsRequired().HasMaxLength(100); // QueueKey domain max = 100 builder.Property(job => job.PayloadType).IsRequired().HasMaxLength(1024); builder.Property(job => job.Payload).IsRequired(); builder.Property(job => job.ScheduledAt).IsRequired(); @@ -40,8 +40,8 @@ public void Configure(EntityTypeBuilder builder) builder.Property(job => job.CreatedAt).IsRequired(); builder.Property(job => job.CompletedAt).IsRequired(false); builder.Property(job => job.FailedAt).IsRequired(false); - builder.Property(job => job.LeaseToken).HasMaxLength(512); - builder.Property(job => job.ScheduleJobKey).HasMaxLength(512); + builder.Property(job => job.LeaseToken).HasMaxLength(512); // LeaseToken format: InstanceId:*:QueueKey:*:LeaseId β€” can be long + builder.Property(job => job.ScheduleJobKey).HasMaxLength(255); // JobKey domain max = 255 builder.Property(job => job.IdempotencyKey).HasMaxLength(512); builder.Property(job => 
job.UpdatedAt).IsRequired(); builder @@ -60,5 +60,7 @@ public void Configure(EntityTypeBuilder builder) c => c.ToArray() ) ); + builder.Property(job => job.PartitionKey).HasMaxLength(255).IsRequired(false); + builder.Property(job => job.SequenceNumber).IsRequired(false); } } diff --git a/src/Atomizer.EntityFrameworkCore/Entities/AtomizerJobEntity.cs b/src/Atomizer.EntityFrameworkCore/Entities/AtomizerJobEntity.cs index 08160d8..e00698d 100644 --- a/src/Atomizer.EntityFrameworkCore/Entities/AtomizerJobEntity.cs +++ b/src/Atomizer.EntityFrameworkCore/Entities/AtomizerJobEntity.cs @@ -53,6 +53,12 @@ public class AtomizerJobEntity /// Gets or sets the idempotency key used to deduplicate job insertions. public string? IdempotencyKey { get; set; } + /// Gets or sets the partition key grouping this job for FIFO processing, or null if unpartitioned. + public string? PartitionKey { get; set; } + + /// Gets or sets the monotonically increasing sequence number within (queue, partition key), or null if unpartitioned. + public long? SequenceNumber { get; set; } + /// Gets or sets the list of error records from previous failed attempts. public List Errors { get; set; } = new List(); } @@ -105,6 +111,8 @@ public static AtomizerJobEntity ToEntity(this AtomizerJob job) RetryIntervals = job.RetryStrategy.RetryIntervals, ScheduleJobKey = job.ScheduleJobKey?.ToString(), IdempotencyKey = job.IdempotencyKey, + PartitionKey = job.PartitionKey?.ToString(), + SequenceNumber = job.SequenceNumber, Errors = job.Errors.Select(err => err.ToEntity()).ToList(), }; } @@ -131,10 +139,15 @@ public static AtomizerJob ToAtomizerJob(this AtomizerJobEntity entity) CompletedAt = entity.CompletedAt, FailedAt = entity.FailedAt, LeaseToken = entity.LeaseToken != null ? new LeaseToken(entity.LeaseToken) : null, + // RetryStrategy.None serializes as [0ms] (length 1), so the normal round-trip for None + // is handled by the Intervals path. 
The length == 0 guard is a defensive fallback for + // corrupt rows with an empty RetryIntervals column; without it, Intervals([]) would throw. RetryStrategy = entity.RetryIntervals.Length == 0 ? RetryStrategy.None : RetryStrategy.Intervals(entity.RetryIntervals), ScheduleJobKey = entity.ScheduleJobKey != null ? new JobKey(entity.ScheduleJobKey) : null, IdempotencyKey = entity.IdempotencyKey, + PartitionKey = entity.PartitionKey != null ? new PartitionKey(entity.PartitionKey) : null, + SequenceNumber = entity.SequenceNumber, Errors = entity.Errors.Select(err => err.ToAtomizerJobError()).ToList(), }; } diff --git a/src/Atomizer.EntityFrameworkCore/Providers/ISqlDialect.cs b/src/Atomizer.EntityFrameworkCore/Providers/ISqlDialect.cs index 5a13e9c..0899b30 100644 --- a/src/Atomizer.EntityFrameworkCore/Providers/ISqlDialect.cs +++ b/src/Atomizer.EntityFrameworkCore/Providers/ISqlDialect.cs @@ -5,5 +5,13 @@ internal interface ISqlDialect FormattableString GetDueJobs(QueueKey queueKey, DateTimeOffset now, int batchSize); FormattableString ReleaseLeasedJobs(LeaseToken leaseToken, DateTimeOffset now); FormattableString GetDueSchedules(DateTimeOffset now); - FormattableString UpsertScheduleAsync(AtomizerSchedule schedule, DateTimeOffset now); + FormattableString UpsertSchedule(AtomizerSchedule schedule, DateTimeOffset now); + + /// + /// Returns provider-specific SQL that inserts a partitioned job and atomically assigns + /// a monotonically increasing SequenceNumber scoped to the job's (queue, partition key). + /// + /// The partitioned job to insert. must not be null. + /// A ready for ExecuteSqlInterpolatedAsync. 
+ FormattableString InsertJobWithSequence(AtomizerJob job); } diff --git a/src/Atomizer.EntityFrameworkCore/Providers/Sql/BaseSqlDialect.cs b/src/Atomizer.EntityFrameworkCore/Providers/Sql/BaseSqlDialect.cs new file mode 100644 index 0000000..701782b --- /dev/null +++ b/src/Atomizer.EntityFrameworkCore/Providers/Sql/BaseSqlDialect.cs @@ -0,0 +1,109 @@ +using System.Runtime.CompilerServices; +using Atomizer.EntityFrameworkCore.Entities; + +namespace Atomizer.EntityFrameworkCore.Providers.Sql; + +internal abstract class BaseSqlDialect : ISqlDialect +{ + // Job table and columns + protected readonly string _jTable; + protected readonly string _jId; + protected readonly string _jQueueKey; + protected readonly string _jPayloadType; + protected readonly string _jPayload; + protected readonly string _jScheduledAt; + protected readonly string _jVisibleAt; + protected readonly string _jStatus; + protected readonly string _jAttempts; + protected readonly string _jRetryIntervals; + protected readonly string _jCreatedAt; + protected readonly string _jUpdatedAt; + protected readonly string _jLeaseToken; + protected readonly string _jScheduleJobKey; + protected readonly string _jIdempotencyKey; + protected readonly string _jPartitionKey; + protected readonly string _jSequenceNumber; + + // Schedule table and columns + protected readonly string _sTable; + protected readonly string _sId; + protected readonly string _sJobKey; + protected readonly string _sQueueKey; + protected readonly string _sPayloadType; + protected readonly string _sPayload; + protected readonly string _sSchedule; + protected readonly string _sTimeZone; + protected readonly string _sMisfirePolicy; + protected readonly string _sMaxCatchUp; + protected readonly string _sEnabled; + protected readonly string _sRetryIntervals; + protected readonly string _sNextRunAt; + protected readonly string _sLastEnqueueAt; + protected readonly string _sCreatedAt; + protected readonly string _sUpdatedAt; + + protected readonly 
int _statusPending = (int)AtomizerEntityJobStatus.Pending; + protected readonly int _statusProcessing = (int)AtomizerEntityJobStatus.Processing; + + protected BaseSqlDialect(EntityMap jobs, EntityMap schedules) + { + var jc = jobs.Col; + _jTable = jobs.Table; + _jId = jc[nameof(AtomizerJobEntity.Id)]; + _jQueueKey = jc[nameof(AtomizerJobEntity.QueueKey)]; + _jPayloadType = jc[nameof(AtomizerJobEntity.PayloadType)]; + _jPayload = jc[nameof(AtomizerJobEntity.Payload)]; + _jScheduledAt = jc[nameof(AtomizerJobEntity.ScheduledAt)]; + _jVisibleAt = jc[nameof(AtomizerJobEntity.VisibleAt)]; + _jStatus = jc[nameof(AtomizerJobEntity.Status)]; + _jAttempts = jc[nameof(AtomizerJobEntity.Attempts)]; + _jRetryIntervals = jc[nameof(AtomizerJobEntity.RetryIntervals)]; + _jCreatedAt = jc[nameof(AtomizerJobEntity.CreatedAt)]; + _jUpdatedAt = jc[nameof(AtomizerJobEntity.UpdatedAt)]; + _jLeaseToken = jc[nameof(AtomizerJobEntity.LeaseToken)]; + _jScheduleJobKey = jc[nameof(AtomizerJobEntity.ScheduleJobKey)]; + _jIdempotencyKey = jc[nameof(AtomizerJobEntity.IdempotencyKey)]; + _jPartitionKey = jc[nameof(AtomizerJobEntity.PartitionKey)]; + _jSequenceNumber = jc[nameof(AtomizerJobEntity.SequenceNumber)]; + + var sc = schedules.Col; + _sTable = schedules.Table; + _sId = sc[nameof(AtomizerScheduleEntity.Id)]; + _sJobKey = sc[nameof(AtomizerScheduleEntity.JobKey)]; + _sQueueKey = sc[nameof(AtomizerScheduleEntity.QueueKey)]; + _sPayloadType = sc[nameof(AtomizerScheduleEntity.PayloadType)]; + _sPayload = sc[nameof(AtomizerScheduleEntity.Payload)]; + _sSchedule = sc[nameof(AtomizerScheduleEntity.Schedule)]; + _sTimeZone = sc[nameof(AtomizerScheduleEntity.TimeZone)]; + _sMisfirePolicy = sc[nameof(AtomizerScheduleEntity.MisfirePolicy)]; + _sMaxCatchUp = sc[nameof(AtomizerScheduleEntity.MaxCatchUp)]; + _sEnabled = sc[nameof(AtomizerScheduleEntity.Enabled)]; + _sRetryIntervals = sc[nameof(AtomizerScheduleEntity.RetryIntervals)]; + _sNextRunAt = sc[nameof(AtomizerScheduleEntity.NextRunAt)]; + 
_sLastEnqueueAt = sc[nameof(AtomizerScheduleEntity.LastEnqueueAt)]; + _sCreatedAt = sc[nameof(AtomizerScheduleEntity.CreatedAt)]; + _sUpdatedAt = sc[nameof(AtomizerScheduleEntity.UpdatedAt)]; + } + + protected static string SerializeIntervals(TimeSpan[] intervals) => + string.Join(";", Array.ConvertAll(intervals, ts => (long)ts.TotalMilliseconds)); + + public FormattableString ReleaseLeasedJobs(LeaseToken leaseToken, DateTimeOffset now) + { + var format = $$""" + UPDATE {{_jTable}} + SET {{_jStatus}} = {{_statusPending}}, + {{_jLeaseToken}} = NULL, + {{_jVisibleAt}} = NULL, + {{_jUpdatedAt}} = {0} + WHERE {{_jLeaseToken}} = {1} + AND {{_jStatus}} = {{_statusProcessing}}; + """; + return FormattableStringFactory.Create(format, now, leaseToken.Token); + } + + public abstract FormattableString GetDueJobs(QueueKey queueKey, DateTimeOffset now, int batchSize); + public abstract FormattableString InsertJobWithSequence(AtomizerJob job); + public abstract FormattableString GetDueSchedules(DateTimeOffset now); + public abstract FormattableString UpsertSchedule(AtomizerSchedule schedule, DateTimeOffset now); +} diff --git a/src/Atomizer.EntityFrameworkCore/Providers/Sql/MySqlDialect.cs b/src/Atomizer.EntityFrameworkCore/Providers/Sql/MySqlDialect.cs index 36b7e06..28ee4f9 100644 --- a/src/Atomizer.EntityFrameworkCore/Providers/Sql/MySqlDialect.cs +++ b/src/Atomizer.EntityFrameworkCore/Providers/Sql/MySqlDialect.cs @@ -3,157 +3,188 @@ namespace Atomizer.EntityFrameworkCore.Providers.Sql; -internal sealed class MySqlDialect : ISqlDialect +internal sealed class MySqlDialect(EntityMap jobs, EntityMap schedules) : BaseSqlDialect(jobs, schedules) { - private readonly EntityMap _jobs; - private readonly EntityMap _schedules; - - public MySqlDialect(EntityMap jobs, EntityMap schedules) + public override FormattableString GetDueJobs(QueueKey queueKey, DateTimeOffset now, int batchSize) { - _jobs = jobs; - _schedules = schedules; - } - - public FormattableString GetDueJobs(QueueKey 
queueKey, DateTimeOffset now, int batchSize) - { - var table = _jobs.Table; - var c = _jobs.Col; - var colStatus = c[nameof(AtomizerJobEntity.Status)]; - var colQueueKey = c[nameof(AtomizerJobEntity.QueueKey)]; - var colVisibleAt = c[nameof(AtomizerJobEntity.VisibleAt)]; - var colScheduledAt = c[nameof(AtomizerJobEntity.ScheduledAt)]; - var colId = c[nameof(AtomizerJobEntity.Id)]; - var statusPending = (int)AtomizerEntityJobStatus.Pending; - var statusProcessing = (int)AtomizerEntityJobStatus.Processing; - var format = - $@"SELECT t.* -FROM {table} AS t -WHERE {colQueueKey} = {{0}} - AND ( - ( {colStatus} = {statusPending} - AND ( {colVisibleAt} IS NULL - OR {colVisibleAt} <= {{1}}) - AND {colScheduledAt} <= {{2}} - ) - OR - ( {colStatus} = {statusProcessing} - AND {colVisibleAt} <= {{3}} - ) - ) -ORDER BY {colScheduledAt}, {colId} -LIMIT {{4}} -FOR UPDATE SKIP LOCKED;"; - return FormattableStringFactory.Create(format, queueKey.Key, now, now, now, batchSize); + var format = $$""" + WITH blocked_partitions AS ( + SELECT DISTINCT {{_jPartitionKey}} + FROM {{_jTable}} + WHERE {{_jQueueKey}} = {0} + AND {{_jPartitionKey}} IS NOT NULL + AND ( + {{_jStatus}} = {{_statusProcessing}} + OR ({{_jStatus}} = {{_statusPending}} AND {{_jAttempts}} > 0) + ) + ), + partition_heads AS ( + SELECT j.{{_jPartitionKey}}, MIN(j.{{_jSequenceNumber}}) AS min_seq + FROM {{_jTable}} AS j + LEFT JOIN blocked_partitions bp ON j.{{_jPartitionKey}} = bp.{{_jPartitionKey}} + WHERE j.{{_jQueueKey}} = {1} + AND j.{{_jPartitionKey}} IS NOT NULL + AND bp.{{_jPartitionKey}} IS NULL + AND ( + (j.{{_jStatus}} = {{_statusPending}} + AND (j.{{_jVisibleAt}} IS NULL OR j.{{_jVisibleAt}} <= {10}) + AND j.{{_jScheduledAt}} <= {11}) + OR (j.{{_jStatus}} = {{_statusProcessing}} AND j.{{_jVisibleAt}} <= {12}) + ) + GROUP BY j.{{_jPartitionKey}} + ) + SELECT t.* + FROM {{_jTable}} AS t + LEFT JOIN partition_heads ph + ON t.{{_jPartitionKey}} = ph.{{_jPartitionKey}} + AND t.{{_jSequenceNumber}} = ph.min_seq + 
WHERE t.{{_jQueueKey}} = {2} + AND ( + (t.{{_jPartitionKey}} IS NULL + AND ( + (t.{{_jStatus}} = {{_statusPending}} + AND (t.{{_jVisibleAt}} IS NULL OR t.{{_jVisibleAt}} <= {3}) + AND t.{{_jScheduledAt}} <= {4}) + OR (t.{{_jStatus}} = {{_statusProcessing}} AND t.{{_jVisibleAt}} <= {5}) + ) + ) + OR + (t.{{_jPartitionKey}} IS NOT NULL AND ph.min_seq IS NOT NULL + AND ( + (t.{{_jStatus}} = {{_statusPending}} + AND (t.{{_jVisibleAt}} IS NULL OR t.{{_jVisibleAt}} <= {6}) + AND t.{{_jScheduledAt}} <= {7}) + OR (t.{{_jStatus}} = {{_statusProcessing}} AND t.{{_jVisibleAt}} <= {8}) + ) + ) + ) + ORDER BY t.{{_jScheduledAt}}, t.{{_jId}} + LIMIT {9} + FOR UPDATE SKIP LOCKED; + """; + return FormattableStringFactory.Create( + format, + queueKey.Key, // {0} blocked_partitions queue filter + queueKey.Key, // {1} partition_heads queue filter + queueKey.Key, // {2} outer SELECT queue filter + now, // {3} unpartitioned VisibleAt + now, // {4} unpartitioned ScheduledAt + now, // {5} unpartitioned Processing VisibleAt + now, // {6} partitioned VisibleAt + now, // {7} partitioned ScheduledAt + now, // {8} partitioned Processing VisibleAt + batchSize, // {9} LIMIT + now, // {10} partition_heads VisibleAt + now, // {11} partition_heads ScheduledAt + now // {12} partition_heads Processing VisibleAt + ); } - public FormattableString ReleaseLeasedJobs(LeaseToken leaseToken, DateTimeOffset now) + public override FormattableString InsertJobWithSequence(AtomizerJob job) { - var table = _jobs.Table; - var c = _jobs.Col; - var colStatus = c[nameof(AtomizerJobEntity.Status)]; - var colLeaseToken = c[nameof(AtomizerJobEntity.LeaseToken)]; - var colVisibleAt = c[nameof(AtomizerJobEntity.VisibleAt)]; - var colUpdatedAt = c[nameof(AtomizerJobEntity.UpdatedAt)]; - var statusPending = (int)AtomizerEntityJobStatus.Pending; - var statusProcessing = (int)AtomizerEntityJobStatus.Processing; - var format = - $@"UPDATE {table} -SET {colStatus} = {statusPending}, - {colLeaseToken} = NULL, - {colVisibleAt} = 
NULL, - {colUpdatedAt} = {{0}} -WHERE {colLeaseToken} = {{1}} - AND {colStatus} = {statusProcessing};"; - return FormattableStringFactory.Create(format, now, leaseToken.Token); + var entity = job.ToEntity(); + var retryIntervals = SerializeIntervals(entity.RetryIntervals); + var format = $$""" + INSERT INTO {{_jTable}} ( + {{_jId}}, {{_jQueueKey}}, {{_jPayloadType}}, {{_jPayload}}, + {{_jScheduledAt}}, {{_jVisibleAt}}, {{_jStatus}}, {{_jAttempts}}, + {{_jRetryIntervals}}, {{_jCreatedAt}}, {{_jUpdatedAt}}, + {{_jLeaseToken}}, {{_jScheduleJobKey}}, {{_jIdempotencyKey}}, + {{_jPartitionKey}}, {{_jSequenceNumber}} + ) + SELECT {0}, {1}, {2}, {3}, + {4}, {5}, {6}, {7}, + {8}, {9}, {10}, + {11}, {12}, {13}, + {14}, + COALESCE((SELECT MAX(max_seq) FROM (SELECT MAX({{_jSequenceNumber}}) AS max_seq FROM {{_jTable}} WHERE {{_jQueueKey}} = {15} AND {{_jPartitionKey}} = {16} FOR UPDATE) AS sub), 0) + 1; + """; + return FormattableStringFactory.Create( + format, + entity.Id, + entity.QueueKey, + entity.PayloadType, + entity.Payload, + entity.ScheduledAt, + entity.VisibleAt, + (int)entity.Status, + entity.Attempts, + retryIntervals, + entity.CreatedAt, + entity.UpdatedAt, + entity.LeaseToken, + entity.ScheduleJobKey, + entity.IdempotencyKey, + entity.PartitionKey, + entity.QueueKey, + entity.PartitionKey + ); } - public FormattableString GetDueSchedules(DateTimeOffset now) + public override FormattableString GetDueSchedules(DateTimeOffset now) { - var table = _schedules.Table; - var c = _schedules.Col; - var colEnabled = c[nameof(AtomizerScheduleEntity.Enabled)]; - var colNextRunAt = c[nameof(AtomizerScheduleEntity.NextRunAt)]; - var colId = c[nameof(AtomizerScheduleEntity.Id)]; - var format = - $@"SELECT t.* -FROM {table} AS t -WHERE {colNextRunAt} <= {{0}} - AND {colEnabled} = TRUE -ORDER BY {colNextRunAt}, {colId} -FOR UPDATE SKIP LOCKED;"; + var format = $$""" + SELECT t.* + FROM {{_sTable}} AS t + WHERE {{_sNextRunAt}} <= {0} + AND {{_sEnabled}} = TRUE + ORDER BY 
{{_sNextRunAt}}, {{_sId}} + FOR UPDATE SKIP LOCKED; + """; return FormattableStringFactory.Create(format, now); } - public FormattableString UpsertScheduleAsync(AtomizerSchedule schedule, DateTimeOffset now) + public override FormattableString UpsertSchedule(AtomizerSchedule schedule, DateTimeOffset now) { var entity = schedule.ToEntity(); - var table = _schedules.Table; - var c = _schedules.Col; - var colId = c[nameof(AtomizerScheduleEntity.Id)]; - var colJobKey = c[nameof(AtomizerScheduleEntity.JobKey)]; - var colQueueKey = c[nameof(AtomizerScheduleEntity.QueueKey)]; - var colPayloadType = c[nameof(AtomizerScheduleEntity.PayloadType)]; - var colPayload = c[nameof(AtomizerScheduleEntity.Payload)]; - var colSchedule = c[nameof(AtomizerScheduleEntity.Schedule)]; - var colTimeZone = c[nameof(AtomizerScheduleEntity.TimeZone)]; - var colMisfirePolicy = c[nameof(AtomizerScheduleEntity.MisfirePolicy)]; - var colMaxCatchUp = c[nameof(AtomizerScheduleEntity.MaxCatchUp)]; - var colEnabled = c[nameof(AtomizerScheduleEntity.Enabled)]; - var colRetryIntervals = c[nameof(AtomizerScheduleEntity.RetryIntervals)]; - var colNextRunAt = c[nameof(AtomizerScheduleEntity.NextRunAt)]; - var colLastEnqueueAt = c[nameof(AtomizerScheduleEntity.LastEnqueueAt)]; - var colCreatedAt = c[nameof(AtomizerScheduleEntity.CreatedAt)]; - var colUpdatedAt = c[nameof(AtomizerScheduleEntity.UpdatedAt)]; - var retryIntervals = string.Join( - ";", - Array.ConvertAll(entity.RetryIntervals, ts => (long)ts.TotalMilliseconds) - ); - var format = - $@"INSERT INTO {table} ( - {colId}, - {colJobKey}, - {colQueueKey}, - {colPayloadType}, - {colPayload}, - {colSchedule}, - {colTimeZone}, - {colMisfirePolicy}, - {colMaxCatchUp}, - {colEnabled}, - {colRetryIntervals}, - {colNextRunAt}, - {colLastEnqueueAt}, - {colCreatedAt}, - {colUpdatedAt} -) VALUES ( - {{0}}, - {{1}}, - {{2}}, - {{3}}, - {{4}}, - {{5}}, - {{6}}, - {{7}}, - {{8}}, - {{9}}, - {{10}}, - {{11}}, - {{12}}, - {{13}}, - {{14}} -) -ON DUPLICATE KEY 
UPDATE - {colQueueKey} = VALUES({colQueueKey}), - {colPayloadType} = VALUES({colPayloadType}), - {colPayload} = VALUES({colPayload}), - {colSchedule} = VALUES({colSchedule}), - {colTimeZone} = VALUES({colTimeZone}), - {colMisfirePolicy} = VALUES({colMisfirePolicy}), - {colMaxCatchUp} = VALUES({colMaxCatchUp}), - {colEnabled} = VALUES({colEnabled}), - {colRetryIntervals} = VALUES({colRetryIntervals}), - {colNextRunAt} = VALUES({colNextRunAt}), - {colUpdatedAt} = {{14}};"; + var retryIntervals = SerializeIntervals(entity.RetryIntervals); + var format = $$""" + INSERT INTO {{_sTable}} ( + {{_sId}}, + {{_sJobKey}}, + {{_sQueueKey}}, + {{_sPayloadType}}, + {{_sPayload}}, + {{_sSchedule}}, + {{_sTimeZone}}, + {{_sMisfirePolicy}}, + {{_sMaxCatchUp}}, + {{_sEnabled}}, + {{_sRetryIntervals}}, + {{_sNextRunAt}}, + {{_sLastEnqueueAt}}, + {{_sCreatedAt}}, + {{_sUpdatedAt}} + ) VALUES ( + {0}, + {1}, + {2}, + {3}, + {4}, + {5}, + {6}, + {7}, + {8}, + {9}, + {10}, + {11}, + {12}, + {13}, + {14} + ) + ON DUPLICATE KEY UPDATE + {{_sQueueKey}} = VALUES({{_sQueueKey}}), + {{_sPayloadType}} = VALUES({{_sPayloadType}}), + {{_sPayload}} = VALUES({{_sPayload}}), + {{_sSchedule}} = VALUES({{_sSchedule}}), + {{_sTimeZone}} = VALUES({{_sTimeZone}}), + {{_sMisfirePolicy}} = VALUES({{_sMisfirePolicy}}), + {{_sMaxCatchUp}} = VALUES({{_sMaxCatchUp}}), + {{_sEnabled}} = VALUES({{_sEnabled}}), + {{_sRetryIntervals}} = VALUES({{_sRetryIntervals}}), + {{_sNextRunAt}} = VALUES({{_sNextRunAt}}), + {{_sUpdatedAt}} = {14}; + """; return FormattableStringFactory.Create( format, entity.Id, diff --git a/src/Atomizer.EntityFrameworkCore/Providers/Sql/PostgreSqlDialect.cs b/src/Atomizer.EntityFrameworkCore/Providers/Sql/PostgreSqlDialect.cs index c1d66a2..ab7062f 100644 --- a/src/Atomizer.EntityFrameworkCore/Providers/Sql/PostgreSqlDialect.cs +++ b/src/Atomizer.EntityFrameworkCore/Providers/Sql/PostgreSqlDialect.cs @@ -3,157 +3,187 @@ namespace Atomizer.EntityFrameworkCore.Providers.Sql; -internal sealed 
class PostgreSqlDialect : ISqlDialect +internal sealed class PostgreSqlDialect(EntityMap jobs, EntityMap schedules) : BaseSqlDialect(jobs, schedules) { - private readonly EntityMap _jobs; - private readonly EntityMap _schedules; - - public PostgreSqlDialect(EntityMap jobs, EntityMap schedules) + public override FormattableString GetDueJobs(QueueKey queueKey, DateTimeOffset now, int batchSize) { - _jobs = jobs; - _schedules = schedules; - } - - public FormattableString GetDueJobs(QueueKey queueKey, DateTimeOffset now, int batchSize) - { - var table = _jobs.Table; - var c = _jobs.Col; - var colStatus = c[nameof(AtomizerJobEntity.Status)]; - var colQueueKey = c[nameof(AtomizerJobEntity.QueueKey)]; - var colVisibleAt = c[nameof(AtomizerJobEntity.VisibleAt)]; - var colScheduledAt = c[nameof(AtomizerJobEntity.ScheduledAt)]; - var colId = c[nameof(AtomizerJobEntity.Id)]; - var statusPending = (int)AtomizerEntityJobStatus.Pending; - var statusProcessing = (int)AtomizerEntityJobStatus.Processing; - var format = - $@"SELECT t.* -FROM {table} AS t -WHERE {colQueueKey} = {{0}} - AND ( - ( {colStatus} = {statusPending} - AND ( {colVisibleAt} IS NULL - OR {colVisibleAt} <= {{1}}) - AND {colScheduledAt} <= {{2}} - ) - OR - ( {colStatus} = {statusProcessing} - AND {colVisibleAt} <= {{3}} - ) - ) -ORDER BY {colScheduledAt}, {colId} -LIMIT {{4}} -FOR NO KEY UPDATE SKIP LOCKED;"; - return FormattableStringFactory.Create(format, queueKey.Key, now, now, now, batchSize); + var format = $$""" + WITH blocked_partitions AS ( + SELECT DISTINCT {{_jPartitionKey}} + FROM {{_jTable}} + WHERE {{_jQueueKey}} = {0} + AND {{_jPartitionKey}} IS NOT NULL + AND ( + {{_jStatus}} = {{_statusProcessing}} + OR ({{_jStatus}} = {{_statusPending}} AND {{_jAttempts}} > 0) + ) + ), + partition_heads AS ( + SELECT {{_jPartitionKey}}, MIN({{_jSequenceNumber}}) AS min_seq + FROM {{_jTable}} + WHERE {{_jQueueKey}} = {1} + AND {{_jPartitionKey}} IS NOT NULL + AND {{_jPartitionKey}} NOT IN (SELECT 
{{_jPartitionKey}} FROM blocked_partitions) + AND ( + ({{_jStatus}} = {{_statusPending}} + AND ({{_jVisibleAt}} IS NULL OR {{_jVisibleAt}} <= {10}) + AND {{_jScheduledAt}} <= {11}) + OR ({{_jStatus}} = {{_statusProcessing}} AND {{_jVisibleAt}} <= {12}) + ) + GROUP BY {{_jPartitionKey}} + ) + SELECT t.* + FROM {{_jTable}} AS t + LEFT JOIN partition_heads ph + ON t.{{_jPartitionKey}} = ph.{{_jPartitionKey}} + AND t.{{_jSequenceNumber}} = ph.min_seq + WHERE t.{{_jQueueKey}} = {2} + AND ( + (t.{{_jPartitionKey}} IS NULL + AND ( + (t.{{_jStatus}} = {{_statusPending}} + AND (t.{{_jVisibleAt}} IS NULL OR t.{{_jVisibleAt}} <= {3}) + AND t.{{_jScheduledAt}} <= {4}) + OR (t.{{_jStatus}} = {{_statusProcessing}} AND t.{{_jVisibleAt}} <= {5}) + ) + ) + OR + (t.{{_jPartitionKey}} IS NOT NULL AND ph.min_seq IS NOT NULL + AND ( + (t.{{_jStatus}} = {{_statusPending}} + AND (t.{{_jVisibleAt}} IS NULL OR t.{{_jVisibleAt}} <= {6}) + AND t.{{_jScheduledAt}} <= {7}) + OR (t.{{_jStatus}} = {{_statusProcessing}} AND t.{{_jVisibleAt}} <= {8}) + ) + ) + ) + ORDER BY t.{{_jScheduledAt}}, t.{{_jId}} + LIMIT {9} + FOR NO KEY UPDATE SKIP LOCKED; + """; + return FormattableStringFactory.Create( + format, + queueKey.Key, // {0} blocked_partitions queue filter + queueKey.Key, // {1} partition_heads queue filter + queueKey.Key, // {2} outer SELECT queue filter + now, // {3} unpartitioned VisibleAt + now, // {4} unpartitioned ScheduledAt + now, // {5} unpartitioned Processing VisibleAt + now, // {6} partitioned VisibleAt + now, // {7} partitioned ScheduledAt + now, // {8} partitioned Processing VisibleAt + batchSize, // {9} LIMIT + now, // {10} partition_heads VisibleAt + now, // {11} partition_heads ScheduledAt + now // {12} partition_heads Processing VisibleAt + ); } - public FormattableString ReleaseLeasedJobs(LeaseToken leaseToken, DateTimeOffset now) + public override FormattableString InsertJobWithSequence(AtomizerJob job) { - var table = _jobs.Table; - var c = _jobs.Col; - var colStatus = 
c[nameof(AtomizerJobEntity.Status)]; - var colLeaseToken = c[nameof(AtomizerJobEntity.LeaseToken)]; - var colVisibleAt = c[nameof(AtomizerJobEntity.VisibleAt)]; - var colUpdatedAt = c[nameof(AtomizerJobEntity.UpdatedAt)]; - var statusPending = (int)AtomizerEntityJobStatus.Pending; - var statusProcessing = (int)AtomizerEntityJobStatus.Processing; - var format = - $@"UPDATE {table} -SET {colStatus} = {statusPending}, - {colLeaseToken} = NULL, - {colVisibleAt} = NULL, - {colUpdatedAt} = {{0}} -WHERE {colLeaseToken} = {{1}} - AND {colStatus} = {statusProcessing};"; - return FormattableStringFactory.Create(format, now, leaseToken.Token); + var entity = job.ToEntity(); + var retryIntervals = SerializeIntervals(entity.RetryIntervals); + var format = $$""" + INSERT INTO {{_jTable}} ( + {{_jId}}, {{_jQueueKey}}, {{_jPayloadType}}, {{_jPayload}}, + {{_jScheduledAt}}, {{_jVisibleAt}}, {{_jStatus}}, {{_jAttempts}}, + {{_jRetryIntervals}}, {{_jCreatedAt}}, {{_jUpdatedAt}}, + {{_jLeaseToken}}, {{_jScheduleJobKey}}, {{_jIdempotencyKey}}, + {{_jPartitionKey}}, {{_jSequenceNumber}} + ) + SELECT {0}, {1}, {2}, {3}, + {4}, {5}, {6}, {7}, + {8}, {9}, {10}, + {11}, {12}, {13}, + {14}, + COALESCE((SELECT MAX({{_jSequenceNumber}}) FROM (SELECT {{_jSequenceNumber}} FROM {{_jTable}} WHERE {{_jQueueKey}} = {15} AND {{_jPartitionKey}} = {16} FOR NO KEY UPDATE) AS sub), 0) + 1; + """; + return FormattableStringFactory.Create( + format, + entity.Id, + entity.QueueKey, + entity.PayloadType, + entity.Payload, + entity.ScheduledAt, + entity.VisibleAt, + (int)entity.Status, + entity.Attempts, + retryIntervals, + entity.CreatedAt, + entity.UpdatedAt, + entity.LeaseToken, + entity.ScheduleJobKey, + entity.IdempotencyKey, + entity.PartitionKey, + entity.QueueKey, + entity.PartitionKey + ); } - public FormattableString GetDueSchedules(DateTimeOffset now) + public override FormattableString GetDueSchedules(DateTimeOffset now) { - var table = _schedules.Table; - var c = _schedules.Col; - var colEnabled 
= c[nameof(AtomizerScheduleEntity.Enabled)]; - var colNextRunAt = c[nameof(AtomizerScheduleEntity.NextRunAt)]; - var colId = c[nameof(AtomizerScheduleEntity.Id)]; - var format = - $@"SELECT t.* -FROM {table} AS t -WHERE {colEnabled} = TRUE - AND {colNextRunAt} <= {{0}} -ORDER BY {colNextRunAt}, {colId} -FOR NO KEY UPDATE SKIP LOCKED;"; + var format = $$""" + SELECT t.* + FROM {{_sTable}} AS t + WHERE {{_sEnabled}} = TRUE + AND {{_sNextRunAt}} <= {0} + ORDER BY {{_sNextRunAt}}, {{_sId}} + FOR NO KEY UPDATE SKIP LOCKED; + """; return FormattableStringFactory.Create(format, now); } - public FormattableString UpsertScheduleAsync(AtomizerSchedule schedule, DateTimeOffset now) + public override FormattableString UpsertSchedule(AtomizerSchedule schedule, DateTimeOffset now) { var entity = schedule.ToEntity(); - var table = _schedules.Table; - var c = _schedules.Col; - var colId = c[nameof(AtomizerScheduleEntity.Id)]; - var colJobKey = c[nameof(AtomizerScheduleEntity.JobKey)]; - var colQueueKey = c[nameof(AtomizerScheduleEntity.QueueKey)]; - var colPayloadType = c[nameof(AtomizerScheduleEntity.PayloadType)]; - var colPayload = c[nameof(AtomizerScheduleEntity.Payload)]; - var colSchedule = c[nameof(AtomizerScheduleEntity.Schedule)]; - var colTimeZone = c[nameof(AtomizerScheduleEntity.TimeZone)]; - var colMisfirePolicy = c[nameof(AtomizerScheduleEntity.MisfirePolicy)]; - var colMaxCatchUp = c[nameof(AtomizerScheduleEntity.MaxCatchUp)]; - var colEnabled = c[nameof(AtomizerScheduleEntity.Enabled)]; - var colRetryIntervals = c[nameof(AtomizerScheduleEntity.RetryIntervals)]; - var colNextRunAt = c[nameof(AtomizerScheduleEntity.NextRunAt)]; - var colLastEnqueueAt = c[nameof(AtomizerScheduleEntity.LastEnqueueAt)]; - var colCreatedAt = c[nameof(AtomizerScheduleEntity.CreatedAt)]; - var colUpdatedAt = c[nameof(AtomizerScheduleEntity.UpdatedAt)]; - var retryIntervals = string.Join( - ";", - Array.ConvertAll(entity.RetryIntervals, ts => (long)ts.TotalMilliseconds) - ); - var format = 
- $@"INSERT INTO {table} ( - {colId}, - {colJobKey}, - {colQueueKey}, - {colPayloadType}, - {colPayload}, - {colSchedule}, - {colTimeZone}, - {colMisfirePolicy}, - {colMaxCatchUp}, - {colEnabled}, - {colRetryIntervals}, - {colNextRunAt}, - {colLastEnqueueAt}, - {colCreatedAt}, - {colUpdatedAt} -) VALUES ( - {{0}}, - {{1}}, - {{2}}, - {{3}}, - {{4}}, - {{5}}, - {{6}}, - {{7}}, - {{8}}, - {{9}}, - {{10}}, - {{11}}, - {{12}}, - {{13}}, - {{14}} -) -ON CONFLICT ({colJobKey}) DO UPDATE SET - {colQueueKey} = EXCLUDED.{colQueueKey}, - {colPayloadType} = EXCLUDED.{colPayloadType}, - {colPayload} = EXCLUDED.{colPayload}, - {colSchedule} = EXCLUDED.{colSchedule}, - {colTimeZone} = EXCLUDED.{colTimeZone}, - {colMisfirePolicy} = EXCLUDED.{colMisfirePolicy}, - {colMaxCatchUp} = EXCLUDED.{colMaxCatchUp}, - {colEnabled} = EXCLUDED.{colEnabled}, - {colRetryIntervals} = EXCLUDED.{colRetryIntervals}, - {colNextRunAt} = EXCLUDED.{colNextRunAt}, - {colUpdatedAt} = EXCLUDED.{colUpdatedAt};"; + var retryIntervals = SerializeIntervals(entity.RetryIntervals); + var format = $$""" + INSERT INTO {{_sTable}} ( + {{_sId}}, + {{_sJobKey}}, + {{_sQueueKey}}, + {{_sPayloadType}}, + {{_sPayload}}, + {{_sSchedule}}, + {{_sTimeZone}}, + {{_sMisfirePolicy}}, + {{_sMaxCatchUp}}, + {{_sEnabled}}, + {{_sRetryIntervals}}, + {{_sNextRunAt}}, + {{_sLastEnqueueAt}}, + {{_sCreatedAt}}, + {{_sUpdatedAt}} + ) VALUES ( + {0}, + {1}, + {2}, + {3}, + {4}, + {5}, + {6}, + {7}, + {8}, + {9}, + {10}, + {11}, + {12}, + {13}, + {14} + ) + ON CONFLICT ({{_sJobKey}}) DO UPDATE SET + {{_sQueueKey}} = EXCLUDED.{{_sQueueKey}}, + {{_sPayloadType}} = EXCLUDED.{{_sPayloadType}}, + {{_sPayload}} = EXCLUDED.{{_sPayload}}, + {{_sSchedule}} = EXCLUDED.{{_sSchedule}}, + {{_sTimeZone}} = EXCLUDED.{{_sTimeZone}}, + {{_sMisfirePolicy}} = EXCLUDED.{{_sMisfirePolicy}}, + {{_sMaxCatchUp}} = EXCLUDED.{{_sMaxCatchUp}}, + {{_sEnabled}} = EXCLUDED.{{_sEnabled}}, + {{_sRetryIntervals}} = EXCLUDED.{{_sRetryIntervals}}, + {{_sNextRunAt}} = 
EXCLUDED.{{_sNextRunAt}}, + {{_sUpdatedAt}} = EXCLUDED.{{_sUpdatedAt}}; + """; return FormattableStringFactory.Create( format, entity.Id, diff --git a/src/Atomizer.EntityFrameworkCore/Providers/Sql/SqlServerDialect.cs b/src/Atomizer.EntityFrameworkCore/Providers/Sql/SqlServerDialect.cs index 883e61e..b0ee014 100644 --- a/src/Atomizer.EntityFrameworkCore/Providers/Sql/SqlServerDialect.cs +++ b/src/Atomizer.EntityFrameworkCore/Providers/Sql/SqlServerDialect.cs @@ -3,157 +3,186 @@ namespace Atomizer.EntityFrameworkCore.Providers.Sql; -internal sealed class SqlServerDialect : ISqlDialect +internal sealed class SqlServerDialect(EntityMap jobs, EntityMap schedules) : BaseSqlDialect(jobs, schedules) { - private readonly EntityMap _jobs; - private readonly EntityMap _schedules; - - public SqlServerDialect(EntityMap jobs, EntityMap schedules) - { - _jobs = jobs; - _schedules = schedules; - } - - public FormattableString GetDueJobs(QueueKey queueKey, DateTimeOffset now, int batchSize) + public override FormattableString GetDueJobs(QueueKey queueKey, DateTimeOffset now, int batchSize) { - var table = _jobs.Table; - var c = _jobs.Col; - var colStatus = c[nameof(AtomizerJobEntity.Status)]; - var colQueueKey = c[nameof(AtomizerJobEntity.QueueKey)]; - var colVisibleAt = c[nameof(AtomizerJobEntity.VisibleAt)]; - var colScheduledAt = c[nameof(AtomizerJobEntity.ScheduledAt)]; - var colId = c[nameof(AtomizerJobEntity.Id)]; - var statusPending = (int)AtomizerEntityJobStatus.Pending; - var statusProcessing = (int)AtomizerEntityJobStatus.Processing; - var format = - $@"SELECT TOP({batchSize}) t.* -FROM {table} AS t WITH (UPDLOCK, READPAST, ROWLOCK) -WHERE {colQueueKey} = {{0}} - AND ( - ( {colStatus} = {statusPending} - AND ( {colVisibleAt} IS NULL - OR {colVisibleAt} <= {{1}}) - AND {colScheduledAt} <= {{2}} - ) - OR - ( {colStatus} = {statusProcessing} - AND {colVisibleAt} <= {{3}} - ) - ) -ORDER BY {colScheduledAt}, {colId};"; - return FormattableStringFactory.Create(format, 
queueKey.Key, now, now, now); + var format = $$""" + WITH blocked_partitions AS ( + SELECT DISTINCT {{_jPartitionKey}} + FROM {{_jTable}} + WHERE {{_jQueueKey}} = {0} + AND {{_jPartitionKey}} IS NOT NULL + AND ( + {{_jStatus}} = {{_statusProcessing}} + OR ({{_jStatus}} = {{_statusPending}} AND {{_jAttempts}} > 0) + ) + ), + partition_heads AS ( + SELECT {{_jPartitionKey}}, MIN({{_jSequenceNumber}}) AS min_seq + FROM {{_jTable}} + WHERE {{_jQueueKey}} = {1} + AND {{_jPartitionKey}} IS NOT NULL + AND {{_jPartitionKey}} NOT IN (SELECT {{_jPartitionKey}} FROM blocked_partitions) + AND ( + ({{_jStatus}} = {{_statusPending}} + AND ({{_jVisibleAt}} IS NULL OR {{_jVisibleAt}} <= {9}) + AND {{_jScheduledAt}} <= {10}) + OR ({{_jStatus}} = {{_statusProcessing}} AND {{_jVisibleAt}} <= {11}) + ) + GROUP BY {{_jPartitionKey}} + ) + SELECT TOP({{batchSize}}) t.* + FROM {{_jTable}} AS t WITH (UPDLOCK, READPAST, ROWLOCK) + LEFT JOIN partition_heads ph + ON t.{{_jPartitionKey}} = ph.{{_jPartitionKey}} + AND t.{{_jSequenceNumber}} = ph.min_seq + WHERE t.{{_jQueueKey}} = {2} + AND ( + (t.{{_jPartitionKey}} IS NULL + AND ( + (t.{{_jStatus}} = {{_statusPending}} + AND (t.{{_jVisibleAt}} IS NULL OR t.{{_jVisibleAt}} <= {3}) + AND t.{{_jScheduledAt}} <= {4}) + OR (t.{{_jStatus}} = {{_statusProcessing}} AND t.{{_jVisibleAt}} <= {5}) + ) + ) + OR + (t.{{_jPartitionKey}} IS NOT NULL AND ph.min_seq IS NOT NULL + AND ( + (t.{{_jStatus}} = {{_statusPending}} + AND (t.{{_jVisibleAt}} IS NULL OR t.{{_jVisibleAt}} <= {6}) + AND t.{{_jScheduledAt}} <= {7}) + OR (t.{{_jStatus}} = {{_statusProcessing}} AND t.{{_jVisibleAt}} <= {8}) + ) + ) + ) + ORDER BY t.{{_jScheduledAt}}, t.{{_jId}}; + """; + return FormattableStringFactory.Create( + format, + queueKey.Key, // {0} blocked_partitions queue filter + queueKey.Key, // {1} partition_heads queue filter + queueKey.Key, // {2} outer SELECT queue filter + now, // {3} unpartitioned VisibleAt + now, // {4} unpartitioned ScheduledAt + now, // {5} 
unpartitioned Processing VisibleAt + now, // {6} partitioned VisibleAt + now, // {7} partitioned ScheduledAt + now, // {8} partitioned Processing VisibleAt + now, // {9} partition_heads VisibleAt (batchSize is TOP(batchSize) inlined, not a placeholder) + now, // {10} partition_heads ScheduledAt + now // {11} partition_heads Processing VisibleAt + ); } - public FormattableString ReleaseLeasedJobs(LeaseToken leaseToken, DateTimeOffset now) + public override FormattableString InsertJobWithSequence(AtomizerJob job) { - var table = _jobs.Table; - var c = _jobs.Col; - var colStatus = c[nameof(AtomizerJobEntity.Status)]; - var colLeaseToken = c[nameof(AtomizerJobEntity.LeaseToken)]; - var colVisibleAt = c[nameof(AtomizerJobEntity.VisibleAt)]; - var colUpdatedAt = c[nameof(AtomizerJobEntity.UpdatedAt)]; - var statusPending = (int)AtomizerEntityJobStatus.Pending; - var statusProcessing = (int)AtomizerEntityJobStatus.Processing; - var format = - $@"UPDATE {table} -SET {colStatus} = {statusPending}, - {colLeaseToken} = NULL, - {colVisibleAt} = NULL, - {colUpdatedAt} = {{0}} -WHERE {colLeaseToken} = {{1}} - AND {colStatus} = {statusProcessing};"; - return FormattableStringFactory.Create(format, now, leaseToken.Token); + var entity = job.ToEntity(); + var retryIntervals = SerializeIntervals(entity.RetryIntervals); + var format = $$""" + INSERT INTO {{_jTable}} ( + {{_jId}}, {{_jQueueKey}}, {{_jPayloadType}}, {{_jPayload}}, + {{_jScheduledAt}}, {{_jVisibleAt}}, {{_jStatus}}, {{_jAttempts}}, + {{_jRetryIntervals}}, {{_jCreatedAt}}, {{_jUpdatedAt}}, + {{_jLeaseToken}}, {{_jScheduleJobKey}}, {{_jIdempotencyKey}}, + {{_jPartitionKey}}, {{_jSequenceNumber}} + ) + SELECT {0}, {1}, {2}, {3}, + {4}, {5}, {6}, {7}, + {8}, {9}, {10}, + {11}, {12}, {13}, + {14}, + COALESCE((SELECT MAX({{_jSequenceNumber}}) FROM {{_jTable}} WITH (UPDLOCK, HOLDLOCK) WHERE {{_jQueueKey}} = {15} AND {{_jPartitionKey}} = {16}), 0) + 1; + """; + return FormattableStringFactory.Create( + format, + entity.Id, + 
entity.QueueKey, + entity.PayloadType, + entity.Payload, + entity.ScheduledAt, + entity.VisibleAt, + (int)entity.Status, + entity.Attempts, + retryIntervals, + entity.CreatedAt, + entity.UpdatedAt, + entity.LeaseToken, + entity.ScheduleJobKey, + entity.IdempotencyKey, + entity.PartitionKey, + entity.QueueKey, + entity.PartitionKey + ); } - public FormattableString GetDueSchedules(DateTimeOffset now) + public override FormattableString GetDueSchedules(DateTimeOffset now) { - var table = _schedules.Table; - var c = _schedules.Col; - var colEnabled = c[nameof(AtomizerScheduleEntity.Enabled)]; - var colNextRunAt = c[nameof(AtomizerScheduleEntity.NextRunAt)]; - var colId = c[nameof(AtomizerScheduleEntity.Id)]; - var format = - $@"SELECT t.* -FROM {table} AS t WITH (UPDLOCK, READPAST, ROWLOCK) -WHERE {colNextRunAt} <= {{0}} - AND {colEnabled} = 1 -ORDER BY {colNextRunAt}, {colId};"; + var format = $$""" + SELECT t.* + FROM {{_sTable}} AS t WITH (UPDLOCK, READPAST, ROWLOCK) + WHERE {{_sNextRunAt}} <= {0} + AND {{_sEnabled}} = 1 + ORDER BY {{_sNextRunAt}}, {{_sId}}; + """; return FormattableStringFactory.Create(format, now); } - public FormattableString UpsertScheduleAsync(AtomizerSchedule schedule, DateTimeOffset now) + public override FormattableString UpsertSchedule(AtomizerSchedule schedule, DateTimeOffset now) { var entity = schedule.ToEntity(); - var table = _schedules.Table; - var c = _schedules.Col; - var colId = c[nameof(AtomizerScheduleEntity.Id)]; - var colJobKey = c[nameof(AtomizerScheduleEntity.JobKey)]; - var colQueueKey = c[nameof(AtomizerScheduleEntity.QueueKey)]; - var colPayloadType = c[nameof(AtomizerScheduleEntity.PayloadType)]; - var colPayload = c[nameof(AtomizerScheduleEntity.Payload)]; - var colSchedule = c[nameof(AtomizerScheduleEntity.Schedule)]; - var colTimeZone = c[nameof(AtomizerScheduleEntity.TimeZone)]; - var colMisfirePolicy = c[nameof(AtomizerScheduleEntity.MisfirePolicy)]; - var colMaxCatchUp = 
c[nameof(AtomizerScheduleEntity.MaxCatchUp)]; - var colEnabled = c[nameof(AtomizerScheduleEntity.Enabled)]; - var colRetryIntervals = c[nameof(AtomizerScheduleEntity.RetryIntervals)]; - var colNextRunAt = c[nameof(AtomizerScheduleEntity.NextRunAt)]; - var colLastEnqueueAt = c[nameof(AtomizerScheduleEntity.LastEnqueueAt)]; - var colCreatedAt = c[nameof(AtomizerScheduleEntity.CreatedAt)]; - var colUpdatedAt = c[nameof(AtomizerScheduleEntity.UpdatedAt)]; - var retryIntervals = string.Join( - ";", - Array.ConvertAll(entity.RetryIntervals, ts => (long)ts.TotalMilliseconds) - ); - var format = - $@"MERGE {table} WITH (HOLDLOCK) AS target -USING (SELECT {{0}}) AS src ({colJobKey}) -ON target.{colJobKey} = src.{colJobKey} -WHEN MATCHED THEN UPDATE SET - {colQueueKey} = {{1}}, - {colPayloadType} = {{2}}, - {colPayload} = {{3}}, - {colSchedule} = {{4}}, - {colTimeZone} = {{5}}, - {colMisfirePolicy} = {{6}}, - {colMaxCatchUp} = {{7}}, - {colEnabled} = {{8}}, - {colRetryIntervals} = {{9}}, - {colNextRunAt} = {{10}}, - {colUpdatedAt} = {{11}} -WHEN NOT MATCHED THEN INSERT ( - {colId}, - {colJobKey}, - {colQueueKey}, - {colPayloadType}, - {colPayload}, - {colSchedule}, - {colTimeZone}, - {colMisfirePolicy}, - {colMaxCatchUp}, - {colEnabled}, - {colRetryIntervals}, - {colNextRunAt}, - {colLastEnqueueAt}, - {colCreatedAt}, - {colUpdatedAt} -) VALUES ( - {{12}}, - {{0}}, - {{1}}, - {{2}}, - {{3}}, - {{4}}, - {{5}}, - {{6}}, - {{7}}, - {{8}}, - {{9}}, - {{10}}, - {{13}}, - {{14}}, - {{11}} -);"; + var retryIntervals = SerializeIntervals(entity.RetryIntervals); + var format = $$""" + MERGE {{_sTable}} WITH (HOLDLOCK) AS target + USING (SELECT {0}) AS src ({{_sJobKey}}) + ON target.{{_sJobKey}} = src.{{_sJobKey}} + WHEN MATCHED THEN UPDATE SET + {{_sQueueKey}} = {1}, + {{_sPayloadType}} = {2}, + {{_sPayload}} = {3}, + {{_sSchedule}} = {4}, + {{_sTimeZone}} = {5}, + {{_sMisfirePolicy}} = {6}, + {{_sMaxCatchUp}} = {7}, + {{_sEnabled}} = {8}, + {{_sRetryIntervals}} = {9}, + 
{{_sNextRunAt}} = {10}, + {{_sUpdatedAt}} = {11} + WHEN NOT MATCHED THEN INSERT ( + {{_sId}}, + {{_sJobKey}}, + {{_sQueueKey}}, + {{_sPayloadType}}, + {{_sPayload}}, + {{_sSchedule}}, + {{_sTimeZone}}, + {{_sMisfirePolicy}}, + {{_sMaxCatchUp}}, + {{_sEnabled}}, + {{_sRetryIntervals}}, + {{_sNextRunAt}}, + {{_sLastEnqueueAt}}, + {{_sCreatedAt}}, + {{_sUpdatedAt}} + ) VALUES ( + {12}, + {0}, + {1}, + {2}, + {3}, + {4}, + {5}, + {6}, + {7}, + {8}, + {9}, + {10}, + {13}, + {14}, + {11} + ); + """; return FormattableStringFactory.Create( format, entity.JobKey, // {0} diff --git a/src/Atomizer.EntityFrameworkCore/Storage/EntityFrameworkCoreStorage.cs b/src/Atomizer.EntityFrameworkCore/Storage/EntityFrameworkCoreStorage.cs index bbdf363..86fc0b4 100644 --- a/src/Atomizer.EntityFrameworkCore/Storage/EntityFrameworkCoreStorage.cs +++ b/src/Atomizer.EntityFrameworkCore/Storage/EntityFrameworkCoreStorage.cs @@ -38,7 +38,6 @@ public async Task InsertAsync(AtomizerJob job, CancellationToken cancellat { var entity = job.ToEntity(); - // @todo: make idempotency key unique with index var enforceIdempotency = job.IdempotencyKey != null; if (enforceIdempotency) @@ -54,10 +53,37 @@ public async Task InsertAsync(AtomizerJob job, CancellationToken cancellat job.IdempotencyKey, existing.Id ); + job.SequenceNumber = existing.SequenceNumber; return existing.Id; } } + if (job.PartitionKey != null && _providerCache is { IsSupportedProvider: true, Dialect: not null }) + { + var sql = _providerCache.Dialect.InsertJobWithSequence(job); + await _dbContext.Database.ExecuteSqlInterpolatedAsync(sql, cancellationToken); + + var assigned = await JobEntities + .Where(j => j.Id == job.Id) + .Select(j => j.SequenceNumber) + .FirstAsync(cancellationToken); + job.SequenceNumber = assigned; + return job.Id; + } + + if (job.PartitionKey != null && !_providerCache.IsSupportedProvider && _options.AllowUnsafeProviderFallback) + { + // LINQ fallback sequence assignment: not atomic under concurrency but safe 
for single-process use. + var partitionKeyStr = job.PartitionKey.ToString(); + var queueKeyStr = job.QueueKey.Key; + var maxSeq = await JobEntities + .AsNoTracking() + .Where(j => j.QueueKey == queueKeyStr && j.PartitionKey == partitionKeyStr) + .MaxAsync(j => j.SequenceNumber, cancellationToken); + entity.SequenceNumber = (maxSeq ?? 0L) + 1L; + job.SequenceNumber = entity.SequenceNumber; + } + JobEntities.Add(entity); await _dbContext.SaveChangesAsync(cancellationToken); return entity.Id; @@ -67,6 +93,10 @@ public async Task UpdateJobsAsync(IEnumerable jobs, CancellationTok { try { + // Clear the change tracker before attaching updated entities to avoid + // InvalidOperationException when the same entities were previously + // tracked by InsertAsync (or a prior UpdateJobsAsync call) on this context. + _dbContext.ChangeTracker.Clear(); JobEntities.UpdateRange(jobs.Select(j => j.ToEntity())); await _dbContext.SaveChangesAsync(cancellationToken); } @@ -101,28 +131,56 @@ CancellationToken cancellationToken // on the same process (or any second node) will both receive the same jobs. // AllowUnsafeProviderFallback is only safe with DegreeOfParallelism=1 and // a single process instance. It is not safe for production use. - return await JobEntities + var allForQueue = await JobEntities .AsNoTracking() + .Where(j => j.QueueKey == queueKey.Key) + .ToListAsync(cancellationToken); + + // 1) Collect blocked partitions: any partition key with a Processing job + // or a Pending job with prior attempts (retrying). + var blockedPartitions = allForQueue .Where(j => - j.QueueKey == queueKey.Key + j.PartitionKey != null && ( + j.Status == AtomizerEntityJobStatus.Processing + || (j.Status == AtomizerEntityJobStatus.Pending && j.Attempts > 0) + ) + ) + .Select(j => j.PartitionKey!) + .ToHashSet(); + + // 2) Find the lowest sequence number per unblocked partition (partition heads). 
+ // Only consider Pending jobs that are due β€” Completed and Failed jobs must not + // block the next job from becoming the partition head. + var partitionHeads = allForQueue + .Where(j => + j.PartitionKey != null + && !blockedPartitions.Contains(j.PartitionKey) + && j.Status == AtomizerEntityJobStatus.Pending + && (j.VisibleAt == null || j.VisibleAt <= now) + && j.ScheduledAt <= now + ) + .GroupBy(j => j.PartitionKey!) + .Select(g => g.OrderBy(j => j.SequenceNumber).First().Id) + .ToHashSet(); + + // 3) Apply eligibility filter, FIFO partition-head filter, and batch size limit. + return allForQueue + .Where(j => + ( j.Status == AtomizerEntityJobStatus.Pending && (j.VisibleAt == null || j.VisibleAt <= now) && j.ScheduledAt <= now || (j.Status == AtomizerEntityJobStatus.Processing && j.VisibleAt <= now) // lease expired - ) + ) && (j.PartitionKey == null || partitionHeads.Contains(j.Id)) ) .OrderBy(j => j.ScheduledAt) .Take(batchSize) - .Select(job => job.ToAtomizerJob()) - .ToListAsync(cancellationToken); + .Select(j => j.ToAtomizerJob()) + .ToList(); } - throw new NotSupportedException( - "The current database provider is not supported. " - + "To bypass this check, set AllowUnsafeProviderFallback to true in EntityFrameworkCoreJobStorageOptions. " - + "Note that this may lead to unexpected behavior." 
- ); + throw UnsupportedProviderException(); } public async Task ReleaseLeasedAsync( @@ -168,7 +226,7 @@ public async Task UpsertScheduleAsync(AtomizerSchedule schedule, Cancellat if (_providerCache is { IsSupportedProvider: true, Dialect: not null }) { var now = _clock.UtcNow; - var sql = _providerCache.Dialect.UpsertScheduleAsync(schedule, now); + var sql = _providerCache.Dialect.UpsertSchedule(schedule, now); await _dbContext.Database.ExecuteSqlInterpolatedAsync(sql, cancellationToken); return await ScheduleEntities .Where(s => s.JobKey == entity.JobKey) @@ -178,7 +236,6 @@ public async Task UpsertScheduleAsync(AtomizerSchedule schedule, Cancellat if (!_providerCache.IsSupportedProvider && _options.AllowUnsafeProviderFallback) { - // Not race-safe - use only with 1 service running var existing = await ScheduleEntities .AsNoTracking() .FirstOrDefaultAsync(s => s.JobKey == entity.JobKey, cancellationToken); @@ -200,22 +257,23 @@ public async Task UpsertScheduleAsync(AtomizerSchedule schedule, Cancellat catch (DbUpdateException ex) { _logger.LogError(ex, "Failed to upsert schedule for job {JobKey}", schedule.JobKey); + throw; } return entity.Id; } - throw new NotSupportedException( - "The current database provider is not supported. " - + "To bypass this check, set AllowUnsafeProviderFallback to true in EntityFrameworkCoreJobStorageOptions. " - + "Note that this may lead to unexpected behavior." - ); + throw UnsupportedProviderException(); } public async Task UpdateSchedulesAsync(IEnumerable schedules, CancellationToken cancellationToken) { try { + // Clear the change tracker before attaching updated entities to avoid + // InvalidOperationException when the same entities were previously + // tracked by UpsertScheduleAsync (or a prior UpdateSchedulesAsync call) on this context. 
+ _dbContext.ChangeTracker.Clear(); ScheduleEntities.UpdateRange(schedules.Select(s => s.ToEntity())); await _dbContext.SaveChangesAsync(cancellationToken); } @@ -255,11 +313,7 @@ CancellationToken cancellationToken .ToListAsync(cancellationToken); } - throw new NotSupportedException( - "The current database provider is not supported. " - + "To bypass this check, set AllowUnsafeProviderFallback to true in EntityFrameworkCoreJobStorageOptions. " - + "Note that this may lead to unexpected behavior." - ); + throw UnsupportedProviderException(); } public async Task ExecuteInLeaseAsync( @@ -316,4 +370,11 @@ CancellationToken cancellationToken throw; } } + + private static NotSupportedException UnsupportedProviderException() => + new( + "The current database provider is not supported. " + + "To bypass this check, set AllowUnsafeProviderFallback to true in EntityFrameworkCoreJobStorageOptions. " + + "Note that this may lead to unexpected behavior." + ); } diff --git a/src/Atomizer/Abstractions/IAtomizerClient.cs b/src/Atomizer/Abstractions/IAtomizerClient.cs index 0b7b658..974aa15 100644 --- a/src/Atomizer/Abstractions/IAtomizerClient.cs +++ b/src/Atomizer/Abstractions/IAtomizerClient.cs @@ -75,6 +75,15 @@ public sealed class EnqueueOptions /// Defaults to 3 attempts with 15 seconds delays /// public RetryStrategy RetryStrategy { get; set; } = RetryStrategy.Default; + + /// + /// The partition key used to enforce ordered (FIFO) processing within this queue. + /// + /// + /// When set, jobs sharing the same and queue are processed one at a time + /// in sequence-number order. Defaults to , meaning the job participates in no partition. + /// + public PartitionKey? PartitionKey { get; set; } } /// @@ -116,4 +125,14 @@ public sealed class RecurringOptions /// Defaults to true. /// public bool Enabled { get; set; } = true; + + /// + /// The partition key used to enforce ordered (FIFO) processing of recurring job occurrences. 
+ /// + /// + /// When set, each occurrence enqueued from this schedule carries the same , + /// preventing overlapping occurrences of the same recurring job from processing concurrently. + /// Defaults to , meaning occurrences participate in no partition. + /// + public PartitionKey? PartitionKey { get; set; } } diff --git a/src/Atomizer/Abstractions/IAtomizerStorage.cs b/src/Atomizer/Abstractions/IAtomizerStorage.cs index 2f04e17..0f47769 100644 --- a/src/Atomizer/Abstractions/IAtomizerStorage.cs +++ b/src/Atomizer/Abstractions/IAtomizerStorage.cs @@ -11,6 +11,13 @@ public interface IAtomizerStorage /// The Atomizer job to be inserted. /// Cancellation token to cancel the operation. /// The unique identifier of the inserted job. + /// + /// For partitioned jobs ( is not ), implementations + /// must assign a monotonically increasing scoped to the + /// (queue, partition key) before returning. For unpartitioned jobs, + /// must remain . On an idempotency key collision, the existing job's sequence number + /// is assigned to the passed-in job. + /// Task InsertAsync(AtomizerJob job, CancellationToken cancellationToken); /// @@ -29,6 +36,15 @@ public interface IAtomizerStorage /// The maximum number of jobs to retrieve in this batch. /// Cancellation token to cancel the operation. /// A list of due Atomizer jobs. + /// + /// When partition keys are in use, this method enforces FIFO ordering: + /// + /// At most one job per (queue, partition key) is returned β€” the job with the lowest sequence number. + /// A partition is excluded entirely if any job within it is + /// or with prior attempts (Attempts > 0). + /// Jobs without a partition key are unaffected and returned normally alongside partitioned jobs. 
+ /// + /// Task> GetDueJobsAsync( QueueKey queueKey, DateTimeOffset now, @@ -55,7 +71,7 @@ CancellationToken cancellationToken Task UpsertScheduleAsync(AtomizerSchedule schedule, CancellationToken cancellationToken); /// - /// Updates a range of existing schedules jobs in the storage. + /// Updates a range of existing Atomizer schedules in the storage. /// /// The collection of Atomizer schedules to be updated. /// Cancellation token to cancel the operation. @@ -84,6 +100,14 @@ CancellationToken cancellationToken /// /// Cancellation token to cancel the lease acquisition. /// The value returned by . + /// + /// Important: if the lease cannot be acquired (e.g. another worker already holds it), + /// the callback is not invoked and this method returns default(TResult). + /// Callers that do not need a return value should prefer the non-generic + /// + /// overload, which makes the no-op path explicit. Callers of this generic overload must + /// treat a default result as "lease not acquired β€” no work was done". + /// Task ExecuteInLeaseAsync( QueueKey queue, Func> callback, diff --git a/src/Atomizer/Core/AtomizerClient.cs b/src/Atomizer/Core/AtomizerClient.cs index 05ee77b..5044dac 100644 --- a/src/Atomizer/Core/AtomizerClient.cs +++ b/src/Atomizer/Core/AtomizerClient.cs @@ -7,7 +7,7 @@ namespace Atomizer.Core; /// Default implementation of that serializes payloads /// and delegates to the configured . 
/// -public class AtomizerClient : IAtomizerClient +public sealed class AtomizerClient : IAtomizerClient { private readonly IAtomizerServiceScopeFactory _serviceScopeFactory; private readonly IAtomizerJobSerializer _jobSerializer; @@ -84,7 +84,8 @@ public async Task ScheduleRecurringAsync( options.MisfirePolicy, options.MaxCatchUp, options.Enabled, - options.RetryStrategy + options.RetryStrategy, + options.PartitionKey ); using var scope = _serviceScopeFactory.CreateScope(); @@ -107,7 +108,8 @@ CancellationToken ct _clock.UtcNow, when, options.RetryStrategy, - options.IdempotencyKey + options.IdempotencyKey, + partitionKey: options.PartitionKey ); using var scope = _serviceScopeFactory.CreateScope(); @@ -116,7 +118,7 @@ CancellationToken ct _logger.LogDebug( "Enqueuing job {JobId} with payload type {PayloadType} to queue {QueueKey} at {ScheduledAt}", jobId, - job.PayloadType!.FullName, + job.PayloadType?.FullName, job.QueueKey, job.ScheduledAt ); diff --git a/src/Atomizer/Exceptions/InvalidPartitionKeyException.cs b/src/Atomizer/Exceptions/InvalidPartitionKeyException.cs new file mode 100644 index 0000000..59d4495 --- /dev/null +++ b/src/Atomizer/Exceptions/InvalidPartitionKeyException.cs @@ -0,0 +1,39 @@ +namespace Atomizer.Exceptions; + +/// +/// Thrown when a is constructed with an invalid value. +/// +public sealed class InvalidPartitionKeyException : ArgumentException +{ + /// + /// Initializes a new instance with the specified message. + /// + /// The error message. + public InvalidPartitionKeyException(string message) + : base(message) { } + + /// + /// Initializes a new instance with the specified message and inner exception. + /// + /// The error message. + /// The exception that caused this exception. + public InvalidPartitionKeyException(string message, Exception innerException) + : base(message, innerException) { } + + /// + /// Initializes a new instance with the specified message and parameter name. + /// + /// The error message. 
+ /// The name of the parameter that caused the exception. + public InvalidPartitionKeyException(string message, string paramName) + : base(message, paramName) { } + + /// + /// Initializes a new instance with the specified message, parameter name, and inner exception. + /// + /// The error message. + /// The name of the parameter that caused the exception. + /// The exception that caused this exception. + public InvalidPartitionKeyException(string message, string paramName, Exception innerException) + : base(message, paramName, innerException) { } +} diff --git a/src/Atomizer/Models/AtomizerJob.cs b/src/Atomizer/Models/AtomizerJob.cs index 7945202..028708d 100644 --- a/src/Atomizer/Models/AtomizerJob.cs +++ b/src/Atomizer/Models/AtomizerJob.cs @@ -82,6 +82,36 @@ public class AtomizerJob : Model /// public string? IdempotencyKey { get; set; } + /// + /// Gets or sets the partition key that groups this job for ordered (FIFO) processing, + /// or if the job participates in no partition. + /// + public PartitionKey? PartitionKey { get; set; } + + /// + /// Gets or sets the monotonically increasing sequence number within the job's + /// (queue, partition key) group, or for unpartitioned jobs. + /// + /// + /// Assigned atomically by storage at insert time. A value of + /// indicates either an unpartitioned job or a job not yet inserted into storage. + /// + public long? SequenceNumber { get; set; } + + /// + /// Gets whether this job is currently holding its partition, preventing + /// later jobs in the same partition from being picked up. + /// + /// + /// A job holds its partition when it is actively , + /// or when it is with prior attempts (retrying). + /// Jobs without a always return . + /// + public bool IsPartitionBlocked => + PartitionKey != null && + (Status == AtomizerJobStatus.Processing || + (Status == AtomizerJobStatus.Pending && Attempts > 0)); + /// /// Gets or sets the list of error records from previous failed attempts. 
/// @@ -98,6 +128,7 @@ public class AtomizerJob : Model /// Optional retry strategy; defaults to . /// Optional key used to deduplicate identical jobs. /// Optional key linking this job to a recurring schedule. + /// Optional partition key for ordered (FIFO) processing within the queue. /// A new instance. public static AtomizerJob Create( QueueKey queueKey, @@ -107,7 +138,8 @@ public static AtomizerJob Create( DateTimeOffset scheduledAt, RetryStrategy? retryStrategy = null, string? idempotencyKey = null, - JobKey? scheduleJobKey = null + JobKey? scheduleJobKey = null, + PartitionKey? partitionKey = null ) { return new AtomizerJob @@ -124,6 +156,8 @@ public static AtomizerJob Create( UpdatedAt = createdAt, IdempotencyKey = idempotencyKey, ScheduleJobKey = scheduleJobKey, + PartitionKey = partitionKey, + SequenceNumber = null, }; } @@ -182,6 +216,11 @@ public void Attempt() /// The UTC time the job completed. public void MarkAsCompleted(DateTimeOffset completedAt) { + if (Status != AtomizerJobStatus.Processing) + { + throw new InvalidOperationException("Job must be in Processing status to mark as completed."); + } + CompletedAt = completedAt; UpdatedAt = completedAt; Status = AtomizerJobStatus.Completed; @@ -195,6 +234,11 @@ public void MarkAsCompleted(DateTimeOffset completedAt) /// The UTC time the job was permanently failed. 
public void MarkAsFailed(DateTimeOffset failedAt) { + if (Status != AtomizerJobStatus.Processing) + { + throw new InvalidOperationException("Job must be in Processing status to mark as failed."); + } + FailedAt = failedAt; UpdatedAt = failedAt; Status = AtomizerJobStatus.Failed; diff --git a/src/Atomizer/Models/AtomizerSchedule.cs b/src/Atomizer/Models/AtomizerSchedule.cs index 706bcc8..1078499 100644 --- a/src/Atomizer/Models/AtomizerSchedule.cs +++ b/src/Atomizer/Models/AtomizerSchedule.cs @@ -58,6 +58,16 @@ public class AtomizerSchedule : Model /// public RetryStrategy RetryStrategy { get; set; } = RetryStrategy.Default; + /// + /// Gets or sets the partition key applied to each job occurrence enqueued from this schedule, + /// or if occurrences participate in no partition. + /// + /// + /// When set, the is forwarded to every created + /// by ScheduleProcessor, enabling FIFO ordering across recurring job occurrences. + /// + public PartitionKey? PartitionKey { get; set; } + /// /// Gets or sets the UTC time of the next scheduled occurrence. /// @@ -94,6 +104,7 @@ public class AtomizerSchedule : Model /// Maximum missed runs to catch up. Defaults to 5. /// Whether the schedule is active. Defaults to true. /// Optional retry strategy; defaults to . + /// Optional partition key forwarded to each job occurrence for FIFO ordering. /// A new instance. public static AtomizerSchedule Create( JobKey jobKey, @@ -106,7 +117,8 @@ public static AtomizerSchedule Create( MisfirePolicy misfirePolicy = MisfirePolicy.ExecuteNow, int maxCatchUp = 5, bool enabled = true, - RetryStrategy? retryStrategy = null + RetryStrategy? retryStrategy = null, + PartitionKey? partitionKey = null ) { var atomizerSchedule = new AtomizerSchedule @@ -124,6 +136,7 @@ public static AtomizerSchedule Create( RetryStrategy = retryStrategy ?? 
RetryStrategy.Default, CreatedAt = createdAt, UpdatedAt = createdAt, + PartitionKey = partitionKey, }; atomizerSchedule.NextRunAt = diff --git a/src/Atomizer/Models/ValueObjects/PartitionKey.cs b/src/Atomizer/Models/ValueObjects/PartitionKey.cs new file mode 100644 index 0000000..e1ddda6 --- /dev/null +++ b/src/Atomizer/Models/ValueObjects/PartitionKey.cs @@ -0,0 +1,62 @@ +using Atomizer.Exceptions; +using Atomizer.Models.Base; + +namespace Atomizer; + +/// +/// Identifies a FIFO partition for ordered job processing. Maximum length is 255 characters. +/// +public sealed class PartitionKey : ValueObject +{ + /// + /// Initializes a new with the specified key. + /// + /// The partition key. Must be non-empty and at most 255 characters. + public PartitionKey(string key) + { + if (string.IsNullOrWhiteSpace(key)) + { + throw new InvalidPartitionKeyException("Partition key cannot be null or empty.", nameof(key)); + } + + if (key.Length > 255) + { + throw new InvalidPartitionKeyException("Partition key cannot exceed 255 characters.", nameof(key)); + } + + Key = key; + } + + /// + /// Gets the partition key string. + /// + public string Key { get; } + + /// + /// Returns the partition key string. + /// + public override string ToString() => Key; + + /// + /// Implicitly converts a to its string representation. + /// + /// The partition key to convert. + /// The partition key string. + public static implicit operator string(PartitionKey partitionKey) => partitionKey.Key; + + /// + /// Explicitly converts a string to a . + /// + /// The partition key string to convert. + /// A new wrapping the string. + public static explicit operator PartitionKey(string key) => new PartitionKey(key); + + /// + /// Returns the partition key string as the sole equality component. + /// + /// An enumerable containing the partition key string. 
+ protected override IEnumerable GetEqualityValues() + { + yield return Key; + } +} diff --git a/src/Atomizer/Processing/JobProcessor.cs b/src/Atomizer/Processing/JobProcessor.cs index 4315469..b4d9fb7 100644 --- a/src/Atomizer/Processing/JobProcessor.cs +++ b/src/Atomizer/Processing/JobProcessor.cs @@ -63,6 +63,13 @@ public async Task ProcessAsync(AtomizerJob job, CancellationToken ct) catch (OperationCanceledException) when (ct.IsCancellationRequested) { _logger.LogWarning("Operation cancelled while processing job {JobId} on '{Queue}'", job.Id, job.QueueKey); + + // Undo the Attempt() increment before releasing: cancellation is not a failed attempt, + // so Pending+Attempts>0 must not leave the partition permanently blocked. + job.Attempts -= 1; + job.Release(_clock.UtcNow); + using var scope = _serviceScopeFactory.CreateScope(); + await scope.Storage.UpdateJobsAsync(new[] { job }, CancellationToken.None); } catch (Exception ex) { diff --git a/src/Atomizer/Scheduling/ScheduleProcessor.cs b/src/Atomizer/Scheduling/ScheduleProcessor.cs index 6073a42..c8097c6 100644 --- a/src/Atomizer/Scheduling/ScheduleProcessor.cs +++ b/src/Atomizer/Scheduling/ScheduleProcessor.cs @@ -46,7 +46,8 @@ public async Task ProcessAsync(AtomizerSchedule schedule, DateTimeOffset horizon occurrence, schedule.RetryStrategy, idempotencyKey, - schedule.JobKey + schedule.JobKey, + partitionKey: schedule.PartitionKey ); try diff --git a/src/Atomizer/Storage/InMemoryStorage.cs b/src/Atomizer/Storage/InMemoryStorage.cs index d9bc317..87286ca 100644 --- a/src/Atomizer/Storage/InMemoryStorage.cs +++ b/src/Atomizer/Storage/InMemoryStorage.cs @@ -12,6 +12,7 @@ public sealed class InMemoryStorage : IAtomizerStorage { private readonly ConcurrentDictionary _jobs = new(); private readonly ConcurrentDictionary> _queues = new(); + private readonly ConcurrentDictionary> _partitionSequences = new(); private readonly ConcurrentDictionary> _leasesByToken = new(); private readonly Dictionary _schedules = new(); 
@@ -39,8 +40,27 @@ public Task InsertAsync(AtomizerJob job, CancellationToken cancellationTok { cancellationToken.ThrowIfCancellationRequested(); - _jobs[job.Id] = job; + if (job.IdempotencyKey != null) + { + var existing = _jobs.Values.FirstOrDefault(j => j.IdempotencyKey == job.IdempotencyKey); + if (existing != null) + { + job.SequenceNumber = existing.SequenceNumber; + return Task.FromResult(existing.Id); + } + } + + if (job.PartitionKey != null) + { + var partitionSequences = _partitionSequences.GetOrAdd( + job.QueueKey, + _ => new ConcurrentDictionary() + ); + var seq = partitionSequences.AddOrUpdate(job.PartitionKey.Key, 1L, (_, current) => current + 1L); + job.SequenceNumber = seq; + } + _jobs[job.Id] = job; IndexIntoQueue(job); _logger.LogDebug( @@ -51,7 +71,6 @@ public Task InsertAsync(AtomizerJob job, CancellationToken cancellationTok ); EvictCompletedAndFailed(); - return Task.FromResult(job.Id); } @@ -96,15 +115,20 @@ CancellationToken cancellationToken now ); - List candidates; - if (!_queues.TryGetValue(queueKey, out var ids) || ids.IsEmpty) { _logger.LogDebug("LeaseBatch: queue {QueueKey} is empty", queueKey); return Task.FromResult((IReadOnlyList)Array.Empty()); } - candidates = ids + var blockedPartitions = new HashSet(); + foreach (var id in ids.Keys) + { + if (_jobs.TryGetValue(id, out var bj) && bj.IsPartitionBlocked) + blockedPartitions.Add(bj.PartitionKey!.Key); + } + + var eligible = ids .Keys.Select(id => _jobs.TryGetValue(id, out var j) ? j : null) .Where(j => j != null @@ -115,8 +139,18 @@ CancellationToken cancellationToken && j.ScheduledAt <= now ) || (j.Status == AtomizerJobStatus.Processing && j.VisibleAt <= now) // expired lease ) + && (j.PartitionKey == null || !blockedPartitions.Contains(j.PartitionKey.Key)) ) - .Select(j => j!) 
+ .Select(j => j!); + + var unpartitioned = eligible.Where(j => j.PartitionKey == null); + var partitionHeads = eligible + .Where(j => j.PartitionKey != null) + .GroupBy(j => j.PartitionKey!.Key) + .Select(g => g.OrderBy(j => j.SequenceNumber).First()); + + var candidates = unpartitioned + .Concat(partitionHeads) .OrderBy(j => j.ScheduledAt) .ThenBy(j => j.CreatedAt) .Take(Math.Max(0, batchSize)) diff --git a/tests/Atomizer.EntityFrameworkCore.Tests/Atomizer.EntityFrameworkCore.Tests.csproj b/tests/Atomizer.EntityFrameworkCore.Tests/Atomizer.EntityFrameworkCore.Tests.csproj index a98529d..e114e14 100644 --- a/tests/Atomizer.EntityFrameworkCore.Tests/Atomizer.EntityFrameworkCore.Tests.csproj +++ b/tests/Atomizer.EntityFrameworkCore.Tests/Atomizer.EntityFrameworkCore.Tests.csproj @@ -26,7 +26,6 @@ - all runtime; build; native; contentfiles; analyzers; buildtransitive diff --git a/tests/Atomizer.EntityFrameworkCore.Tests/Providers/MySqlDialectTests.cs b/tests/Atomizer.EntityFrameworkCore.Tests/Providers/MySqlDialectTests.cs index 5de0de9..9aefd83 100644 --- a/tests/Atomizer.EntityFrameworkCore.Tests/Providers/MySqlDialectTests.cs +++ b/tests/Atomizer.EntityFrameworkCore.Tests/Providers/MySqlDialectTests.cs @@ -1,4 +1,3 @@ -using Atomizer; using Atomizer.EntityFrameworkCore.Entities; using Atomizer.EntityFrameworkCore.Providers; using Atomizer.EntityFrameworkCore.Providers.Sql; @@ -60,7 +59,7 @@ public void ReleaseLeasedJobs_WhenCalled_ShouldContainUpdateStatement() } [Fact] - public void UpsertScheduleAsync_WhenCalled_ShouldContainOnDuplicateKeyUpdate() + public void UpsertSchedule_WhenCalled_ShouldContainOnDuplicateKeyUpdate() { var (jobs, schedules) = BuildMaps(); var dialect = new MySqlDialect(jobs, schedules); @@ -74,7 +73,7 @@ public void UpsertScheduleAsync_WhenCalled_ShouldContainOnDuplicateKeyUpdate() DateTimeOffset.UtcNow ); - var sql = dialect.UpsertScheduleAsync(schedule, DateTimeOffset.UtcNow); + var sql = dialect.UpsertSchedule(schedule, 
DateTimeOffset.UtcNow); sql.Format.Should().Contain("ON DUPLICATE KEY UPDATE"); } diff --git a/tests/Atomizer.EntityFrameworkCore.Tests/Providers/PostgreSqlDialectTests.cs b/tests/Atomizer.EntityFrameworkCore.Tests/Providers/PostgreSqlDialectTests.cs index cf8bf32..35bcd51 100644 --- a/tests/Atomizer.EntityFrameworkCore.Tests/Providers/PostgreSqlDialectTests.cs +++ b/tests/Atomizer.EntityFrameworkCore.Tests/Providers/PostgreSqlDialectTests.cs @@ -60,7 +60,7 @@ public void ReleaseLeasedJobs_WhenCalled_ShouldContainUpdateStatement() } [Fact] - public void UpsertScheduleAsync_WhenCalled_ShouldContainOnConflict() + public void UpsertSchedule_WhenCalled_ShouldContainOnConflict() { var (jobs, schedules) = BuildMaps(); var dialect = new PostgreSqlDialect(jobs, schedules); @@ -74,7 +74,7 @@ public void UpsertScheduleAsync_WhenCalled_ShouldContainOnConflict() DateTimeOffset.UtcNow ); - var sql = dialect.UpsertScheduleAsync(schedule, DateTimeOffset.UtcNow); + var sql = dialect.UpsertSchedule(schedule, DateTimeOffset.UtcNow); sql.Format.Should().Contain("ON CONFLICT"); sql.Format.Should().Contain("DO UPDATE SET"); diff --git a/tests/Atomizer.EntityFrameworkCore.Tests/Providers/SqlServerDialectTests.cs b/tests/Atomizer.EntityFrameworkCore.Tests/Providers/SqlServerDialectTests.cs index 02fb925..d86c308 100644 --- a/tests/Atomizer.EntityFrameworkCore.Tests/Providers/SqlServerDialectTests.cs +++ b/tests/Atomizer.EntityFrameworkCore.Tests/Providers/SqlServerDialectTests.cs @@ -60,7 +60,7 @@ public void ReleaseLeasedJobs_WhenCalled_ShouldContainUpdateStatement() } [Fact] - public void UpsertScheduleAsync_WhenCalled_ShouldContainMergeWithHoldlock() + public void UpsertSchedule_WhenCalled_ShouldContainMergeWithHoldlock() { var (jobs, schedules) = BuildMaps(); var dialect = new SqlServerDialect(jobs, schedules); @@ -74,7 +74,7 @@ public void UpsertScheduleAsync_WhenCalled_ShouldContainMergeWithHoldlock() DateTimeOffset.UtcNow ); - var sql = dialect.UpsertScheduleAsync(schedule, 
DateTimeOffset.UtcNow); + var sql = dialect.UpsertSchedule(schedule, DateTimeOffset.UtcNow); sql.Format.Should().Contain("MERGE"); sql.Format.Should().Contain("WITH (HOLDLOCK)"); diff --git a/tests/Atomizer.EntityFrameworkCore.Tests/Storage/EntityFrameworkCoreStorageTests.cs b/tests/Atomizer.EntityFrameworkCore.Tests/Storage/EntityFrameworkCoreStorageTests.cs index 959d49e..eaaded3 100644 --- a/tests/Atomizer.EntityFrameworkCore.Tests/Storage/EntityFrameworkCoreStorageTests.cs +++ b/tests/Atomizer.EntityFrameworkCore.Tests/Storage/EntityFrameworkCoreStorageTests.cs @@ -127,6 +127,8 @@ public async Task UpdateJobsAsync_WhenJobsExist_ShouldUpdateJobs() dbContext.ChangeTracker.Clear(); // Act + job1.Lease(FakeDataFactory.LeaseToken(), _clock.UtcNow, TimeSpan.FromMinutes(10)); + job2.Lease(FakeDataFactory.LeaseToken(), _clock.UtcNow, TimeSpan.FromMinutes(10)); job1.MarkAsCompleted(_clock.UtcNow); job2.MarkAsFailed(_clock.UtcNow); await storage.UpdateJobsAsync(new[] { job1, job2 }, CancellationToken.None); @@ -669,10 +671,7 @@ public async Task UpsertScheduleAsync_WhenScheduleExists_ShouldUpdateSchedule() public async ValueTask DisposeAsync() { await using var dbContext = _dbContextFactory(); - dbContext.Set().RemoveRange(dbContext.Set()); - dbContext.Set().RemoveRange(dbContext.Set()); - dbContext.Set().RemoveRange(dbContext.Set()); - await dbContext.SaveChangesAsync(TestContext.Current.CancellationToken); + await StorageTestCleanup.ClearAsync(dbContext, TestContext.Current.CancellationToken); } public ValueTask InitializeAsync() diff --git a/tests/Atomizer.EntityFrameworkCore.Tests/Storage/MySql/MySqlStorageContractTests.cs b/tests/Atomizer.EntityFrameworkCore.Tests/Storage/MySql/MySqlStorageContractTests.cs new file mode 100644 index 0000000..685edb5 --- /dev/null +++ b/tests/Atomizer.EntityFrameworkCore.Tests/Storage/MySql/MySqlStorageContractTests.cs @@ -0,0 +1,39 @@ +using Atomizer.Abstractions; +using Atomizer.Core; +using Atomizer.EntityFrameworkCore.Storage; 
+using Atomizer.EntityFrameworkCore.Tests.Fixtures; +using Atomizer.EntityFrameworkCore.Tests.TestSetup.MySql; +using Atomizer.Tests.Utilities.StorageContract; +using Microsoft.Extensions.Logging; +using NSubstitute; + +namespace Atomizer.EntityFrameworkCore.Tests.Storage.MySql; + +[Collection(nameof(MySqlDatabaseFixture))] +public sealed class MySqlStorageContractTests(MySqlDatabaseFixture fixture) : AtomizerStorageContractTests +{ + private MySqlDbContext? _dbContext; + + protected override IAtomizerStorage CreateStorage(IAtomizerClock clock) + { + _dbContext = fixture.CreateNewDbContext(); + return new EntityFrameworkCoreStorage( + _dbContext, + new EntityFrameworkCoreJobStorageOptions(), + Substitute.For>>(), + clock + ); + } + + public override async ValueTask DisposeAsync() + { + if (_dbContext is not null) + await _dbContext.DisposeAsync(); + + // Delete errors before jobs to satisfy the FK constraint, then schedules. + // Use a bounded cancellation token so teardown does not hang indefinitely. 
+ using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(30)); + await using var cleanupContext = fixture.CreateNewDbContext(); + await StorageTestCleanup.ClearAsync(cleanupContext, cts.Token); + } +} diff --git a/tests/Atomizer.EntityFrameworkCore.Tests/Storage/Postgres/PostgresStorageContractTests.cs b/tests/Atomizer.EntityFrameworkCore.Tests/Storage/Postgres/PostgresStorageContractTests.cs new file mode 100644 index 0000000..77104db --- /dev/null +++ b/tests/Atomizer.EntityFrameworkCore.Tests/Storage/Postgres/PostgresStorageContractTests.cs @@ -0,0 +1,39 @@ +using Atomizer.Abstractions; +using Atomizer.Core; +using Atomizer.EntityFrameworkCore.Storage; +using Atomizer.EntityFrameworkCore.Tests.Fixtures; +using Atomizer.EntityFrameworkCore.Tests.TestSetup.Postgres; +using Atomizer.Tests.Utilities.StorageContract; +using Microsoft.Extensions.Logging; +using NSubstitute; + +namespace Atomizer.EntityFrameworkCore.Tests.Storage.Postgres; + +[Collection(nameof(PostgreSqlDatabaseFixture))] +public sealed class PostgresStorageContractTests(PostgreSqlDatabaseFixture fixture) : AtomizerStorageContractTests +{ + private PostgresDbContext? _dbContext; + + protected override IAtomizerStorage CreateStorage(IAtomizerClock clock) + { + _dbContext = fixture.CreateNewDbContext(); + return new EntityFrameworkCoreStorage( + _dbContext, + new EntityFrameworkCoreJobStorageOptions(), + Substitute.For>>(), + clock + ); + } + + public override async ValueTask DisposeAsync() + { + if (_dbContext is not null) + await _dbContext.DisposeAsync(); + + // Delete errors before jobs to satisfy the FK constraint, then schedules. + // Use a bounded cancellation token so teardown does not hang indefinitely. 
+ using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(30)); + await using var cleanupContext = fixture.CreateNewDbContext(); + await StorageTestCleanup.ClearAsync(cleanupContext, cts.Token); + } +} diff --git a/tests/Atomizer.EntityFrameworkCore.Tests/Storage/SqlServer/SqlServerStorageContractTests.cs b/tests/Atomizer.EntityFrameworkCore.Tests/Storage/SqlServer/SqlServerStorageContractTests.cs new file mode 100644 index 0000000..072c976 --- /dev/null +++ b/tests/Atomizer.EntityFrameworkCore.Tests/Storage/SqlServer/SqlServerStorageContractTests.cs @@ -0,0 +1,39 @@ +using Atomizer.Abstractions; +using Atomizer.Core; +using Atomizer.EntityFrameworkCore.Storage; +using Atomizer.EntityFrameworkCore.Tests.Fixtures; +using Atomizer.EntityFrameworkCore.Tests.TestSetup.SqlServer; +using Atomizer.Tests.Utilities.StorageContract; +using Microsoft.Extensions.Logging; +using NSubstitute; + +namespace Atomizer.EntityFrameworkCore.Tests.Storage.SqlServer; + +[Collection(nameof(SqlServerDatabaseFixture))] +public sealed class SqlServerStorageContractTests(SqlServerDatabaseFixture fixture) : AtomizerStorageContractTests +{ + private SqlServerDbContext? _dbContext; + + protected override IAtomizerStorage CreateStorage(IAtomizerClock clock) + { + _dbContext = fixture.CreateNewDbContext(); + return new EntityFrameworkCoreStorage( + _dbContext, + new EntityFrameworkCoreJobStorageOptions(), + Substitute.For>>(), + clock + ); + } + + public override async ValueTask DisposeAsync() + { + if (_dbContext is not null) + await _dbContext.DisposeAsync(); + + // Delete errors before jobs to satisfy the FK constraint, then schedules. + // Use a bounded cancellation token so teardown does not hang indefinitely. 
+ using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(30)); + await using var cleanupContext = fixture.CreateNewDbContext(); + await StorageTestCleanup.ClearAsync(cleanupContext, cts.Token); + } +} diff --git a/tests/Atomizer.EntityFrameworkCore.Tests/Storage/Sqlite/SqliteStorageContractTests.cs b/tests/Atomizer.EntityFrameworkCore.Tests/Storage/Sqlite/SqliteStorageContractTests.cs new file mode 100644 index 0000000..c54944e --- /dev/null +++ b/tests/Atomizer.EntityFrameworkCore.Tests/Storage/Sqlite/SqliteStorageContractTests.cs @@ -0,0 +1,49 @@ +using Atomizer.Abstractions; +using Atomizer.Core; +using Atomizer.EntityFrameworkCore.Storage; +using Atomizer.EntityFrameworkCore.Tests.Fixtures; +using Atomizer.EntityFrameworkCore.Tests.TestSetup.Sqlite; +using Atomizer.Tests.Utilities.StorageContract; +using Microsoft.Extensions.Logging; +using NSubstitute; + +namespace Atomizer.EntityFrameworkCore.Tests.Storage.Sqlite; + +/// +/// Contract tests for backed by SQLite. +/// SQLite is not a supported production provider; it exercises the LINQ fallback path +/// (AllowUnsafeProviderFallback = true), not the CTE dialect SQL. +/// The CTE dialect SQL is verified by the PostgreSQL, SQL Server, and MySQL subclasses. +/// +/// +/// The fallback path enforces FIFO semantics in-process, but it is not safe for concurrent +/// multi-node production use because it does not take provider-level row locks. +/// +[Collection(nameof(SqliteDatabaseFixture))] +public sealed class SqliteStorageContractTests(SqliteDatabaseFixture fixture) : AtomizerStorageContractTests +{ + private SqliteDbContext? 
_dbContext; + + protected override IAtomizerStorage CreateStorage(IAtomizerClock clock) + { + _dbContext = fixture.CreateNewDbContext(); + return new EntityFrameworkCoreStorage( + _dbContext, + new EntityFrameworkCoreJobStorageOptions { AllowUnsafeProviderFallback = true }, + Substitute.For>>(), + clock + ); + } + + public override async ValueTask DisposeAsync() + { + if (_dbContext is not null) + await _dbContext.DisposeAsync(); + + // Delete errors before jobs to satisfy the FK constraint, then schedules. + // Use a bounded cancellation token so teardown does not hang indefinitely. + using var cts = new CancellationTokenSource(TimeSpan.FromSeconds(30)); + await using var cleanupContext = fixture.CreateNewDbContext(); + await StorageTestCleanup.ClearAsync(cleanupContext, cts.Token); + } +} diff --git a/tests/Atomizer.EntityFrameworkCore.Tests/Storage/StorageTestCleanup.cs b/tests/Atomizer.EntityFrameworkCore.Tests/Storage/StorageTestCleanup.cs new file mode 100644 index 0000000..390a6e4 --- /dev/null +++ b/tests/Atomizer.EntityFrameworkCore.Tests/Storage/StorageTestCleanup.cs @@ -0,0 +1,15 @@ +using Atomizer.EntityFrameworkCore.Entities; +using Microsoft.EntityFrameworkCore; + +namespace Atomizer.EntityFrameworkCore.Tests.Storage; + +internal static class StorageTestCleanup +{ + public static async Task ClearAsync(DbContext dbContext, CancellationToken cancellationToken) + { + dbContext.Set().RemoveRange(dbContext.Set()); + dbContext.Set().RemoveRange(dbContext.Set()); + dbContext.Set().RemoveRange(dbContext.Set()); + await dbContext.SaveChangesAsync(cancellationToken); + } +} diff --git a/tests/Atomizer.Tests.Utilities/Atomizer.Tests.Utilities.csproj b/tests/Atomizer.Tests.Utilities/Atomizer.Tests.Utilities.csproj index c35f584..d828433 100644 --- a/tests/Atomizer.Tests.Utilities/Atomizer.Tests.Utilities.csproj +++ b/tests/Atomizer.Tests.Utilities/Atomizer.Tests.Utilities.csproj @@ -3,12 +3,18 @@ net6.0;net8.0;net10.0 enable false - enable true + 12 - + + + + + + 
+ diff --git a/tests/Atomizer.Tests.Utilities/StorageContract/AtomizerStorageContractTests.cs b/tests/Atomizer.Tests.Utilities/StorageContract/AtomizerStorageContractTests.cs new file mode 100644 index 0000000..d55c2fb --- /dev/null +++ b/tests/Atomizer.Tests.Utilities/StorageContract/AtomizerStorageContractTests.cs @@ -0,0 +1,356 @@ +using Atomizer.Abstractions; +using Atomizer.Core; +using Atomizer.Tests.Utilities.Stubs; +using Atomizer.Tests.Utilities.TestJobs; +using AwesomeAssertions; +using NSubstitute; + +namespace Atomizer.Tests.Utilities.StorageContract; + +/// +/// Abstract contract test base that verifies FIFO storage semantics (FIFO-07, FIFO-08, FIFO-09). +/// Subclass this in each storage backend test project and implement . +/// +/// Pre-condition: The returned by +/// must fully implement the FIFO partition-blocking rules +/// described in . Tests will fail if the +/// implementation does not enforce these rules. +/// +/// +public abstract class AtomizerStorageContractTests : IAsyncLifetime +{ + private readonly IAtomizerClock _clock = Substitute.For(); + protected DateTimeOffset _now; + protected IAtomizerStorage _sut = null!; + + /// + /// Creates a fresh storage instance for the test run. + /// + /// The clock instance the storage implementation must use. + /// A new implementation to test. + protected abstract IAtomizerStorage CreateStorage(IAtomizerClock clock); + + /// + public ValueTask InitializeAsync() + { + _now = DateTimeOffset.UtcNow; + _clock.UtcNow.Returns(_now); + _sut = CreateStorage(_clock); + return ValueTask.CompletedTask; + } + + /// + public virtual ValueTask DisposeAsync() => ValueTask.CompletedTask; + + // ------------------------------------------------------------------ + // FIFO-09: SequenceNumber assignment on InsertAsync + // ------------------------------------------------------------------ + + /// + /// FIFO-09: Partitioned jobs receive monotonically increasing sequence numbers. 
+ /// + [Fact] + public async Task InsertAsync_WhenPartitionedJob_ShouldAssignMonotonicallyIncreasingSequenceNumber() + { + // Arrange + var partitionKey = new PartitionKey("order-123"); + var job1 = CreateJob(partitionKey: partitionKey); + var job2 = CreateJob(partitionKey: partitionKey); + + // Act + await _sut.InsertAsync(job1, CancellationToken.None); + await _sut.InsertAsync(job2, CancellationToken.None); + + // Assert + job1.SequenceNumber.Should().NotBeNull(); + job2.SequenceNumber.Should().NotBeNull(); + job2.SequenceNumber.Should().BeGreaterThan(job1.SequenceNumber!.Value); + } + + /// + /// FIFO-09: Unpartitioned jobs do not receive a sequence number. + /// + [Fact] + public async Task InsertAsync_WhenUnpartitionedJob_ShouldNotAssignSequenceNumber() + { + // Arrange + var job = CreateJob(); + + // Act + await _sut.InsertAsync(job, CancellationToken.None); + + // Assert + job.SequenceNumber.Should().BeNull(); + } + + /// + /// FIFO-09 / D-06: On an idempotency key collision, the existing job's sequence number + /// is assigned to the passed-in job. 
+ /// + [Fact] + public async Task InsertAsync_WhenIdempotencyKeyCollision_ShouldAssignExistingSequenceNumber() + { + // Arrange + var partitionKey = new PartitionKey("orders"); + const string idempotencyKey = "idem-key-1"; + + var job1 = CreateJob(partitionKey: partitionKey, idempotencyKey: idempotencyKey); + await _sut.InsertAsync(job1, CancellationToken.None); + + var job2 = CreateJob(partitionKey: partitionKey, idempotencyKey: idempotencyKey); + + // Act + await _sut.InsertAsync(job2, CancellationToken.None); + + // Assert + job1.SequenceNumber.Should().NotBeNull(); + job2.SequenceNumber.Should().Be(job1.SequenceNumber); + } + + // ------------------------------------------------------------------ + // FIFO-07: GetDueJobsAsync returns at most one job per partition + // ------------------------------------------------------------------ + + /// + /// FIFO-07: When multiple jobs share a partition key, only the lowest-sequence-number + /// job is returned by . + /// + [Fact] + public async Task GetDueJobsAsync_WhenMultipleJobsInSamePartition_ShouldReturnOnlyLowestSequenceNumber() + { + // Arrange + var partitionKey = new PartitionKey("batch-key"); + var job1 = CreateJob(partitionKey: partitionKey); + var job2 = CreateJob(partitionKey: partitionKey); + + await _sut.InsertAsync(job1, CancellationToken.None); + await _sut.InsertAsync(job2, CancellationToken.None); + + // Act + var result = await _sut.GetDueJobsAsync(QueueKey.Default, _now, batchSize: 10, CancellationToken.None); + + // Assert + result.Should().HaveCount(1); + result.Single().Id.Should().Be(job1.Id); + } + + /// + /// FIFO-07: Unpartitioned jobs are returned alongside the head of each partition. 
+ /// + [Fact] + public async Task GetDueJobsAsync_WhenUnpartitionedJobsExist_ShouldReturnThemAlongsidePartitionedJobs() + { + // Arrange + var partitionedJob = CreateJob(partitionKey: new PartitionKey("p1")); + var unpartitionedJob = CreateJob(); + + await _sut.InsertAsync(partitionedJob, CancellationToken.None); + await _sut.InsertAsync(unpartitionedJob, CancellationToken.None); + + // Act + var result = await _sut.GetDueJobsAsync(QueueKey.Default, _now, batchSize: 10, CancellationToken.None); + + // Assert + result.Should().HaveCount(2); + result.Should().Contain(j => j.Id == partitionedJob.Id); + result.Should().Contain(j => j.Id == unpartitionedJob.Id); + } + + // ------------------------------------------------------------------ + // FIFO-08: GetDueJobsAsync excludes entire partition when head is blocked + // ------------------------------------------------------------------ + + /// + /// FIFO-08: When the head job of a partition is Processing, the entire partition is excluded. + /// + [Fact] + public async Task GetDueJobsAsync_WhenPartitionIsBlockedByProcessing_ShouldExcludeEntirePartition() + { + // Arrange + var partitionKey = new PartitionKey("blocked-p"); + var job1 = CreateJob(partitionKey: partitionKey); + var job2 = CreateJob(partitionKey: partitionKey); + + await _sut.InsertAsync(job1, CancellationToken.None); + await _sut.InsertAsync(job2, CancellationToken.None); + + // Transition job1 to Processing and persist + job1.Lease(FakeDataFactory.LeaseToken(), _now, TimeSpan.FromMinutes(10)); + await _sut.UpdateJobsAsync([job1], CancellationToken.None); + + // Act + var result = await _sut.GetDueJobsAsync(QueueKey.Default, _now, batchSize: 10, CancellationToken.None); + + // Assert β€” entire partition invisible while job1 is Processing + result.Should().BeEmpty(); + } + + /// + /// FIFO-08: When the head job of a partition is Pending with prior attempts (retrying), + /// the entire partition is excluded. 
+ /// + [Fact] + public async Task GetDueJobsAsync_WhenPartitionIsBlockedByPendingWithAttempts_ShouldExcludeEntirePartition() + { + // Arrange + var partitionKey = new PartitionKey("retry-p"); + var job1 = CreateJob(partitionKey: partitionKey); + var job2 = CreateJob(partitionKey: partitionKey); + + await _sut.InsertAsync(job1, CancellationToken.None); + await _sut.InsertAsync(job2, CancellationToken.None); + + // Simulate retry state: Lease β†’ Attempt β†’ Reschedule (Pending with Attempts = 1) + job1.Lease(FakeDataFactory.LeaseToken(), _now, TimeSpan.FromMinutes(10)); + job1.Attempt(); + job1.Reschedule(_now, _now); + await _sut.UpdateJobsAsync([job1], CancellationToken.None); + + // Act + var result = await _sut.GetDueJobsAsync(QueueKey.Default, _now, batchSize: 10, CancellationToken.None); + + // Assert β€” partition blocked while job1 is Pending with Attempts > 0 + result.Should().BeEmpty(); + } + + /// + /// FIFO-08: Two jobs sharing the same partition key string but in different queues + /// are treated as independent partitions. 
+ /// + [Fact] + public async Task GetDueJobsAsync_WhenSamePartitionKeyInDifferentQueues_ShouldTreatAsIndependent() + { + var partitionKey = new PartitionKey("shared-key"); + var queueA = QueueKey.Default; + var queueB = new QueueKey("secondary"); + + var jobA = CreateJob(partitionKey: partitionKey, queueKey: queueA); + var jobB = CreateJob(partitionKey: partitionKey, queueKey: queueB); + + await _sut.InsertAsync(jobA, CancellationToken.None); + await _sut.InsertAsync(jobB, CancellationToken.None); + + jobA.Lease(FakeDataFactory.LeaseToken(), _now, TimeSpan.FromMinutes(10)); + await _sut.UpdateJobsAsync([jobA], CancellationToken.None); + + var result = await _sut.GetDueJobsAsync(queueB, _now, batchSize: 10, CancellationToken.None); + + result.Should().HaveCount(1); + result.Single().Id.Should().Be(jobB.Id); + } + + // ------------------------------------------------------------------ + // ReleaseLeasedAsync: partition unblocking + // ------------------------------------------------------------------ + + /// + /// Releasing a leased partition head must clear VisibleAt and make the job visible again, + /// unblocking the entire partition. 
+ /// + [Fact] + public async Task ReleaseLeasedAsync_WhenPartitionHeadReleased_ShouldUnblockPartition() + { + var partitionKey = new PartitionKey("release-p"); + var job1 = CreateJob(partitionKey: partitionKey); + var job2 = CreateJob(partitionKey: partitionKey); + + await _sut.InsertAsync(job1, CancellationToken.None); + await _sut.InsertAsync(job2, CancellationToken.None); + + var leaseToken = FakeDataFactory.LeaseToken(); + job1.Lease(leaseToken, _now, TimeSpan.FromMinutes(10)); + await _sut.UpdateJobsAsync([job1], CancellationToken.None); + + var blocked = await _sut.GetDueJobsAsync(QueueKey.Default, _now, batchSize: 10, CancellationToken.None); + blocked.Should().BeEmpty(); + + await _sut.ReleaseLeasedAsync(leaseToken, _now, CancellationToken.None); + + var unblocked = await _sut.GetDueJobsAsync(QueueKey.Default, _now, batchSize: 10, CancellationToken.None); + unblocked.Should().HaveCount(1); + unblocked.Single().Id.Should().Be(job1.Id); + } + + // ------------------------------------------------------------------ + // FIFO-13: terminal-state unblocking + // ------------------------------------------------------------------ + + /// + /// FIFO-13: When the head job of a partition completes successfully, the partition + /// is unblocked and the next job becomes eligible for processing. 
+ /// + [Fact] + public async Task GetDueJobsAsync_WhenPartitionHeadCompleted_ShouldUnblockNextJob() + { + // Arrange + var partitionKey = new PartitionKey("complete-p"); + var job1 = CreateJob(partitionKey: partitionKey); + var job2 = CreateJob(partitionKey: partitionKey); + + await _sut.InsertAsync(job1, CancellationToken.None); + await _sut.InsertAsync(job2, CancellationToken.None); + + // Transition job1 to Completed: Lease β†’ Attempt β†’ MarkAsCompleted + job1.Lease(FakeDataFactory.LeaseToken(), _now, TimeSpan.FromMinutes(10)); + job1.Attempt(); + job1.MarkAsCompleted(_now); + await _sut.UpdateJobsAsync([job1], CancellationToken.None); + + // Act + var result = await _sut.GetDueJobsAsync(QueueKey.Default, _now, batchSize: 10, CancellationToken.None); + + // Assert β€” job2 is now the partition head and must be returned + result.Should().HaveCount(1); + result.Single().Id.Should().Be(job2.Id); + } + + /// + /// FIFO-13: When the head job of a partition exhausts its retries and is marked Failed, + /// the partition is unblocked and the next job becomes eligible for processing. 
+ /// + [Fact] + public async Task GetDueJobsAsync_WhenPartitionHeadFailed_ShouldUnblockNextJob() + { + // Arrange + var partitionKey = new PartitionKey("failed-p"); + var job1 = CreateJob(partitionKey: partitionKey); + var job2 = CreateJob(partitionKey: partitionKey); + + await _sut.InsertAsync(job1, CancellationToken.None); + await _sut.InsertAsync(job2, CancellationToken.None); + + // Transition job1 to Failed: Lease β†’ Attempt β†’ MarkAsFailed + job1.Lease(FakeDataFactory.LeaseToken(), _now, TimeSpan.FromMinutes(10)); + job1.Attempt(); + job1.MarkAsFailed(_now); + await _sut.UpdateJobsAsync([job1], CancellationToken.None); + + // Act + var result = await _sut.GetDueJobsAsync(QueueKey.Default, _now, batchSize: 10, CancellationToken.None); + + // Assert β€” job2 is now the partition head and must be returned + result.Should().HaveCount(1); + result.Single().Id.Should().Be(job2.Id); + } + + // ------------------------------------------------------------------ + // Helper + // ------------------------------------------------------------------ + + private AtomizerJob CreateJob( + PartitionKey? partitionKey = null, + string? idempotencyKey = null, + QueueKey? queueKey = null + ) + { + return AtomizerJob.Create( + queueKey ?? 
QueueKey.Default, + typeof(WriteLineJob), + "{}", + _now, + _now, + idempotencyKey: idempotencyKey, + partitionKey: partitionKey + ); + } +} diff --git a/tests/Atomizer.Tests/Atomizer.Tests.csproj b/tests/Atomizer.Tests/Atomizer.Tests.csproj index 01ea208..06d1fea 100644 --- a/tests/Atomizer.Tests/Atomizer.Tests.csproj +++ b/tests/Atomizer.Tests/Atomizer.Tests.csproj @@ -27,7 +27,6 @@ - all runtime; build; native; contentfiles; analyzers; buildtransitive diff --git a/tests/Atomizer.Tests/Exceptions/InvalidPartitionKeyExceptionTests.cs b/tests/Atomizer.Tests/Exceptions/InvalidPartitionKeyExceptionTests.cs new file mode 100644 index 0000000..16d1761 --- /dev/null +++ b/tests/Atomizer.Tests/Exceptions/InvalidPartitionKeyExceptionTests.cs @@ -0,0 +1,72 @@ +using Atomizer.Exceptions; + +namespace Atomizer.Tests.Exceptions; + +/// +/// Unit tests for . +/// +public class InvalidPartitionKeyExceptionTests +{ + [Fact] + public void Constructor_WithMessage_ShouldSetMessage() + { + // Arrange & Act + var ex = new InvalidPartitionKeyException("test message"); + + // Assert + ex.Message.Should().Contain("test message"); + ex.InnerException.Should().BeNull(); + ex.ParamName.Should().BeNull(); + } + + [Fact] + public void Constructor_WithMessageAndInnerException_ShouldSetBoth() + { + // Arrange + var inner = new Exception("inner"); + + // Act + var ex = new InvalidPartitionKeyException("test message", inner); + + // Assert + ex.Message.Should().Contain("test message"); + ex.InnerException.Should().BeSameAs(inner); + } + + [Fact] + public void Constructor_WithMessageAndParamName_ShouldSetBoth() + { + // Arrange & Act + var ex = new InvalidPartitionKeyException("test message", "myParam"); + + // Assert + ex.Message.Should().Contain("test message"); + ex.ParamName.Should().Be("myParam"); + ex.InnerException.Should().BeNull(); + } + + [Fact] + public void Constructor_WithMessageParamNameAndInnerException_ShouldSetAll() + { + // Arrange + var inner = new Exception("inner"); + + // 
Act
+        var ex = new InvalidPartitionKeyException("test message", "myParam", inner);
+
+        // Assert
+        ex.Message.Should().Contain("test message");
+        ex.ParamName.Should().Be("myParam");
+        ex.InnerException.Should().BeSameAs(inner);
+    }
+
+    [Fact]
+    public void ShouldDerive_FromArgumentException()
+    {
+        // Arrange & Act
+        var ex = new InvalidPartitionKeyException("test");
+
+        // Assert
+        ex.Should().BeAssignableTo<ArgumentException>();
+    }
+}
diff --git a/tests/Atomizer.Tests/Models/AtomizerJobPartitionTests.cs b/tests/Atomizer.Tests/Models/AtomizerJobPartitionTests.cs
new file mode 100644
index 0000000..eea10e3
--- /dev/null
+++ b/tests/Atomizer.Tests/Models/AtomizerJobPartitionTests.cs
@@ -0,0 +1,74 @@
+namespace Atomizer.Tests.Models;
+
+/// <summary>
+/// Unit tests for <see cref="AtomizerJob.IsPartitionBlocked"/>.
+/// </summary>
+public class AtomizerJobPartitionTests
+{
+    private static AtomizerJob CreateJob(PartitionKey? partitionKey = null)
+    {
+        return AtomizerJob.Create(
+            QueueKey.Default,
+            typeof(object),
+            "{}",
+            DateTimeOffset.UtcNow,
+            DateTimeOffset.UtcNow,
+            partitionKey: partitionKey
+        );
+    }
+
+    [Fact]
+    public void IsPartitionBlocked_WhenPartitionKeyIsNull_ShouldReturnFalse()
+    {
+        // Arrange
+        var job = CreateJob(partitionKey: null);
+
+        // Assert
+        job.IsPartitionBlocked.Should().BeFalse();
+    }
+
+    [Fact]
+    public void IsPartitionBlocked_WhenStatusIsPendingAndAttemptsIsZero_ShouldReturnFalse()
+    {
+        // Arrange
+        var job = CreateJob(partitionKey: new PartitionKey("orders"));
+        // Create() already sets Status=Pending, Attempts=0
+
+        // Assert
+        job.IsPartitionBlocked.Should().BeFalse();
+    }
+
+    [Fact]
+    public void IsPartitionBlocked_WhenStatusIsProcessing_ShouldReturnTrue()
+    {
+        // Arrange
+        var job = CreateJob(partitionKey: new PartitionKey("orders"));
+        job.Lease(
+            new LeaseToken($"worker:*:default:*:{Guid.NewGuid()}"),
+            DateTimeOffset.UtcNow,
+            TimeSpan.FromMinutes(10)
+        );
+        // Status is now Processing
+
+        // Assert
+        job.IsPartitionBlocked.Should().BeTrue();
+    }
+
+    [Fact]
+    public void 
IsPartitionBlocked_WhenStatusIsPendingAndAttemptsGreaterThanZero_ShouldReturnTrue()
+    {
+        // Arrange
+        var job = CreateJob(partitionKey: new PartitionKey("orders"));
+        job.Lease(
+            new LeaseToken($"worker:*:default:*:{Guid.NewGuid()}"),
+            DateTimeOffset.UtcNow,
+            TimeSpan.FromMinutes(10)
+        );
+        job.Attempt();
+        job.Reschedule(DateTimeOffset.UtcNow.AddSeconds(15), DateTimeOffset.UtcNow);
+        // Status is now Pending, Attempts == 1
+
+        // Assert
+        job.IsPartitionBlocked.Should().BeTrue();
+    }
+}
diff --git a/tests/Atomizer.Tests/Models/ValueObjects/PartitionKeyTests.cs b/tests/Atomizer.Tests/Models/ValueObjects/PartitionKeyTests.cs
new file mode 100644
index 0000000..873cfd6
--- /dev/null
+++ b/tests/Atomizer.Tests/Models/ValueObjects/PartitionKeyTests.cs
@@ -0,0 +1,144 @@
+using Atomizer.Exceptions;
+
+namespace Atomizer.Tests.Models.ValueObjects;
+
+/// <summary>
+/// Unit tests for <see cref="PartitionKey"/>.
+/// </summary>
+public class PartitionKeyTests
+{
+    [Fact]
+    public void Constructor_WithValidKey_ShouldSucceed()
+    {
+        // Arrange & Act
+        var pk = new PartitionKey("orders");
+
+        // Assert
+        pk.Key.Should().Be("orders");
+    }
+
+    [Fact]
+    public void Constructor_WithEmptyString_ShouldThrowInvalidPartitionKeyException()
+    {
+        // Arrange & Act
+        Action act = () => new PartitionKey("");
+
+        // Assert
+        act.Should()
+            .Throw<InvalidPartitionKeyException>()
+            .And.ParamName.Should()
+            .Be("key");
+    }
+
+    [Fact]
+    public void Constructor_WithWhitespaceOnly_ShouldThrowInvalidPartitionKeyException()
+    {
+        // Arrange & Act
+        Action act = () => new PartitionKey(" ");
+
+        // Assert
+        act.Should()
+            .Throw<InvalidPartitionKeyException>()
+            .And.ParamName.Should()
+            .Be("key");
+    }
+
+    [Fact]
+    public void Constructor_WithNull_ShouldThrowInvalidPartitionKeyException()
+    {
+        // Arrange & Act
+        Action act = () => new PartitionKey(null!);
+
+        // Assert
+        act.Should()
+            .Throw<InvalidPartitionKeyException>()
+            .And.ParamName.Should()
+            .Be("key");
+    }
+
+    [Fact]
+    public void Constructor_WithKeyExceeding255Chars_ShouldThrowInvalidPartitionKeyException()
+    {
+        // Arrange
+        var longKey = new string('x', 256);
+
// Act
+        Action act = () => new PartitionKey(longKey);
+
+        // Assert
+        act.Should()
+            .Throw<InvalidPartitionKeyException>()
+            .And.ParamName.Should()
+            .Be("key");
+    }
+
+    [Fact]
+    public void Constructor_WithExactly255Chars_ShouldSucceed()
+    {
+        // Arrange
+        var key = new string('x', 255);
+
+        // Act
+        var pk = new PartitionKey(key);
+
+        // Assert
+        pk.Key.Should().Be(key);
+    }
+
+    [Fact]
+    public void ExplicitConversionFromString_ShouldCreatePartitionKey()
+    {
+        // Arrange & Act
+        var pk = (PartitionKey)"orders";
+
+        // Assert
+        pk.Key.Should().Be("orders");
+    }
+
+    [Fact]
+    public void ImplicitConversionToString_ShouldReturnKeyString()
+    {
+        // Arrange
+        var pk = new PartitionKey("orders");
+
+        // Act
+        string value = pk;
+
+        // Assert
+        value.Should().Be("orders");
+    }
+
+    [Fact]
+    public void ToString_ShouldReturnKeyString()
+    {
+        // Arrange
+        var pk = new PartitionKey("orders");
+
+        // Act & Assert
+        pk.ToString().Should().Be("orders");
+    }
+
+    [Fact]
+    public void Equality_WithSameKey_ShouldBeEqual()
+    {
+        // Arrange
+        var pk1 = new PartitionKey("orders");
+        var pk2 = new PartitionKey("orders");
+
+        // Assert
+        pk1.Should().Be(pk2);
+        (pk1 == pk2).Should().BeTrue();
+    }
+
+    [Fact]
+    public void Equality_WithDifferentKey_ShouldNotBeEqual()
+    {
+        // Arrange
+        var pk1 = new PartitionKey("orders");
+        var pk2 = new PartitionKey("payments");
+
+        // Assert
+        pk1.Should().NotBe(pk2);
+        (pk1 != pk2).Should().BeTrue();
+    }
+}
diff --git a/tests/Atomizer.Tests/Storage/InMemoryStorageContractTests.cs b/tests/Atomizer.Tests/Storage/InMemoryStorageContractTests.cs
new file mode 100644
index 0000000..7bbe541
--- /dev/null
+++ b/tests/Atomizer.Tests/Storage/InMemoryStorageContractTests.cs
@@ -0,0 +1,21 @@
+using Atomizer.Abstractions;
+using Atomizer.Core;
+using Atomizer.Storage;
+using Atomizer.Tests.Utilities.StorageContract;
+
+namespace Atomizer.Tests.Storage;
+
+///
+/// Concrete contract tests for <see cref="InMemoryStorage"/>.
+/// Inherits the 8 FIFO contract tests from <see cref="AtomizerStorageContractTests"/>. 
+///
+public sealed class InMemoryStorageContractTests : AtomizerStorageContractTests
+{
+    /// <inheritdoc />
+    protected override IAtomizerStorage CreateStorage(IAtomizerClock clock)
+    {
+        var options = new InMemoryJobStorageOptions { AmountOfJobsToRetainInMemory = 100 };
+        var logger = Substitute.For<ILogger<InMemoryStorage>>();
+        return new InMemoryStorage(options, clock, logger);
+    }
+}
diff --git a/tests/Atomizer.Tests/Storage/InMemoryStorageTests.cs b/tests/Atomizer.Tests/Storage/InMemoryStorageTests.cs
index db5801a..50dd6cc 100644
--- a/tests/Atomizer.Tests/Storage/InMemoryStorageTests.cs
+++ b/tests/Atomizer.Tests/Storage/InMemoryStorageTests.cs
@@ -1,4 +1,5 @@
 ο»Ώusing System.Collections.Concurrent;
+using Atomizer;
 using Atomizer.Core;
 using Atomizer.Storage;
@@ -252,5 +253,217 @@ public async Task GetDueSchedulesAsync_WhenDueSchedulesExist_ShouldGetSchedules(
         );
         schedules[schedule.JobKey].JobKey.Should().Be(schedule.JobKey);
     }
+
+    // ---- FIFO-09: InsertAsync sequence number assignment ----
+
+    [Fact]
+    public async Task InsertAsync_WhenPartitionedJob_ShouldAssignSequenceNumberStartingAtOne()
+    {
+        // Arrange
+        var pk = new PartitionKey("order-1");
+        var job1 = AtomizerJob.Create(QueueKey.Default, typeof(string), "p1", _now, _now, partitionKey: pk);
+        var job2 = AtomizerJob.Create(QueueKey.Default, typeof(string), "p2", _now, _now, partitionKey: pk);
+
+        // Act
+        await _sut.InsertAsync(job1, CancellationToken.None);
+        await _sut.InsertAsync(job2, CancellationToken.None);
+
+        // Assert
+        job1.SequenceNumber.Should().Be(1L);
+        job2.SequenceNumber.Should().Be(2L);
+    }
+
+    [Fact]
+    public async Task InsertAsync_WhenPartitionedJobsInDifferentQueues_ShouldAssignIndependentSequences()
+    {
+        // Arrange
+        var pk = new PartitionKey("shared-key");
+        var queueA = QueueKey.Default;
+        var queueB = new QueueKey("queue-b");
+        var jobA = AtomizerJob.Create(queueA, typeof(string), "pa", _now, _now, partitionKey: pk);
+        var jobB = AtomizerJob.Create(queueB, typeof(string), "pb", _now, _now, partitionKey: pk);
+
+        // Act
+        await _sut.InsertAsync(jobA, CancellationToken.None);
+        await _sut.InsertAsync(jobB, CancellationToken.None);
+
+        // Assert — each queue starts its own sequence at 1
+        jobA.SequenceNumber.Should().Be(1L);
+        jobB.SequenceNumber.Should().Be(1L);
+    }
+
+    [Fact]
+    public async Task InsertAsync_WhenUnpartitionedJob_ShouldLeaveSequenceNumberNull()
+    {
+        // Arrange
+        var job = AtomizerJob.Create(QueueKey.Default, typeof(string), "p", _now, _now);
+
+        // Act
+        await _sut.InsertAsync(job, CancellationToken.None);
+
+        // Assert
+        job.SequenceNumber.Should().BeNull();
+    }
+
+    [Fact]
+    public async Task InsertAsync_WhenIdempotencyKeyCollision_ShouldReturnExistingIdAndAssignExistingSequenceNumber()
+    {
+        // Arrange
+        var pk = new PartitionKey("idem-pk");
+        const string idemKey = "test-idem-key";
+        var job1 = AtomizerJob.Create(QueueKey.Default, typeof(string), "p1", _now, _now, idempotencyKey: idemKey, partitionKey: pk);
+        await _sut.InsertAsync(job1, CancellationToken.None);
+
+        var job2 = AtomizerJob.Create(QueueKey.Default, typeof(string), "p2", _now, _now, idempotencyKey: idemKey, partitionKey: pk);
+
+        // Act
+        var returnedId = await _sut.InsertAsync(job2, CancellationToken.None);
+
+        // Assert
+        returnedId.Should().Be(job1.Id);
+        job2.SequenceNumber.Should().Be(job1.SequenceNumber);
+    }
+
+    [Fact]
+    public async Task InsertAsync_WhenIdempotencyKeyCollision_ShouldNotIncreaseJobCount()
+    {
+        // Arrange
+        const string idemKey = "idem-count-key";
+        var pk = new PartitionKey("count-pk");
+        var job1 = AtomizerJob.Create(QueueKey.Default, typeof(string), "p1", _now, _now, idempotencyKey: idemKey, partitionKey: pk);
+        await _sut.InsertAsync(job1, CancellationToken.None);
+
+        var job2 = AtomizerJob.Create(QueueKey.Default, typeof(string), "p2", _now, _now, idempotencyKey: idemKey, partitionKey: pk);
+
+        // Act
+        await _sut.InsertAsync(job2, CancellationToken.None);
+
+        // Assert
+        var jobs = NonPublicSpy.GetFieldValue<ConcurrentDictionary<Guid, AtomizerJob>>(
+            "_jobs",
+            _sut
+        );
+        
jobs.Count.Should().Be(1); + } + + // ---- FIFO-07/FIFO-08: GetDueJobsAsync partition blocking ---- + + [Fact] + public async Task GetDueJobsAsync_WhenTwoJobsSharePartition_ShouldReturnOnlyLowestSequenceNumber() + { + // Arrange + var pk = new PartitionKey("fifo-batch"); + var job1 = AtomizerJob.Create(QueueKey.Default, typeof(string), "p1", _now, _now, partitionKey: pk); + var job2 = AtomizerJob.Create(QueueKey.Default, typeof(string), "p2", _now, _now, partitionKey: pk); + await _sut.InsertAsync(job1, CancellationToken.None); + await _sut.InsertAsync(job2, CancellationToken.None); + + // Act + var result = await _sut.GetDueJobsAsync(QueueKey.Default, _now, 10, CancellationToken.None); + + // Assert β€” only head of partition returned + result.Should().HaveCount(1); + result[0].Id.Should().Be(job1.Id); + } + + [Fact] + public async Task GetDueJobsAsync_WhenPartitionHeadAndUnpartitionedJobExist_ShouldReturnBoth() + { + // Arrange + var pk = new PartitionKey("mixed-pk"); + var partitioned = AtomizerJob.Create(QueueKey.Default, typeof(string), "pp", _now, _now, partitionKey: pk); + var unpartitioned = AtomizerJob.Create(QueueKey.Default, typeof(string), "up", _now, _now); + await _sut.InsertAsync(partitioned, CancellationToken.None); + await _sut.InsertAsync(unpartitioned, CancellationToken.None); + + // Act + var result = await _sut.GetDueJobsAsync(QueueKey.Default, _now, 10, CancellationToken.None); + + // Assert β€” both returned + result.Should().HaveCount(2); + result.Should().Contain(j => j.Id == partitioned.Id); + result.Should().Contain(j => j.Id == unpartitioned.Id); + } + + [Fact] + public async Task GetDueJobsAsync_WhenPartitionJobIsProcessing_ShouldReturnEmpty() + { + // Arrange + var pk = new PartitionKey("blocked-pk"); + var job1 = AtomizerJob.Create(QueueKey.Default, typeof(string), "p1", _now, _now, partitionKey: pk); + await _sut.InsertAsync(job1, CancellationToken.None); + var leaseToken = new LeaseToken("inst:*:default:*:lease1"); + 
job1.Lease(leaseToken, _now, TimeSpan.FromMinutes(10)); + await _sut.UpdateJobsAsync([job1], CancellationToken.None); + + // Act + var result = await _sut.GetDueJobsAsync(QueueKey.Default, _now, 10, CancellationToken.None); + + // Assert β€” partition blocked while job is Processing + result.Should().BeEmpty(); + } + + [Fact] + public async Task GetDueJobsAsync_WhenPartitionJobIsPendingWithAttempts_ShouldReturnEmpty() + { + // Arrange + var pk = new PartitionKey("retry-pk"); + var job1 = AtomizerJob.Create(QueueKey.Default, typeof(string), "p1", _now, _now, partitionKey: pk); + await _sut.InsertAsync(job1, CancellationToken.None); + // Simulate retry state: Lease β†’ Attempt β†’ Reschedule (Pending with Attempts = 1) + var leaseToken = new LeaseToken("inst:*:default:*:lease2"); + job1.Lease(leaseToken, _now, TimeSpan.FromMinutes(10)); + job1.Attempt(); + job1.Reschedule(_now, _now); + await _sut.UpdateJobsAsync([job1], CancellationToken.None); + + // Act + var result = await _sut.GetDueJobsAsync(QueueKey.Default, _now, 10, CancellationToken.None); + + // Assert β€” partition blocked while job is Pending with Attempts > 0 + result.Should().BeEmpty(); + } + + [Fact] + public async Task GetDueJobsAsync_WhenQueueABlockedPartitionSameKeyAsQueueB_ShouldReturnQueueBJobUnaffected() + { + // Arrange + var pk = new PartitionKey("cross-queue-pk"); + var queueB = new QueueKey("queue-b-test"); + var jobA = AtomizerJob.Create(QueueKey.Default, typeof(string), "pa", _now, _now, partitionKey: pk); + var jobB = AtomizerJob.Create(queueB, typeof(string), "pb", _now, _now, partitionKey: pk); + await _sut.InsertAsync(jobA, CancellationToken.None); + await _sut.InsertAsync(jobB, CancellationToken.None); + // Block partition in queue A + var leaseToken = new LeaseToken("inst:*:default:*:lease3"); + jobA.Lease(leaseToken, _now, TimeSpan.FromMinutes(10)); + await _sut.UpdateJobsAsync([jobA], CancellationToken.None); + + // Act β€” query queue B + var result = await 
_sut.GetDueJobsAsync(queueB, _now, 10, CancellationToken.None); + + // Assert β€” queue B is unaffected + result.Should().HaveCount(1); + result[0].Id.Should().Be(jobB.Id); + } + + [Fact] + public async Task GetDueJobsAsync_WhenProcessingJobHasExpiredVisibleAt_ShouldReturnIt() + { + // Arrange β€” Processing job with VisibleAt in the past (expired lease) + var job = AtomizerJob.Create(QueueKey.Default, typeof(string), "p", _now, _now); + await _sut.InsertAsync(job, CancellationToken.None); + var expiredNow = _now.AddMinutes(-10); + var leaseToken = new LeaseToken("inst:*:default:*:lease4"); + job.Lease(leaseToken, expiredNow, TimeSpan.FromMinutes(1)); // VisibleAt = expiredNow + 1min = _now - 9min + await _sut.UpdateJobsAsync([job], CancellationToken.None); + + // Act β€” query at _now (VisibleAt is in the past) + var result = await _sut.GetDueJobsAsync(QueueKey.Default, _now, 10, CancellationToken.None); + + // Assert β€” expired lease job is still returned (existing behavior must not regress) + result.Should().HaveCount(1); + result[0].Id.Should().Be(job.Id); + } } }