diff --git a/sdk/storage/azure-storage-blob/assets.json b/sdk/storage/azure-storage-blob/assets.json index 0c3832771777..f6d79f650b05 100644 --- a/sdk/storage/azure-storage-blob/assets.json +++ b/sdk/storage/azure-storage-blob/assets.json @@ -2,5 +2,5 @@ "AssetsRepo": "Azure/azure-sdk-assets", "AssetsRepoPrefixPath": "java", "TagPrefix": "java/storage/azure-storage-blob", - "Tag": "java/storage/azure-storage-blob_1f689f90f0" + "Tag": "java/storage/azure-storage-blob_f0eadf5927" } diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobContentValidationAsyncUploadTests.java b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobContentValidationAsyncUploadTests.java index 0d9b8e0e45e8..2527e707b29e 100644 --- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobContentValidationAsyncUploadTests.java +++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobContentValidationAsyncUploadTests.java @@ -5,6 +5,7 @@ import com.azure.core.http.HttpHeaders; import com.azure.core.util.BinaryData; +import com.azure.core.util.FluxUtil; import com.azure.storage.blob.models.BlobRequestConditions; import com.azure.storage.blob.models.PageRange; import com.azure.storage.blob.models.ParallelTransferOptions; @@ -25,13 +26,16 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; +import org.junit.jupiter.params.provider.MethodSource; import reactor.core.publisher.Flux; import reactor.test.StepVerifier; import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; +import java.nio.channels.AsynchronousFileChannel; import java.nio.file.Files; +import java.nio.file.StandardOpenOption; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; import java.util.Collections; @@ -59,6 +63,13 @@ public class BlobContentValidationAsyncUploadTests extends BlobTestBase { private 
static final long LARGE_UPLOAD_BLOCK_SIZE_BYTES = 8L * Constants.MB; private static final int LARGE_UPLOAD_MAX_CONCURRENCY = 8; + /** + * {@link BlobTestBase#fuzzyParallelUploadLargeMultiPartCases()} starts at ~96 MiB; above this threshold the fuzzy + * parallel upload helpers stream from a temp source file and verify via {@code downloadToFile} + + * {@link BlobTestBase#compareFiles(File, File, long, long)} to avoid materializing the full payload twice in heap. + */ + private static final int FUZZY_PARALLEL_UPLOAD_FILE_ROUND_TRIP_THRESHOLD_BYTES = 96 * Constants.MB; + private static final String MD5_AND_CRC64_EXCLUSIVE_MESSAGE = "Only one form of transactional content validation may be used."; @@ -875,6 +886,104 @@ public void uploadChunkedRandomSizesRoundTripDataIntegrity() { + ")"); } + // ---------- Fuzzy parallel upload (async) ---------- + + @ParameterizedTest + @MethodSource("com.azure.storage.blob.BlobTestBase#fuzzyParallelUploadPutBlobReplayableCases") + public void fuzzyParallelUploadPutBlobReplayableRoundTrip(int payloadBytes, long segmentBytes, int maxConcurrency) + throws IOException { + assertParallelUploadFuzzyRoundTripAsync("putBlobReplay", payloadBytes, segmentBytes, maxConcurrency); + } + + @LiveOnly // Staging-only cases: Put Block URLs include random IDs. + @ParameterizedTest + @MethodSource("com.azure.storage.blob.BlobTestBase#fuzzyParallelUploadSmallPayloadStagingCases") + public void fuzzyParallelUploadSmallPayloadRoundTripRequiresLiveStaging(int payloadBytes, long segmentBytes, + int maxConcurrency) throws IOException { + assertParallelUploadFuzzyRoundTripAsync("smallPayloadStaging", payloadBytes, segmentBytes, maxConcurrency); + } + + @LiveOnly // payload > segment for every tuple; always staging/Put Block. 
+ @ParameterizedTest + @MethodSource("com.azure.storage.blob.BlobTestBase#fuzzyParallelUploadSub4MiBCases") + public void fuzzyParallelUploadSubFourMiBBlobRoundTrip(int payloadBytes, long segmentBytes, int maxConcurrency) + throws IOException { + assertParallelUploadFuzzyRoundTripAsync("subFourMiB", payloadBytes, segmentBytes, maxConcurrency); + } + + @LiveOnly // Staging-only cases. + @ParameterizedTest + @MethodSource("com.azure.storage.blob.BlobTestBase#fuzzyParallelUploadFourMiBBoundaryStagingCases") + public void fuzzyParallelUploadFourMiBBoundaryRoundTripRequiresLiveStaging(int payloadBytes, long segmentBytes, + int maxConcurrency) throws IOException { + assertParallelUploadFuzzyRoundTripAsync("fourMiBBoundaryStaging", payloadBytes, segmentBytes, maxConcurrency); + } + + @LiveOnly // Chunked uploads only. + @ParameterizedTest + @MethodSource("com.azure.storage.blob.BlobTestBase#fuzzyParallelUploadMediumMultiPartCases") + public void fuzzyParallelUploadMediumMultiPartRoundTrip(int payloadBytes, long segmentBytes, int maxConcurrency) + throws IOException { + assertParallelUploadFuzzyRoundTripAsync("mediumMultiPart", payloadBytes, segmentBytes, maxConcurrency); + } + + @LiveOnly // Large chunked uploads. 
+ @ParameterizedTest + @MethodSource("com.azure.storage.blob.BlobTestBase#fuzzyParallelUploadLargeMultiPartCases") + public void fuzzyParallelUploadLargeMultiPartRoundTrip(int payloadBytes, long segmentBytes, int maxConcurrency) + throws IOException { + assertParallelUploadFuzzyRoundTripAsync("largeMultiPart", payloadBytes, segmentBytes, maxConcurrency); + } + + private void assertParallelUploadFuzzyRoundTripAsync(String caseKind, int payloadBytes, long segmentBytes, + int maxConcurrency) throws IOException { + BlobAsyncClient client = createBlobAsyncClientWithRequestSniffer(new CopyOnWriteArrayList<>()); + + ParallelTransferOptions parallelOptions = new ParallelTransferOptions().setBlockSizeLong(segmentBytes) + .setMaxSingleUploadSizeLong(segmentBytes) + .setMaxConcurrency(maxConcurrency); + + String assertionMessage = "Fuzzy parallel upload [" + caseKind + "] payloadBytes=" + payloadBytes + + ", segmentBytes=" + segmentBytes + ", maxConcurrency=" + maxConcurrency; + + if (payloadBytes >= FUZZY_PARALLEL_UPLOAD_FILE_ROUND_TRIP_THRESHOLD_BYTES) { + File sourceFile = getRandomFile(payloadBytes); + File outFile = Files.createTempFile("blob-cv-fuzzy-parallel-dl-async", ".bin").toFile(); + outFile.deleteOnExit(); + int readChunkSize = (int) Math.min(8L * Constants.MB, Math.max(64 * Constants.KB, segmentBytes)); + AsynchronousFileChannel channel + = AsynchronousFileChannel.open(sourceFile.toPath(), StandardOpenOption.READ); + try { + Flux<ByteBuffer> data = FluxUtil.readFile(channel, readChunkSize, 0, payloadBytes); + BlobParallelUploadOptions options + = new BlobParallelUploadOptions(data).setParallelTransferOptions(parallelOptions) + .setRequestConditions(new BlobRequestConditions()) + .setContentValidationAlgorithm(ContentValidationAlgorithm.CRC64); + client.uploadWithResponse(options).block(); + } finally { + channel.close(); + } + client.downloadToFile(outFile.getPath(), true).block(); + assertTrue(compareFiles(sourceFile, outFile, 0, payloadBytes), assertionMessage); + if 
(!sourceFile.delete()) { + sourceFile.deleteOnExit(); + } + if (!outFile.delete()) { + outFile.deleteOnExit(); + } + } else { + byte[] randomData = getRandomByteArray(payloadBytes); + Flux<ByteBuffer> data = Flux.just(ByteBuffer.wrap(randomData)); + BlobParallelUploadOptions options + = new BlobParallelUploadOptions(data).setParallelTransferOptions(parallelOptions) + .setRequestConditions(new BlobRequestConditions()) + .setContentValidationAlgorithm(ContentValidationAlgorithm.CRC64); + client.uploadWithResponse(options).block(); + byte[] downloaded = client.downloadContent().block().toBytes(); + assertArrayEquals(randomData, downloaded, assertionMessage); + } + } + @LiveOnly // This test is too large for the test proxy. @Test public void blockBlobSimpleUploadRandomSizeRoundTripDataIntegrity() { diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobContentValidationUploadTests.java b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobContentValidationUploadTests.java index 4820b771d4e6..66a6c29ac006 100644 --- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobContentValidationUploadTests.java +++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobContentValidationUploadTests.java @@ -30,9 +30,11 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.EnumSource; +import org.junit.jupiter.params.provider.MethodSource; import java.io.ByteArrayInputStream; import java.io.File; +import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.nio.ByteBuffer; @@ -66,6 +68,13 @@ public class BlobContentValidationUploadTests extends BlobTestBase { private static final long LARGE_UPLOAD_BLOCK_SIZE_BYTES = 8L * Constants.MB; private static final int LARGE_UPLOAD_MAX_CONCURRENCY = 8; + /** + * {@link BlobTestBase#fuzzyParallelUploadLargeMultiPartCases()} starts at ~96 MiB; above this 
threshold the fuzzy + * parallel upload helpers use temp files and streaming download/compare to avoid holding the full payload twice in + * heap (upload buffer + {@code downloadContent().toBytes()}). + */ + private static final int FUZZY_PARALLEL_UPLOAD_FILE_ROUND_TRIP_THRESHOLD_BYTES = 96 * Constants.MB; + private static final String MD5_AND_CRC64_EXCLUSIVE_MESSAGE = "Only one form of transactional content validation may be used."; @@ -1133,6 +1142,98 @@ public void uploadChunkedRandomSizesRoundTripDataIntegrity() { + ")"); } + // ---------- Fuzzy parallel upload (deterministic grids) ---------- + + @ParameterizedTest + @MethodSource("com.azure.storage.blob.BlobTestBase#fuzzyParallelUploadPutBlobReplayableCases") + public void fuzzyParallelUploadPutBlobReplayableRoundTrip(int payloadBytes, long segmentBytes, int maxConcurrency) + throws IOException { + assertParallelUploadFuzzyRoundTrip("putBlobReplay", payloadBytes, segmentBytes, maxConcurrency); + } + + @LiveOnly // Staging-only cases: Put Block URLs include random IDs + @ParameterizedTest + @MethodSource("com.azure.storage.blob.BlobTestBase#fuzzyParallelUploadSmallPayloadStagingCases") + public void fuzzyParallelUploadSmallPayloadRoundTripRequiresLiveStaging(int payloadBytes, long segmentBytes, + int maxConcurrency) throws IOException { + assertParallelUploadFuzzyRoundTrip("smallPayloadStaging", payloadBytes, segmentBytes, maxConcurrency); + } + + @LiveOnly // payload > segment for every tuple; always staging/Put Block. + @ParameterizedTest + @MethodSource("com.azure.storage.blob.BlobTestBase#fuzzyParallelUploadSub4MiBCases") + public void fuzzyParallelUploadSubFourMiBBlobRoundTrip(int payloadBytes, long segmentBytes, int maxConcurrency) + throws IOException { + assertParallelUploadFuzzyRoundTrip("subFourMiB", payloadBytes, segmentBytes, maxConcurrency); + } + + @LiveOnly // Staging-only cases. 
+ @ParameterizedTest + @MethodSource("com.azure.storage.blob.BlobTestBase#fuzzyParallelUploadFourMiBBoundaryStagingCases") + public void fuzzyParallelUploadFourMiBBoundaryRoundTripRequiresLiveStaging(int payloadBytes, long segmentBytes, + int maxConcurrency) throws IOException { + assertParallelUploadFuzzyRoundTrip("fourMiBBoundaryStaging", payloadBytes, segmentBytes, maxConcurrency); + } + + @LiveOnly // payload > segment throughout; chunked upload. + @ParameterizedTest + @MethodSource("com.azure.storage.blob.BlobTestBase#fuzzyParallelUploadMediumMultiPartCases") + public void fuzzyParallelUploadMediumMultiPartRoundTrip(int payloadBytes, long segmentBytes, int maxConcurrency) + throws IOException { + assertParallelUploadFuzzyRoundTrip("mediumMultiPart", payloadBytes, segmentBytes, maxConcurrency); + } + + @LiveOnly // payload >> segment throughout; chunked upload / large payloads. + @ParameterizedTest + @MethodSource("com.azure.storage.blob.BlobTestBase#fuzzyParallelUploadLargeMultiPartCases") + public void fuzzyParallelUploadLargeMultiPartRoundTrip(int payloadBytes, long segmentBytes, int maxConcurrency) + throws IOException { + assertParallelUploadFuzzyRoundTrip("largeMultiPart", payloadBytes, segmentBytes, maxConcurrency); + } + + private void assertParallelUploadFuzzyRoundTrip(String caseKind, int payloadBytes, long segmentBytes, + int maxConcurrency) throws IOException { + BlobClient client = createBlobClientWithRequestSniffer(new CopyOnWriteArrayList<>()); + + ParallelTransferOptions parallelOptions = new ParallelTransferOptions().setBlockSizeLong(segmentBytes) + .setMaxSingleUploadSizeLong(segmentBytes) + .setMaxConcurrency(maxConcurrency); + + String assertionMessage = "Fuzzy parallel upload [" + caseKind + "] payloadBytes=" + payloadBytes + + ", segmentBytes=" + segmentBytes + ", maxConcurrency=" + maxConcurrency; + + if (payloadBytes >= FUZZY_PARALLEL_UPLOAD_FILE_ROUND_TRIP_THRESHOLD_BYTES) { + File sourceFile = getRandomFile(payloadBytes); + File 
outFile = Files.createTempFile("blob-cv-fuzzy-parallel-dl", ".bin").toFile(); + outFile.deleteOnExit(); + try (InputStream data = new FileInputStream(sourceFile)) { + BlobParallelUploadOptions options + = new BlobParallelUploadOptions(data).setParallelTransferOptions(parallelOptions) + .setRequestConditions(new BlobRequestConditions()) + .setContentValidationAlgorithm(ContentValidationAlgorithm.CRC64); + client.uploadWithResponse(options, null, Context.NONE); + } + client.downloadToFile(outFile.getPath(), true); + assertTrue(compareFiles(sourceFile, outFile, 0, payloadBytes), assertionMessage); + if (!sourceFile.delete()) { + sourceFile.deleteOnExit(); + } + if (!outFile.delete()) { + outFile.deleteOnExit(); + } + } else { + byte[] randomData = getRandomByteArray(payloadBytes); + InputStream data = new ByteArrayInputStream(randomData); + BlobParallelUploadOptions options + = new BlobParallelUploadOptions(data).setParallelTransferOptions(parallelOptions) + .setRequestConditions(new BlobRequestConditions()) + .setContentValidationAlgorithm(ContentValidationAlgorithm.CRC64); + client.uploadWithResponse(options, null, Context.NONE); + byte[] downloaded = client.downloadContent().toBytes(); + assertArrayEquals(randomData, downloaded, assertionMessage); + } + } + @LiveOnly // This test is too large for the test proxy. 
@Test public void blockBlobSimpleUploadRandomSizeRoundTripDataIntegrity() { diff --git a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobTestBase.java b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobTestBase.java index 514ff455fb90..1f5b30f0fff1 100644 --- a/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobTestBase.java +++ b/sdk/storage/azure-storage-blob/src/test/java/com/azure/storage/blob/BlobTestBase.java @@ -1518,4 +1518,129 @@ protected static long expectedStructuredMessageEncodedLengthChunked(int totalUne } return sum; } + + /** + * Every tuple keeps payloadBytes <= segmentBytes, so the parallel upload path issues a single Put Blob (no + * Put Block IDs), which replays under the test proxy unlike staging-heavy cases. + *

+ * Sizes are deliberately non-power-of-two (e.g. 7 * KB + 3) and use mixed segment ceilings (64 KiB + * through multi-MiB) to catch alignment and buffer edge cases; rows include the exact 4 MiB service boundary + * and several concurrency values (1–8) to exercise parallel request fan-out without live-only staging. + */ + protected static Stream<Arguments> fuzzyParallelUploadPutBlobReplayableCases() { + return Stream.of(Arguments.of(7 * Constants.KB + 3, 64L * Constants.KB, 1), + Arguments.of(7 * Constants.KB + 3, 128L * Constants.KB, 4), + Arguments.of(41 * Constants.KB + 17, 256L * Constants.KB, 1), + Arguments.of(41 * Constants.KB + 17, 256L * Constants.KB, 8), + Arguments.of(199 * Constants.KB + 5, 512L * Constants.KB, 2), + Arguments.of(512 * Constants.KB - 31, 1L * Constants.MB, 8), + Arguments.of(896 * Constants.KB + 101, 1L * Constants.MB, 6), + Arguments.of(4 * Constants.MB, 4L * Constants.MB, 1), + Arguments.of(4 * Constants.MB, 7L * Constants.MB + 919, 3)); + } + + /** + * payloadBytes > segmentBytes, so uploads still go through Put Block staging even though totals are only + * hundreds of KiB—too small for the proxy when block IDs vary per run. + *

+ * One row pairs a ~200 KiB payload with a 64 KiB segment and moderate concurrency; the other uses a + * ~512 KiB payload with a 1 KiB segment to force many tiny blocks (stress scheduling and per-block CRC64 + * framing) without the cost of the large multi-part grids. + */ + protected static Stream<Arguments> fuzzyParallelUploadSmallPayloadStagingCases() { + return Stream.of(Arguments.of(200 * Constants.KB, 64L * Constants.KB, 3), + Arguments.of(512 * Constants.KB - 31, 1L * Constants.KB, 1)); + } + + /** + * payloadBytes > segmentBytes and payloadBytes <= 4 * Constants.MB - 1 (the ceiling field), so the blob + * stays strictly under the 4 MiB transactional CRC64-header path while uploads remain + * chunked—live-only because of Put Block identity churn. + *

+ * Values mix MiB/KiB segment sizes with offsets (e.g. + 19, - 903) so part counts and last-block + * lengths are not powers of two; the last rows hug ceiling with awkward divisors in segmentBytes to + * stress remainder handling near the sub-4 MiB limit. + */ + protected static Stream<Arguments> fuzzyParallelUploadSub4MiBCases() { + final int ceiling = (4 * Constants.MB) - 1; + return Stream.of(Arguments.of(1 * Constants.MB + 1, 1L * Constants.MB, 1), + Arguments.of(1 * Constants.MB + 1, 2L * Constants.KB, 8), + Arguments.of((5 * Constants.MB) / 4 + 19, 256L * Constants.KB, 4), + Arguments.of(2 * Constants.MB - 903, 1L * Constants.MB, 2), + Arguments.of(2 * Constants.MB + 33, 1L * Constants.KB, 1), + Arguments.of(2 * Constants.MB + 33, 1L * Constants.MB, 8), + Arguments.of((11 * Constants.MB) / 4 - 17, 512L * Constants.KB, 6), + Arguments.of(3 * Constants.MB - 777, 2L * Constants.MB, 8), + Arguments.of(3 * Constants.MB - 1, 1L * Constants.MB, 1), Arguments.of(ceiling - 511, 1L * Constants.MB, 4), + Arguments.of(ceiling - 511, 1L * Constants.MB + 511, 2), + Arguments.of(ceiling, (long) (ceiling / 7 + 17), 3), Arguments.of(ceiling, (long) (ceiling / 2 + 1), 8)); + } + + /** + * Centers on 4 * Constants.MB - 1, exactly 4 * Constants.MB, and just above 4 MiB, with segment + * sizes spanning KiB through multi-MiB—exercising the SDK/service boundary where single-shot vs block staging and + * CRC64 header vs structured-message rules flip, while keeping deterministic Put Blob coverage in the replayable + * supplier above. + *

+ * Includes near-boundary payloads (e.g. -8192, +31, +8191 from 4 MiB) so neither total size nor last segment + * length aligns to typical buffer multiples. + */ + protected static Stream<Arguments> fuzzyParallelUploadFourMiBBoundaryStagingCases() { + final int minus = (4 * Constants.MB) - 1; + final int plus = (4 * Constants.MB) + 1; + return Stream.of(Arguments.of(minus, 1L * Constants.MB, 1), Arguments.of(minus, 512L * Constants.KB, 6), + Arguments.of(minus, 2L * Constants.MB, 8), Arguments.of((4 * Constants.MB) - 8192, 1L * Constants.KB, 4), + Arguments.of(4 * Constants.MB, (long) (4 * Constants.MB / 2), 8), + Arguments.of(4 * Constants.MB, 256L * Constants.KB, 2), Arguments.of(plus, 1L * Constants.MB, 1), + Arguments.of(plus, 2L * Constants.MB, 8), Arguments.of(plus, 1L * Constants.KB, 7), + Arguments.of((4 * Constants.MB) + 31, 511L * Constants.KB + 409, 4), + Arguments.of((4 * Constants.MB) + 8191, 3L * Constants.MB - 413, 6)); + } + + /** + * All rows keep payloadBytes > segmentBytes with totals roughly 6–80 MiB—large enough for meaningful parallel + * block fan-out and structured-message segments, but cheaper than {@link #fuzzyParallelUploadLargeMultiPartCases}. + *

+ * Block sizes step through common service limits (1–8 MiB, half-MiB tail values); concurrency 1–8 pairs with + * imbalanced payloads (e.g. 701, 333) to flush merge/retry edge cases. + */ + protected static Stream<Arguments> fuzzyParallelUploadMediumMultiPartCases() { + return Stream.of(Arguments.of(6 * Constants.MB + 701, Constants.MB, 1), + Arguments.of(6 * Constants.MB + 701, 3L * Constants.MB + 271, 4), + Arguments.of(9 * Constants.MB + 333, 2L * Constants.MB, 1), + Arguments.of(9 * Constants.MB + 333, 3L * Constants.MB + 199, 8), + Arguments.of(12 * Constants.MB + 901, 4L * Constants.MB + 901, 2), + Arguments.of(14 * Constants.MB, 500L * Constants.KB + 13, 6), + Arguments.of(18 * Constants.MB - 4021, 5L * Constants.MB - 701, 3), + Arguments.of(24 * Constants.MB, 8L * Constants.MB, 8), + Arguments.of(28 * Constants.MB + 56789, 7L * Constants.MB + 13, 2), + Arguments.of(31 * Constants.MB, 1024L * Constants.KB + 17, 4), + Arguments.of(40 * Constants.MB + 12345, 7L * Constants.MB + 13, 3), + Arguments.of(48 * Constants.MB - 777, 5L * Constants.MB + 809L * Constants.KB, 6), + Arguments.of(56 * Constants.MB + 19, 9L * Constants.MB + 4096, 8), + Arguments.of(72 * Constants.MB, 4L * Constants.MB + 65536, 8), + Arguments.of(80 * Constants.MB + 321, 13L * Constants.MB - 3073, 1)); + } + + /** + * Stresses high block counts and long-running parallel uploads (~96–320 MiB payloads) with service-realistic segment + * sizes (8–61 MiB class) and heavy concurrency. + *

+ * The final rows use named near-256/288/320 MiB totals with irregular byte tails to keep total bytes and + * block remainders off common multiples while still bounding runtime for Live-only CI. + */ + protected static Stream<Arguments> fuzzyParallelUploadLargeMultiPartCases() { + final int payload257MiBPlus = (int) (257L * Constants.MB + 18881); + final int payload288MiBPlus = (int) (288L * Constants.MB + 7777); + final int payload320MiBPlus = (int) (320L * Constants.MB + 1999); + return Stream.of(Arguments.of(96 * Constants.MB + 17, 8L * Constants.MB + 511, 2), + Arguments.of(112 * Constants.MB, 15L * Constants.MB + 4096, 8), + Arguments.of(128 * Constants.MB + 45673, 17L * Constants.MB - 11264 + 173, 4), + Arguments.of(160 * Constants.MB + 12345, 12L * Constants.MB + 8192, 8), + Arguments.of(192 * Constants.MB + 9876, 31L * Constants.MB - 513, 8), + Arguments.of(224 * Constants.MB, 23L * Constants.MB + 524288, 8), + Arguments.of(payload257MiBPlus, 61L * Constants.MB + 23L * Constants.KB, 6), + Arguments.of(payload288MiBPlus, 36L * Constants.MB + 513, 8), + Arguments.of(payload320MiBPlus, 16L * Constants.MB + 511, 8)); + } }