From 1dcb428309e90547964b7d37f3f4046f67187620 Mon Sep 17 00:00:00 2001 From: Anatoly Karlov Date: Tue, 24 Mar 2026 21:10:39 +0700 Subject: [PATCH] release FileStoragePresignedMultipartServlet --- README.md | 85 ++- minio-local-cluster/minio-cluster.yml | 51 -- minio-local-cluster/minio.yml | 29 - minio-local-cluster/nginx-minio-cluster.conf | 104 ---- minio-local-cluster/nginx-minio.conf | 98 --- pom.xml | 22 +- .../storage/configuration/S3ClientConfig.java | 53 -- .../configuration/S3SdkV2ClientConfig.java | 4 + .../properties/S3Properties.java | 22 - .../properties/S3SdkV2Properties.java | 3 + .../storage/handler/FileStorageHandler.java | 56 +- .../FileStoragePresignedMultipartHandler.java | 72 +++ .../service/S3FileMetadataService.java | 172 ++++++ .../S3LegacyMultipartStorageService.java | 128 ++++ .../S3PresignedMultipartStorageService.java | 350 +++++++++++ .../file/storage/service/S3Service.java | 348 ----------- .../storage/service/S3StorageService.java | 373 ++++++++++++ .../file/storage/service/S3V2Service.java | 574 ------------------ .../file/storage/service/StorageService.java | 28 - .../exception/ExtractMetadataException.java | 12 - .../exception/FileNotFoundException.java | 12 - .../service/exception/StorageException.java | 12 - .../exception/WaitingUploadException.java | 12 - .../FileStoragePresignedMultipartServlet.java | 30 + src/main/resources/application.yml | 1 + .../file/storage/CatchExceptionsTest.java | 48 -- .../vality/file/storage/FileStorageTest.java | 221 +++++-- 27 files changed, 1398 insertions(+), 1522 deletions(-) delete mode 100644 minio-local-cluster/minio-cluster.yml delete mode 100644 minio-local-cluster/minio.yml delete mode 100644 minio-local-cluster/nginx-minio-cluster.conf delete mode 100644 minio-local-cluster/nginx-minio.conf delete mode 100644 src/main/java/dev/vality/file/storage/configuration/S3ClientConfig.java delete mode 100644 src/main/java/dev/vality/file/storage/configuration/properties/S3Properties.java create 
mode 100644 src/main/java/dev/vality/file/storage/handler/FileStoragePresignedMultipartHandler.java create mode 100644 src/main/java/dev/vality/file/storage/service/S3FileMetadataService.java create mode 100644 src/main/java/dev/vality/file/storage/service/S3LegacyMultipartStorageService.java create mode 100644 src/main/java/dev/vality/file/storage/service/S3PresignedMultipartStorageService.java delete mode 100644 src/main/java/dev/vality/file/storage/service/S3Service.java create mode 100644 src/main/java/dev/vality/file/storage/service/S3StorageService.java delete mode 100644 src/main/java/dev/vality/file/storage/service/S3V2Service.java delete mode 100644 src/main/java/dev/vality/file/storage/service/StorageService.java delete mode 100644 src/main/java/dev/vality/file/storage/service/exception/ExtractMetadataException.java delete mode 100644 src/main/java/dev/vality/file/storage/service/exception/FileNotFoundException.java delete mode 100644 src/main/java/dev/vality/file/storage/service/exception/StorageException.java delete mode 100644 src/main/java/dev/vality/file/storage/service/exception/WaitingUploadException.java create mode 100644 src/main/java/dev/vality/file/storage/servlet/FileStoragePresignedMultipartServlet.java delete mode 100644 src/test/java/dev/vality/file/storage/CatchExceptionsTest.java diff --git a/README.md b/README.md index db6babc9..0c931faf 100644 --- a/README.md +++ b/README.md @@ -1,11 +1,52 @@ # file-storage -Сервис, обращающийся напрямую к s3 через AWS JAVA SDK. Используется для генерации pre-signed URL that can be used to -access an Amazon S3 resource without requiring the user of the URL to know the account's AWS security credentials. +`file-storage` — сервис для хранения файлов поверх S3-совместимого object storage. 
+Сервис выступает как orchestration/control-plane над бакетом: -## Параметры запуска +- создаёт идентификаторы файлов; +- сохраняет и читает metadata; +- выдаёт pre-signed URL для upload/download; +- поддерживает multipart upload. -Для работы с 2 версией `AWS SDK S3 V2` +Сервис ориентирован на S3-compatible backends, в тестах используются MinIO и Ceph. + +## Что умеет + +- Single upload через pre-signed `PUT URL` +- Download через pre-signed `GET URL` +- Получение `FileData` по `fileDataId` +- Multipart upload в двух режимах: + - legacy data-plane flow через thrift + - direct-to-S3 presigned multipart flow со статусом загрузки + +## API + +Сервис поднимает два thrift endpoint: + +- `/file_storage/v2` + Основной совместимый API для single upload, download, metadata и legacy multipart flow. +- `/file_storage/presigned-multipart/v1` + Отдельный API для presigned multipart upload, где части загружаются напрямую в S3-compatible storage. + +Точные контракты находятся в артефакте `file-storage-proto`. 
+ +Для presigned multipart flow публичный lifecycle выглядит так: + +- `CreateMultipartUpload` — открыть multipart-сессию и получить её состояние +- `GetMultipartUpload` — получить текущее состояние multipart-сессии +- `PresignMultipartUploadPart` — получить URL и обязательные headers для загрузки части +- `CompleteMultipartUpload` — завершить multipart upload по списку `etag + sequence_part` +- `AbortMultipartUpload` — прервать multipart upload + +Новый multipart API возвращает статус загрузки: + +- `pending_upload` +- `uploaded` +- `aborted` + +## Конфигурация + +Основные параметры задаются через `application.yml`: ```yaml s3-sdk-v2: @@ -14,29 +55,31 @@ s3-sdk-v2: region: 'RU' access-key: 'minio' secret-key: 'minio123' + multipart-url-ttl: '1h' ``` -## Minio +Что означают параметры: -Если сервисом используется 2 версия `AWS SDK S3 V2`, и в качестве s3 кластера используется `minio`, то для поддержки -версионирования объектов __кластер должен использовать минимум несколько драйверов при старте__ для включения -механизма `Erasure Code` +- `endpoint` — URL S3-compatible storage +- `bucket-name` — бакет, в котором сервис хранит файлы и metadata +- `region` — S3 region +- `access-key` / `secret-key` — credentials для доступа к storage +- `multipart-url-ttl` — TTL для presigned URL на multipart parts -Для включения механизма `Erasure Code` запуск сервера `minio` с использованием нескольких драйверов может выглядеть -следующим образом +Сервис сам создаёт бакет и включает versioning, если это поддерживается backend-ом. -```shell -minio server /data{1...12} -``` +## Metadata Model + +Для каждого `fileDataId` сервис хранит: -Цитата из официальной документации -> **Versioning feature is only available in erasure coded and distributed erasure coded setups.** +- metadata объекта; +- сам файл как отдельную object version. 
-Источники +Из этого строятся: -- [versioning-guide](https://docs.min.io/docs/minio-bucket-versioning-guide.html) -- [erasure-code-quickstart-guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide) +- `getFileData` +- `generateDownloadUrl` +- multipart completion flow -В репозитории в папке [minio-local-cluster](./minio-local-cluster/) содержатся примеры `docker-compose` манифестов -(спизж**ных из официальной репы https://github.com/minio/minio/tree/master/docs/orchestration/docker-compose) -для локального запуска сервера `minio` с включенным механизмом `Erasure Code` +Иными словами, клиентский контракт завязан на `fileDataId`, а конкретные object versions остаются внутренней деталью +реализации. diff --git a/minio-local-cluster/minio-cluster.yml b/minio-local-cluster/minio-cluster.yml deleted file mode 100644 index 53a95a14..00000000 --- a/minio-local-cluster/minio-cluster.yml +++ /dev/null @@ -1,51 +0,0 @@ -version: '3.7' - -# Settings and configurations that are common for all containers -x-minio-common: &minio-common - image: quay.io/minio/minio:RELEASE.2021-10-13T00-23-17Z - command: server --console-address ":9001" http://minio{1...4}/data{1...2} - expose: - - "9000" - - "9001" - environment: - MINIO_ROOT_USER: minio - MINIO_ROOT_PASSWORD: minio123 - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] - interval: 30s - timeout: 20s - retries: 3 - -# starts 4 docker containers running minio server instances. -# using nginx reverse proxy, load balancing, you can access -# it through port 9000. 
-services: - minio1: - <<: *minio-common - hostname: minio1 - - minio2: - <<: *minio-common - hostname: minio2 - - minio3: - <<: *minio-common - hostname: minio3 - - minio4: - <<: *minio-common - hostname: minio4 - - nginx: - image: nginx:1.19.2-alpine - hostname: nginx - volumes: - - ./nginx-minio-cluster.conf:/etc/nginx/nginx.conf:ro - ports: - - "9000:9000" - - "9001:9001" - depends_on: - - minio1 - - minio2 - - minio3 - - minio4 diff --git a/minio-local-cluster/minio.yml b/minio-local-cluster/minio.yml deleted file mode 100644 index b6bb0c44..00000000 --- a/minio-local-cluster/minio.yml +++ /dev/null @@ -1,29 +0,0 @@ -version: '3.7' - -services: - minio: - image: quay.io/minio/minio:RELEASE.2021-10-13T00-23-17Z - command: server --console-address ":9001" /data{1...12} - hostname: minio - expose: - - "9000" - - "9001" - environment: - MINIO_ROOT_USER: minio - MINIO_ROOT_PASSWORD: minio123 - healthcheck: - test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"] - interval: 30s - timeout: 20s - retries: 3 - - nginx: - image: nginx:1.19.2-alpine - hostname: nginx - volumes: - - ./nginx-minio.conf:/etc/nginx/nginx.conf:ro - ports: - - "9000:9000" - - "9001:9001" - depends_on: - - minio diff --git a/minio-local-cluster/nginx-minio-cluster.conf b/minio-local-cluster/nginx-minio-cluster.conf deleted file mode 100644 index abc57e3f..00000000 --- a/minio-local-cluster/nginx-minio-cluster.conf +++ /dev/null @@ -1,104 +0,0 @@ -user nginx; -worker_processes auto; - -error_log /var/log/nginx/error.log warn; -pid /var/run/nginx.pid; - -events { - worker_connections 4096; -} - -http { - include /etc/nginx/mime.types; - default_type application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - sendfile on; - keepalive_timeout 65; - - # include /etc/nginx/conf.d/*.conf; - - upstream 
minio { - server minio1:9000; - server minio2:9000; - server minio3:9000; - server minio4:9000; - } - - upstream console { - ip_hash; - server minio1:9001; - server minio2:9001; - server minio3:9001; - server minio4:9001; - } - - server { - listen 9000; - listen [::]:9000; - server_name localhost; - - # To allow special characters in headers - ignore_invalid_headers off; - # Allow any size file to be uploaded. - # Set to a value such as 1000m; to restrict file size to a specific value - client_max_body_size 0; - # To disable buffering - proxy_buffering off; - - location / { - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - - proxy_connect_timeout 300; - # Default is HTTP/1, keepalive is only enabled in HTTP/1.1 - proxy_http_version 1.1; - proxy_set_header Connection ""; - chunked_transfer_encoding off; - - proxy_pass http://minio; - } - } - - server { - listen 9001; - listen [::]:9001; - server_name localhost; - - # To allow special characters in headers - ignore_invalid_headers off; - # Allow any size file to be uploaded. 
- # Set to a value such as 1000m; to restrict file size to a specific value - client_max_body_size 0; - # To disable buffering - proxy_buffering off; - - location / { - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-NginX-Proxy true; - - # This is necessary to pass the correct IP to be hashed - real_ip_header X-Real-IP; - - proxy_connect_timeout 300; - - # To support websocket - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; - - chunked_transfer_encoding off; - - proxy_pass http://console; - } - } -} diff --git a/minio-local-cluster/nginx-minio.conf b/minio-local-cluster/nginx-minio.conf deleted file mode 100644 index 591f942f..00000000 --- a/minio-local-cluster/nginx-minio.conf +++ /dev/null @@ -1,98 +0,0 @@ -user nginx; -worker_processes auto; - -error_log /var/log/nginx/error.log warn; -pid /var/run/nginx.pid; - -events { - worker_connections 4096; -} - -http { - include /etc/nginx/mime.types; - default_type application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /var/log/nginx/access.log main; - sendfile on; - keepalive_timeout 65; - - # include /etc/nginx/conf.d/*.conf; - - upstream minio { - server minio:9000; - } - - upstream console { - ip_hash; - server minio:9001; - } - - server { - listen 9000; - listen [::]:9000; - server_name localhost; - - # To allow special characters in headers - ignore_invalid_headers off; - # Allow any size file to be uploaded. 
- # Set to a value such as 1000m; to restrict file size to a specific value - client_max_body_size 0; - # To disable buffering - proxy_buffering off; - - location / { - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - - proxy_connect_timeout 300; - # Default is HTTP/1, keepalive is only enabled in HTTP/1.1 - proxy_http_version 1.1; - proxy_set_header Connection ""; - chunked_transfer_encoding off; - - proxy_pass http://minio; - } - } - - server { - listen 9001; - listen [::]:9001; - server_name localhost; - - # To allow special characters in headers - ignore_invalid_headers off; - # Allow any size file to be uploaded. - # Set to a value such as 1000m; to restrict file size to a specific value - client_max_body_size 0; - # To disable buffering - proxy_buffering off; - - location / { - proxy_set_header Host $http_host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header X-NginX-Proxy true; - - # This is necessary to pass the correct IP to be hashed - real_ip_header X-Real-IP; - - proxy_connect_timeout 300; - - # To support websocket - proxy_http_version 1.1; - proxy_set_header Upgrade $http_upgrade; - proxy_set_header Connection "upgrade"; - - chunked_transfer_encoding off; - - proxy_pass http://console; - } - } -} diff --git a/pom.xml b/pom.xml index e545a712..2b56807c 100644 --- a/pom.xml +++ b/pom.xml @@ -6,7 +6,7 @@ dev.vality service-parent-pom - 3.1.6 + 3.1.9 file-storage @@ -27,10 +27,18 @@ + + dev.vality.woody + woody-thrift + + + dev.vality.woody + woody-api + dev.vality file-storage-proto - 1.49-f01d2d9 + SNAPSHOT dev.vality @@ -75,15 +83,19 @@ + + org.projectlombok + lombok + com.amazonaws aws-java-sdk-s3 - 1.12.451 + 1.12.797 software.amazon.awssdk s3 - 2.20.48 + 2.42.19 jakarta.servlet @@ -99,7 +111,7 
@@ dev.vality testcontainers-annotations - 3.3.2 + 4.1.3 test diff --git a/src/main/java/dev/vality/file/storage/configuration/S3ClientConfig.java b/src/main/java/dev/vality/file/storage/configuration/S3ClientConfig.java deleted file mode 100644 index 1f88bbc7..00000000 --- a/src/main/java/dev/vality/file/storage/configuration/S3ClientConfig.java +++ /dev/null @@ -1,53 +0,0 @@ -package dev.vality.file.storage.configuration; - -import com.amazonaws.ClientConfiguration; -import com.amazonaws.auth.AWSCredentialsProviderChain; -import com.amazonaws.auth.AWSStaticCredentialsProvider; -import com.amazonaws.auth.BasicAWSCredentials; -import com.amazonaws.auth.EnvironmentVariableCredentialsProvider; -import com.amazonaws.client.builder.AwsClientBuilder; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import com.amazonaws.services.s3.transfer.TransferManager; -import com.amazonaws.services.s3.transfer.TransferManagerBuilder; -import dev.vality.file.storage.configuration.properties.S3Properties; -import lombok.RequiredArgsConstructor; -import org.springframework.context.annotation.Bean; - -//@Configuration -@RequiredArgsConstructor -//@EnableConfigurationProperties(S3Properties.class) -public class S3ClientConfig { - - private final S3Properties s3Properties; - - @Bean - public TransferManager transferManager(AmazonS3 s3Client) { - return TransferManagerBuilder.standard() - .withS3Client(s3Client) - .build(); - } - - @Bean - public AmazonS3 s3Client() { - return AmazonS3ClientBuilder.standard() - .withCredentials( - new AWSCredentialsProviderChain( - new EnvironmentVariableCredentialsProvider(), - new AWSStaticCredentialsProvider( - new BasicAWSCredentials( - s3Properties.getAccessKey(), - s3Properties.getSecretKey())))) - .withPathStyleAccessEnabled(true) - .withEndpointConfiguration( - new AwsClientBuilder.EndpointConfiguration( - s3Properties.getEndpoint(), - s3Properties.getSigningRegion())) - 
.withClientConfiguration( - new ClientConfiguration() - .withProtocol(s3Properties.getClientProtocol()) - .withSignerOverride(s3Properties.getSignerOverride()) - .withMaxErrorRetry(s3Properties.getClientMaxErrorRetry())) - .build(); - } -} diff --git a/src/main/java/dev/vality/file/storage/configuration/S3SdkV2ClientConfig.java b/src/main/java/dev/vality/file/storage/configuration/S3SdkV2ClientConfig.java index 4a50e76b..4cf65db5 100644 --- a/src/main/java/dev/vality/file/storage/configuration/S3SdkV2ClientConfig.java +++ b/src/main/java/dev/vality/file/storage/configuration/S3SdkV2ClientConfig.java @@ -7,6 +7,8 @@ import org.springframework.context.annotation.Configuration; import software.amazon.awssdk.auth.credentials.AwsBasicCredentials; import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider; +import software.amazon.awssdk.core.checksums.RequestChecksumCalculation; +import software.amazon.awssdk.core.checksums.ResponseChecksumValidation; import software.amazon.awssdk.regions.Region; import software.amazon.awssdk.services.s3.S3Client; import software.amazon.awssdk.services.s3.S3Configuration; @@ -51,6 +53,8 @@ public S3Client s3SdkV2Client() { .serviceConfiguration(S3Configuration.builder() .pathStyleAccessEnabled(true) .build()) + .requestChecksumCalculation(RequestChecksumCalculation.WHEN_REQUIRED) + .responseChecksumValidation(ResponseChecksumValidation.WHEN_REQUIRED) .build(); } } diff --git a/src/main/java/dev/vality/file/storage/configuration/properties/S3Properties.java b/src/main/java/dev/vality/file/storage/configuration/properties/S3Properties.java deleted file mode 100644 index 75b73755..00000000 --- a/src/main/java/dev/vality/file/storage/configuration/properties/S3Properties.java +++ /dev/null @@ -1,22 +0,0 @@ -package dev.vality.file.storage.configuration.properties; - -import com.amazonaws.Protocol; -import lombok.Getter; -import lombok.Setter; - -@Getter -@Setter -//@Component -//@ConfigurationProperties("s3") -public class 
S3Properties { - - private String endpoint; - private String bucketName; - private String signingRegion; - private Protocol clientProtocol; - private Integer clientMaxErrorRetry; - private String signerOverride; - private String accessKey; - private String secretKey; - -} diff --git a/src/main/java/dev/vality/file/storage/configuration/properties/S3SdkV2Properties.java b/src/main/java/dev/vality/file/storage/configuration/properties/S3SdkV2Properties.java index 8b994ea7..7a23b68c 100644 --- a/src/main/java/dev/vality/file/storage/configuration/properties/S3SdkV2Properties.java +++ b/src/main/java/dev/vality/file/storage/configuration/properties/S3SdkV2Properties.java @@ -5,6 +5,8 @@ import org.springframework.boot.context.properties.ConfigurationProperties; import org.springframework.stereotype.Component; +import java.time.Duration; + @Getter @Setter @Component @@ -16,5 +18,6 @@ public class S3SdkV2Properties { private String region; private String accessKey; private String secretKey; + private Duration multipartUrlTtl = Duration.ofHours(1); } diff --git a/src/main/java/dev/vality/file/storage/handler/FileStorageHandler.java b/src/main/java/dev/vality/file/storage/handler/FileStorageHandler.java index 4d197d3b..e45a2475 100644 --- a/src/main/java/dev/vality/file/storage/handler/FileStorageHandler.java +++ b/src/main/java/dev/vality/file/storage/handler/FileStorageHandler.java @@ -1,8 +1,8 @@ package dev.vality.file.storage.handler; import dev.vality.file.storage.*; -import dev.vality.file.storage.service.StorageService; -import dev.vality.file.storage.service.exception.FileNotFoundException; +import dev.vality.file.storage.service.S3LegacyMultipartStorageService; +import dev.vality.file.storage.service.S3StorageService; import dev.vality.file.storage.util.CheckerUtil; import dev.vality.geck.common.util.TypeUtil; import dev.vality.msgpack.Value; @@ -14,13 +14,15 @@ import java.net.URL; import java.time.Instant; import java.util.Map; +import 
java.util.NoSuchElementException; @Service @Slf4j @RequiredArgsConstructor public class FileStorageHandler implements FileStorageSrv.Iface { - private final StorageService storageService; + private final S3StorageService storageService; + private final S3LegacyMultipartStorageService legacyMultipartStorageService; @Override public NewFileResult createNewFile(Map metadata, String expiresAt) throws TException { @@ -36,7 +38,7 @@ public String generateDownloadUrl(String fileDataId, String expiresAt) throws TE Instant instant = TypeUtil.stringToInstant(expiresAt); URL url = storageService.generateDownloadUrl(fileDataId, instant); return url.toString(); - } catch (FileNotFoundException e) { + } catch (NoSuchElementException e) { throw fileNotFound(e); } } @@ -46,23 +48,7 @@ public FileData getFileData(String fileDataId) throws TException { try { CheckerUtil.checkString(fileDataId, "Bad request parameter, fileDataId required and not empty arg"); return storageService.getFileData(fileDataId); - } catch (FileNotFoundException e) { - throw fileNotFound(e); - } - } - - /** - * @deprecated This method will be remove in next version. 
- */ - @Override - @Deprecated(forRemoval = true) - public FileData getMultipartFileData(String fileDataId) throws TException { - try { - log.info("Receive request for get multipart file data with fileDataId={}", fileDataId); - FileData multipartFileData = storageService.getMultipartFileData(fileDataId); - log.info("Successfully get multipart file data with fileDataId={}", fileDataId); - return multipartFileData; - } catch (FileNotFoundException e) { + } catch (NoSuchElementException e) { throw fileNotFound(e); } } @@ -70,7 +56,7 @@ public FileData getMultipartFileData(String fileDataId) throws TException { @Override public CreateMultipartUploadResult createMultipartUpload(Map metadata) { log.info("Receive request for create multipart upload with metadata={}", metadata); - CreateMultipartUploadResult result = storageService.createMultipartUpload(metadata); + CreateMultipartUploadResult result = legacyMultipartStorageService.createMultipartUpload(metadata); log.info("Successfully create multipart upload, fileId={}, uploadId={}", result.getFileDataId(), result.getMultipartUploadId()); return result; @@ -80,7 +66,7 @@ public CreateMultipartUploadResult createMultipartUpload(Map meta public UploadMultipartResult uploadMultipart(UploadMultipartRequestData request) { log.debug("Receive request for upload file part, fileId={}, uploadId={}, sequencePart={}", request.getFileDataId(), request.getMultipartUploadId(), request.getSequencePart()); - UploadMultipartResult result = storageService.uploadMultipart(request); + UploadMultipartResult result = legacyMultipartStorageService.uploadMultipart(request); log.debug("Successfully upload file part, fileId={}, uploadId={}, partId={}", request.getFileDataId(), request.getMultipartUploadId(), result.getPartId()); return result; @@ -90,33 +76,13 @@ public UploadMultipartResult uploadMultipart(UploadMultipartRequestData request) public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request) { 
log.info("Receive request for complete multipart upload, fileId={}, uploadId={}", request.getFileDataId(), request.getMultipartUploadId()); - CompleteMultipartUploadResult result = storageService.completeMultipartUpload(request); + CompleteMultipartUploadResult result = legacyMultipartStorageService.completeMultipartUpload(request); log.info("Successfully complete multipart upload, fileId={}, url={}", request.getFileDataId(), result.getUploadUrl()); return result; } - /** - * @deprecated This method will be remove in next version. - */ - @Override - @Deprecated(forRemoval = true) - public String generateMultipartDownloadUrl(String fileDataId, String expiresAt) throws TException { - try { - log.info("Receive request for generate download url with fileDataId={}", fileDataId); - CheckerUtil.checkString(fileDataId, "Bad request parameter, fileDataId required and not empty arg"); - CheckerUtil.checkString(expiresAt, "Bad request parameter, expiresAt required and not empty arg"); - Instant instant = TypeUtil.stringToInstant(expiresAt); - URL url = storageService.generateMultipartDownloadUrl(fileDataId, instant); - log.info("Successfully generate download url with fileDataId={}", fileDataId); - log.debug("Generated download url={}", url); - return url.toString(); - } catch (FileNotFoundException e) { - throw fileNotFound(e); - } - } - - private FileNotFound fileNotFound(FileNotFoundException e) { + private FileNotFound fileNotFound(NoSuchElementException e) { log.warn("File not found", e); return new FileNotFound(); } diff --git a/src/main/java/dev/vality/file/storage/handler/FileStoragePresignedMultipartHandler.java b/src/main/java/dev/vality/file/storage/handler/FileStoragePresignedMultipartHandler.java new file mode 100644 index 00000000..7f317e31 --- /dev/null +++ b/src/main/java/dev/vality/file/storage/handler/FileStoragePresignedMultipartHandler.java @@ -0,0 +1,72 @@ +package dev.vality.file.storage.handler; + +import dev.vality.file.storage.*; +import 
dev.vality.file.storage.service.S3PresignedMultipartStorageService; +import dev.vality.msgpack.Value; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; + +import java.util.Map; +import java.util.NoSuchElementException; + +@Service +@Slf4j +@RequiredArgsConstructor +public class FileStoragePresignedMultipartHandler implements FileStoragePresignedMultipartSrv.Iface { + + private final S3PresignedMultipartStorageService presignedMultipartStorageService; + + @Override + public PresignedMultipartUpload createMultipartUpload(Map metadata) { + log.info("Receive request for create presigned multipart upload with metadata={}", metadata); + PresignedMultipartUpload result = presignedMultipartStorageService.createMultipartUpload(metadata); + log.info("Successfully create presigned multipart upload, fileId={}, uploadId={}", + result.getFileDataId(), result.getMultipartUploadId()); + return result; + } + + @Override + public PresignedMultipartUpload getMultipartUpload(String fileDataId) throws FileNotFound { + log.debug("Receive request for get presigned multipart upload, fileId={}", fileDataId); + try { + PresignedMultipartUpload result = presignedMultipartStorageService.getMultipartUpload(fileDataId); + log.debug("Successfully got presigned multipart upload, fileId={}, uploadId={}, uploadStatus={}", + result.getFileDataId(), result.getMultipartUploadId(), result.getUploadStatus()); + return result; + } catch (NoSuchElementException ex) { + throw new FileNotFound(); + } + } + + @Override + public PresignMultipartUploadPartResult presignMultipartUploadPart(PresignMultipartUploadPartRequest request) { + log.debug("Receive request for presign file part upload, fileId={}, uploadId={}, sequencePart={}", + request.getFileDataId(), request.getMultipartUploadId(), request.getSequencePart()); + PresignMultipartUploadPartResult result = presignedMultipartStorageService.presignMultipartUploadPart(request); + 
log.debug("Successfully presigned file part upload, fileId={}, uploadId={}, sequencePart={}", + request.getFileDataId(), request.getMultipartUploadId(), result.getSequencePart()); + return result; + } + + @Override + public CompletePresignedMultipartUploadResult completeMultipartUpload( + CompletePresignedMultipartUploadRequest request) { + log.info("Receive request for complete presigned multipart upload, fileId={}, uploadId={}", + request.getFileDataId(), request.getMultipartUploadId()); + CompletePresignedMultipartUploadResult result = + presignedMultipartStorageService.completeMultipartUpload(request); + log.info("Successfully complete presigned multipart upload, fileId={}, fileUrl={}, uploadStatus={}", + request.getFileDataId(), result.getFileUrl(), result.getUploadStatus()); + return result; + } + + @Override + public void abortMultipartUpload(AbortMultipartUploadRequest request) { + log.info("Receive request for abort presigned multipart upload, fileId={}, uploadId={}", + request.getFileDataId(), request.getMultipartUploadId()); + presignedMultipartStorageService.abortMultipartUpload(request); + log.info("Successfully aborted presigned multipart upload, fileId={}, uploadId={}", + request.getFileDataId(), request.getMultipartUploadId()); + } +} diff --git a/src/main/java/dev/vality/file/storage/service/S3FileMetadataService.java b/src/main/java/dev/vality/file/storage/service/S3FileMetadataService.java new file mode 100644 index 00000000..d6a51a4f --- /dev/null +++ b/src/main/java/dev/vality/file/storage/service/S3FileMetadataService.java @@ -0,0 +1,172 @@ +package dev.vality.file.storage.service; + +import dev.vality.file.storage.FileUploadStatus; +import dev.vality.file.storage.configuration.properties.S3SdkV2Properties; +import dev.vality.file.storage.util.DamselUtil; +import dev.vality.msgpack.Value; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +import 
software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.*; + +import java.time.Instant; +import java.util.*; +import java.util.stream.Collectors; + +@Service +@Slf4j +@RequiredArgsConstructor +public class S3FileMetadataService { + + private static final String FILE_ID = "x-vality-file-id"; + private static final String CREATED_AT = "x-vality-created-at"; + private static final String METADATA = "x-vality-metadata-"; + private static final String MULTIPART_UPLOAD_ID = "x-vality-multipart-upload-id"; + private static final String UPLOAD_STATUS = "x-vality-upload-status"; + + private final S3SdkV2Properties s3SdkV2Properties; + private final S3Client s3SdkV2Client; + + public void uploadFileMetadata(Map metadata, String fileId) { + putObjectMetadata(fileId, buildS3Metadata(metadata, fileId, null, null, Instant.now().toString())); + } + + public StoredFileMetadata uploadMultipartFileMetadata(Map metadata, + String fileId, + String multipartUploadId, + FileUploadStatus uploadStatus) { + String createdAt = Instant.now().toString(); + putObjectMetadata( + fileId, + buildS3Metadata(metadata, fileId, multipartUploadId, uploadStatus, createdAt)); + return new StoredFileMetadata(fileId, createdAt, metadata, multipartUploadId, uploadStatus); + } + + public StoredFileMetadata updateMultipartFileMetadata(String fileId, + String multipartUploadId, + FileUploadStatus uploadStatus) { + StoredFileMetadata existing = getLatestFileMetadata(fileId) + .orElseThrow(() -> new NoSuchElementException( + String.format("File metadata not found, fileId=%s, bucketName=%s", + fileId, s3SdkV2Properties.getBucketName()))); + String actualMultipartUploadId = multipartUploadId != null ? 
multipartUploadId : existing.multipartUploadId(); + putObjectMetadata( + fileId, + buildS3Metadata( + existing.metadata(), + fileId, + actualMultipartUploadId, + uploadStatus, + existing.createdAt())); + return new StoredFileMetadata( + fileId, + existing.createdAt(), + existing.metadata(), + actualMultipartUploadId, + uploadStatus); + } + + public Optional getLatestFileMetadata(String fileId) { + try { + List versions = s3SdkV2Client.listObjectVersions(ListObjectVersionsRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .prefix(fileId) + .build()) + .versions(); + return versions.stream() + .sorted(Comparator.comparing(ObjectVersion::lastModified).reversed()) + .map(ObjectVersion::versionId) + .map(versionId -> loadObjectMetadata(fileId, versionId)) + .flatMap(Optional::stream) + .findFirst(); + } catch (S3Exception ex) { + throw new IllegalStateException( + String.format("Failed to get latest file metadata, fileId=%s, bucketName=%s", + fileId, s3SdkV2Properties.getBucketName()), + ex); + } + } + + private void putObjectMetadata(String fileId, Map s3Metadata) { + try { + var request = PutObjectRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .key(fileId) + .metadata(s3Metadata) + .build(); + var putObjectResponse = s3SdkV2Client.putObject(request, RequestBody.empty()); + var response = putObjectResponse.sdkHttpResponse(); + log.info("Check upload object version with file metadata result {}:{}", + response.statusCode(), response.statusText()); + if (response.isSuccessful()) { + log.info("Object version with file metadata was uploaded, fileId={}, bucketName={}", + fileId, s3SdkV2Properties.getBucketName()); + } else { + throw new IllegalStateException(String.format( + "Failed to upload object version with file metadata, fileId=%s, bucketName=%s", + fileId, s3SdkV2Properties.getBucketName())); + } + } catch (S3Exception ex) { + throw new IllegalStateException( + String.format("Failed to upload object version with file metadata, 
fileId=%s, bucketName=%s", + fileId, s3SdkV2Properties.getBucketName()), + ex); + } + } + + private HashMap buildS3Metadata(Map metadata, + String fileId, + String multipartUploadId, + FileUploadStatus uploadStatus, + String createdAt) { + var s3Metadata = new HashMap(); + s3Metadata.put(FILE_ID, fileId); + s3Metadata.put(CREATED_AT, createdAt); + if (multipartUploadId != null) { + s3Metadata.put(MULTIPART_UPLOAD_ID, multipartUploadId); + } + if (uploadStatus != null) { + s3Metadata.put(UPLOAD_STATUS, Integer.toString(uploadStatus.getValue())); + } + metadata.forEach((key, value) -> s3Metadata.put(METADATA + key, DamselUtil.toJsonString(value))); + return s3Metadata; + } + + private Optional loadObjectMetadata(String fileId, String versionId) { + GetObjectResponse objectResponse = s3SdkV2Client.getObject( + GetObjectRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .key(fileId) + .versionId(versionId) + .build(), + (response, inputStream) -> response); + if (!objectResponse.hasMetadata() || !objectResponse.metadata().containsKey(CREATED_AT)) { + return Optional.empty(); + } + Map s3Metadata = objectResponse.metadata(); + Map metadata = s3Metadata.entrySet().stream() + .filter(entry -> entry.getKey().startsWith(METADATA) && entry.getValue() != null) + .collect(Collectors.toMap( + entry -> entry.getKey().substring(METADATA.length()), + entry -> DamselUtil.fromJson(entry.getValue(), Value.class))); + FileUploadStatus uploadStatus = Optional.ofNullable(s3Metadata.get(UPLOAD_STATUS)) + .map(Integer::parseInt) + .map(FileUploadStatus::findByValue) + .orElse(null); + return Optional.of(new StoredFileMetadata( + fileId, + s3Metadata.get(CREATED_AT), + metadata, + s3Metadata.get(MULTIPART_UPLOAD_ID), + uploadStatus)); + } + + public record StoredFileMetadata(String fileId, + String createdAt, + Map metadata, + String multipartUploadId, + FileUploadStatus uploadStatus) { + } +} diff --git 
a/src/main/java/dev/vality/file/storage/service/S3LegacyMultipartStorageService.java b/src/main/java/dev/vality/file/storage/service/S3LegacyMultipartStorageService.java new file mode 100644 index 00000000..e063e1a0 --- /dev/null +++ b/src/main/java/dev/vality/file/storage/service/S3LegacyMultipartStorageService.java @@ -0,0 +1,128 @@ +package dev.vality.file.storage.service; + +import dev.vality.file.storage.*; +import dev.vality.file.storage.CompleteMultipartUploadRequest; +import dev.vality.file.storage.configuration.properties.S3SdkV2Properties; +import dev.vality.msgpack.Value; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.core.sync.RequestBody; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.*; + +import java.util.List; +import java.util.Map; + +@Service +@Slf4j +@RequiredArgsConstructor +public class S3LegacyMultipartStorageService { + + private final S3SdkV2Properties s3SdkV2Properties; + private final S3Client s3SdkV2Client; + private final S3PresignedMultipartStorageService presignedMultipartStorageService; + + public CreateMultipartUploadResult createMultipartUpload(Map metadata) { + var multipartUpload = presignedMultipartStorageService.createMultipartUpload(metadata); + return new CreateMultipartUploadResult() + .setFileDataId(multipartUpload.getFileDataId()) + .setMultipartUploadId(multipartUpload.getMultipartUploadId()); + } + + public UploadMultipartResult uploadMultipart(UploadMultipartRequestData requestData) { + String fileId = requestData.getFileDataId(); + String multipartUploadId = requestData.getMultipartUploadId(); + try { + var uploadPartRequest = UploadPartRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .key(fileId) + .uploadId(multipartUploadId) + .partNumber(requestData.getSequencePart()) + 
.contentLength((long) requestData.getContentLength()) + .build(); + RequestBody requestBody = RequestBody.fromBytes(requestData.getContent()); + UploadPartResponse uploadPartResponse = s3SdkV2Client.uploadPart(uploadPartRequest, requestBody); + var response = uploadPartResponse.sdkHttpResponse(); + log.info("Check file part upload result {}:{}", + response.statusCode(), response.statusText()); + if (response.isSuccessful()) { + log.info("File part was uploaded, fileId={}, bucketName={}, uploadId={}, partId={}", + fileId, s3SdkV2Properties.getBucketName(), multipartUploadId, uploadPartResponse.eTag()); + } else { + throw new IllegalStateException(String.format( + "Failed to upload file part, fileId=%s, bucketName=%s, uploadId=%s", + fileId, s3SdkV2Properties.getBucketName(), multipartUploadId)); + } + return new UploadMultipartResult() + .setPartId(uploadPartResponse.eTag()) + .setSequencePart(requestData.getSequencePart()); + } catch (SdkException ex) { + throw new IllegalStateException( + String.format("Failed to upload file part, fileId=%s, bucketName=%s, uploadId=%s", + fileId, s3SdkV2Properties.getBucketName(), multipartUploadId), + ex); + } + } + + public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request) { + String fileId = request.getFileDataId(); + String multipartUploadId = request.getMultipartUploadId(); + try { + var completeRequest = buildRequest(request, fileId, multipartUploadId); + CompleteMultipartUploadResponse completeResponse = s3SdkV2Client.completeMultipartUpload(completeRequest); + var response = completeResponse.sdkHttpResponse(); + log.info("Check complete multipart upload result {}:{}", + response.statusCode(), response.statusText()); + if (response.isSuccessful()) { + log.info("Multipart upload was completed, fileId={}, bucketName={}, uploadId={}", + fileId, s3SdkV2Properties.getBucketName(), multipartUploadId); + } else { + throw new IllegalStateException(String.format( + "Failed to complete 
multipart upload, fileId=%s, bucketName=%s, uploadId=%s", + fileId, s3SdkV2Properties.getBucketName(), multipartUploadId)); + } + String objectUrl = s3SdkV2Client.utilities().getUrl(GetUrlRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .key(fileId) + .build()) + .toExternalForm(); + log.info("Create url for multipart uploaded file, url={}, fileId={}, bucketName={}, uploadId={}", + objectUrl, fileId, s3SdkV2Properties.getBucketName(), multipartUploadId); + return new CompleteMultipartUploadResult() + .setUploadUrl(objectUrl); + } catch (SdkException ex) { + throw new IllegalStateException( + String.format("Failed to complete multipart upload, fileId=%s, bucketName=%s, uploadId=%s", + fileId, s3SdkV2Properties.getBucketName(), multipartUploadId), + ex); + } + } + + private software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest buildRequest( + CompleteMultipartUploadRequest request, + String fileId, + String multipartUploadId) { + List completedParts = request.getCompletedParts().stream() + .map(this::buildCompletedPart) + .toList(); + CompletedMultipartUpload completedUpload = CompletedMultipartUpload.builder() + .parts(completedParts) + .build(); + return software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .key(fileId) + .uploadId(multipartUploadId) + .multipartUpload(completedUpload) + .build(); + } + + private CompletedPart buildCompletedPart(CompletedMultipart completedMultipart) { + return CompletedPart.builder() + .eTag(completedMultipart.getPartId()) + .partNumber(completedMultipart.getSequencePart()) + .build(); + } + +} diff --git a/src/main/java/dev/vality/file/storage/service/S3PresignedMultipartStorageService.java b/src/main/java/dev/vality/file/storage/service/S3PresignedMultipartStorageService.java new file mode 100644 index 00000000..1a537e47 --- /dev/null +++ 
b/src/main/java/dev/vality/file/storage/service/S3PresignedMultipartStorageService.java @@ -0,0 +1,350 @@ +package dev.vality.file.storage.service; + +import dev.vality.file.storage.*; +import dev.vality.file.storage.AbortMultipartUploadRequest; +import dev.vality.file.storage.configuration.properties.S3SdkV2Properties; +import dev.vality.msgpack.Value; +import lombok.RequiredArgsConstructor; +import lombok.extern.slf4j.Slf4j; +import org.springframework.stereotype.Service; +import software.amazon.awssdk.core.exception.SdkException; +import software.amazon.awssdk.services.s3.S3Client; +import software.amazon.awssdk.services.s3.model.*; +import software.amazon.awssdk.services.s3.presigner.S3Presigner; +import software.amazon.awssdk.services.s3.presigner.model.UploadPartPresignRequest; + +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.util.*; + +@Service +@Slf4j +@RequiredArgsConstructor +public class S3PresignedMultipartStorageService { + + private static final String FILENAME_METADATA = "filename"; + private static final String CONTENT_MD5_HEADER = "Content-MD5"; + private static final String CHECKSUM_SHA256_HEADER = "x-amz-checksum-sha256"; + + private final S3SdkV2Properties s3SdkV2Properties; + private final S3Client s3SdkV2Client; + private final S3Presigner s3Presigner; + private final S3FileMetadataService s3FileMetadataService; + + public PresignedMultipartUpload createMultipartUpload(Map metadata) { + String fileName = extractFileName(metadata); + String fileId = UUID.randomUUID().toString(); + CreateMultipartUploadResponse createResponse = createS3MultipartUpload(fileId, fileName); + String multipartUploadId = createResponse.uploadId(); + try { + S3FileMetadataService.StoredFileMetadata storedMetadata = s3FileMetadataService.uploadMultipartFileMetadata( + metadata, + fileId, + multipartUploadId, + FileUploadStatus.pending_upload); + return buildMultipartUpload(storedMetadata); + } catch 
(RuntimeException ex) { + abortQuietly(fileId, multipartUploadId); + throw ex; + } + } + + public PresignedMultipartUpload getMultipartUpload(String fileId) { + S3FileMetadataService.StoredFileMetadata storedMetadata = s3FileMetadataService.getLatestFileMetadata(fileId) + .orElseThrow(() -> new NoSuchElementException( + String.format("Multipart upload not found, fileId=%s, bucketName=%s", + fileId, s3SdkV2Properties.getBucketName()))); + return buildMultipartUpload(storedMetadata); + } + + public PresignMultipartUploadPartResult presignMultipartUploadPart(PresignMultipartUploadPartRequest request) { + String fileId = request.getFileDataId(); + String multipartUploadId = request.getMultipartUploadId(); + S3FileMetadataService.StoredFileMetadata storedMetadata = + requirePendingMultipartUpload(fileId, multipartUploadId); + Instant expiresAt = Instant.now().plus(s3SdkV2Properties.getMultipartUrlTtl()); + try { + UploadPartRequest uploadPartRequest = buildUploadPartRequest(request, fileId, multipartUploadId); + var presignRequest = UploadPartPresignRequest.builder() + .signatureDuration(s3SdkV2Properties.getMultipartUrlTtl()) + .uploadPartRequest(uploadPartRequest) + .build(); + var presignedRequest = s3Presigner.presignUploadPart(presignRequest); + log.info("Multipart upload part url was presigned, fileId={}, bucketName={}, uploadId={}, sequencePart={}", + fileId, s3SdkV2Properties.getBucketName(), multipartUploadId, request.getSequencePart()); + log.debug("Presigned multipart http request={}", presignedRequest.httpRequest()); + return new PresignMultipartUploadPartResult() + .setSequencePart(request.getSequencePart()) + .setUploadUrl(presignedRequest.url().toString()) + .setExpiresAt(expiresAt.toString()) + .setRequiredHeaders(buildRequiredHeaders(request)); + } catch (SdkException ex) { + throw new IllegalStateException( + String.format("Failed to presign multipart upload part, fileId=%s, bucketName=%s, uploadId=%s, " + + "sequencePart=%s, uploadStatus=%s", + 
fileId, + s3SdkV2Properties.getBucketName(), + multipartUploadId, + request.getSequencePart(), + storedMetadata.uploadStatus()), + ex); + } + } + + public CompletePresignedMultipartUploadResult completeMultipartUpload( + CompletePresignedMultipartUploadRequest request) { + String fileId = request.getFileDataId(); + String multipartUploadId = request.getMultipartUploadId(); + requirePendingMultipartUpload(fileId, multipartUploadId); + validateCompletedParts(request.getCompletedParts(), fileId, multipartUploadId); + try { + var completeRequest = buildRequest(request, fileId, multipartUploadId); + var completeResponse = s3SdkV2Client.completeMultipartUpload(completeRequest); + var response = completeResponse.sdkHttpResponse(); + log.info("Check complete presigned multipart upload result {}:{}", + response.statusCode(), response.statusText()); + if (!response.isSuccessful()) { + throw new IllegalStateException(String.format( + "Failed to complete presigned multipart upload, fileId=%s, bucketName=%s, uploadId=%s", + fileId, s3SdkV2Properties.getBucketName(), multipartUploadId)); + } + S3FileMetadataService.StoredFileMetadata storedMetadata = s3FileMetadataService.updateMultipartFileMetadata( + fileId, + multipartUploadId, + FileUploadStatus.uploaded); + log.info("Presigned multipart upload was completed, fileId={}, bucketName={}, uploadId={}", + fileId, s3SdkV2Properties.getBucketName(), multipartUploadId); + return new CompletePresignedMultipartUploadResult() + .setFileDataId(fileId) + .setMultipartUploadId(multipartUploadId) + .setFileUrl(buildObjectUrl(fileId)) + .setUploadStatus(storedMetadata.uploadStatus()); + } catch (SdkException ex) { + throw new IllegalStateException( + String.format( + "Failed to complete presigned multipart upload, fileId=%s, bucketName=%s, uploadId=%s", + fileId, s3SdkV2Properties.getBucketName(), multipartUploadId), + ex); + } + } + + public void abortMultipartUpload(AbortMultipartUploadRequest request) { + String fileId = 
request.getFileDataId(); + String multipartUploadId = request.getMultipartUploadId(); + requirePendingMultipartUpload(fileId, multipartUploadId); + try { + var abortRequest = software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .key(fileId) + .uploadId(multipartUploadId) + .build(); + AbortMultipartUploadResponse abortResponse = s3SdkV2Client.abortMultipartUpload(abortRequest); + var response = abortResponse.sdkHttpResponse(); + log.info("Check abort presigned multipart upload result {}:{}", + response.statusCode(), response.statusText()); + if (!response.isSuccessful()) { + throw new IllegalStateException(String.format( + "Failed to abort presigned multipart upload, fileId=%s, bucketName=%s, uploadId=%s", + fileId, s3SdkV2Properties.getBucketName(), multipartUploadId)); + } + s3FileMetadataService.updateMultipartFileMetadata(fileId, multipartUploadId, FileUploadStatus.aborted); + } catch (SdkException ex) { + throw new IllegalStateException( + String.format("Failed to abort presigned multipart upload, fileId=%s, bucketName=%s, uploadId=%s", + fileId, s3SdkV2Properties.getBucketName(), multipartUploadId), + ex); + } + } + + private CreateMultipartUploadResponse createS3MultipartUpload(String fileId, String fileName) { + try { + var createRequest = CreateMultipartUploadRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .key(fileId) + .contentDisposition("attachment;filename=" + URLEncoder.encode(fileName, StandardCharsets.UTF_8)) + .build(); + CreateMultipartUploadResponse createResponse = s3SdkV2Client.createMultipartUpload(createRequest); + var response = createResponse.sdkHttpResponse(); + log.info("Check create presigned multipart upload result {}:{}", + response.statusCode(), response.statusText()); + if (!response.isSuccessful()) { + throw new IllegalStateException(String.format( + "Failed to create presigned multipart upload, fileId=%s, bucketName=%s", + fileId, 
s3SdkV2Properties.getBucketName())); + } + log.info("Presigned multipart upload was created, fileId={}, bucketName={}, uploadId={}", + fileId, s3SdkV2Properties.getBucketName(), createResponse.uploadId()); + return createResponse; + } catch (SdkException ex) { + throw new IllegalStateException( + String.format("Failed to create presigned multipart upload, fileId=%s, bucketName=%s", + fileId, s3SdkV2Properties.getBucketName()), + ex); + } + } + + private UploadPartRequest buildUploadPartRequest(PresignMultipartUploadPartRequest request, + String fileId, + String multipartUploadId) { + var builder = UploadPartRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .key(fileId) + .uploadId(multipartUploadId) + .partNumber(request.getSequencePart()); + if (request.isSetContentLength()) { + builder.contentLength(request.getContentLength()); + } + if (request.isSetContentMd5()) { + builder.contentMD5(request.getContentMd5()); + } + if (request.isSetChecksumSha256()) { + builder.checksumSHA256(request.getChecksumSha256()); + } + return builder.build(); + } + + private Map buildRequiredHeaders(PresignMultipartUploadPartRequest request) { + Map headers = new LinkedHashMap<>(); + if (request.isSetContentMd5()) { + headers.put(CONTENT_MD5_HEADER, request.getContentMd5()); + } + if (request.isSetChecksumSha256()) { + headers.put(CHECKSUM_SHA256_HEADER, request.getChecksumSha256()); + } + return headers; + } + + private software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest buildRequest( + CompletePresignedMultipartUploadRequest request, + String fileId, + String multipartUploadId) { + List completedParts = request.getCompletedParts().stream() + .sorted(Comparator.comparingInt(CompletedPresignedMultipart::getSequencePart)) + .map(this::buildCompletedPart) + .toList(); + CompletedMultipartUpload completedUpload = CompletedMultipartUpload.builder() + .parts(completedParts) + .build(); + return 
software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .key(fileId) + .uploadId(multipartUploadId) + .multipartUpload(completedUpload) + .build(); + } + + private CompletedPart buildCompletedPart(CompletedPresignedMultipart completedMultipart) { + return CompletedPart.builder() + .eTag(completedMultipart.getEtag()) + .partNumber(completedMultipart.getSequencePart()) + .build(); + } + + private void validateCompletedParts(List completedParts, + String fileId, + String multipartUploadId) { + if (completedParts == null || completedParts.isEmpty()) { + throw new IllegalArgumentException(String.format( + "Completed parts must not be empty, fileId=%s, uploadId=%s", + fileId, multipartUploadId)); + } + var distinctPartNumbers = completedParts.stream() + .map(CompletedPresignedMultipart::getSequencePart) + .distinct() + .count(); + if (distinctPartNumbers != completedParts.size()) { + throw new IllegalArgumentException(String.format( + "Completed parts must contain unique sequencePart values, fileId=%s, uploadId=%s", + fileId, multipartUploadId)); + } + completedParts.forEach(part -> { + if (part.getSequencePart() <= 0) { + throw new IllegalArgumentException(String.format( + "Completed part sequencePart must be positive, fileId=%s, uploadId=%s", + fileId, multipartUploadId)); + } + if (part.getEtag() == null || part.getEtag().isBlank()) { + throw new IllegalArgumentException(String.format( + "Completed part etag must not be blank, fileId=%s, uploadId=%s, sequencePart=%s", + fileId, multipartUploadId, part.getSequencePart())); + } + }); + } + + private S3FileMetadataService.StoredFileMetadata requirePendingMultipartUpload(String fileId, + String multipartUploadId) { + S3FileMetadataService.StoredFileMetadata storedMetadata = s3FileMetadataService.getLatestFileMetadata(fileId) + .orElseThrow(() -> new NoSuchElementException( + String.format("Multipart upload not found, fileId=%s, bucketName=%s", + 
fileId, s3SdkV2Properties.getBucketName()))); + if (!multipartUploadId.equals(storedMetadata.multipartUploadId())) { + throw new IllegalStateException(String.format( + "Multipart upload id does not match stored state, fileId=%s, bucketName=%s, actualUploadId=%s, " + + "expectedUploadId=%s", + fileId, + s3SdkV2Properties.getBucketName(), + multipartUploadId, + storedMetadata.multipartUploadId())); + } + if (storedMetadata.uploadStatus() != FileUploadStatus.pending_upload) { + throw new IllegalStateException(String.format( + "Multipart upload is not pending, fileId=%s, bucketName=%s, uploadId=%s, uploadStatus=%s", + fileId, + s3SdkV2Properties.getBucketName(), + multipartUploadId, + storedMetadata.uploadStatus())); + } + return storedMetadata; + } + + private PresignedMultipartUpload buildMultipartUpload(S3FileMetadataService.StoredFileMetadata storedMetadata) { + PresignedMultipartUpload multipartUpload = new PresignedMultipartUpload() + .setFileDataId(storedMetadata.fileId()) + .setMultipartUploadId(storedMetadata.multipartUploadId()) + .setCreatedAt(storedMetadata.createdAt()) + .setMetadata(storedMetadata.metadata()) + .setUploadStatus(storedMetadata.uploadStatus()); + Value fileName = storedMetadata.metadata().get(FILENAME_METADATA); + if (fileName != null && fileName.isSetStr()) { + multipartUpload.setFileName(fileName.getStr()); + } + if (storedMetadata.uploadStatus() == FileUploadStatus.uploaded) { + multipartUpload.setFileUrl(buildObjectUrl(storedMetadata.fileId())); + } + return multipartUpload; + } + + private String buildObjectUrl(String fileId) { + String objectUrl = s3SdkV2Client.utilities().getUrl(GetUrlRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .key(fileId) + .build()) + .toExternalForm(); + log.info("Create object url for multipart uploaded file, url={}, fileId={}, bucketName={}", + objectUrl, fileId, s3SdkV2Properties.getBucketName()); + return objectUrl; + } + + private String extractFileName(Map metadata) { + Value fileName = 
metadata.get(FILENAME_METADATA); + if (fileName == null || !fileName.isSetStr() || fileName.getStr().isBlank()) { + throw new IllegalArgumentException("Can't create multipart upload object without fileName"); + } + return fileName.getStr(); + } + + private void abortQuietly(String fileId, String multipartUploadId) { + try { + s3SdkV2Client.abortMultipartUpload(software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest + .builder() + .bucket(s3SdkV2Properties.getBucketName()) + .key(fileId) + .uploadId(multipartUploadId) + .build()); + } catch (Exception ex) { + log.warn("Failed to abort leaked multipart upload after metadata write failure, fileId={}, uploadId={}", + fileId, multipartUploadId, ex); + } + } +} diff --git a/src/main/java/dev/vality/file/storage/service/S3Service.java b/src/main/java/dev/vality/file/storage/service/S3Service.java deleted file mode 100644 index d81375e8..00000000 --- a/src/main/java/dev/vality/file/storage/service/S3Service.java +++ /dev/null @@ -1,348 +0,0 @@ -package dev.vality.file.storage.service; - -import com.amazonaws.HttpMethod; -import com.amazonaws.SdkBaseException; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.model.*; -import com.amazonaws.services.s3.transfer.TransferManager; -import com.amazonaws.services.s3.transfer.Upload; -import dev.vality.file.storage.*; -import dev.vality.file.storage.CompleteMultipartUploadRequest; -import dev.vality.file.storage.CompleteMultipartUploadResult; -import dev.vality.file.storage.configuration.properties.S3Properties; -import dev.vality.file.storage.service.exception.ExtractMetadataException; -import dev.vality.file.storage.service.exception.FileNotFoundException; -import dev.vality.file.storage.service.exception.StorageException; -import dev.vality.file.storage.service.exception.WaitingUploadException; -import dev.vality.file.storage.util.DamselUtil; -import dev.vality.msgpack.Value; -import lombok.Getter; -import lombok.RequiredArgsConstructor; 
import lombok.ToString;
import lombok.extern.slf4j.Slf4j;

import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.URL;
import java.time.Instant;
import java.util.*;
import java.util.stream.Collectors;

import static java.lang.String.format;

// Legacy storage implementation on the AWS SDK v1 (AmazonS3 / TransferManager).
// Removed by this patch in favor of the SDK v2 services. Already disabled as a
// Spring bean (the @Service annotation is commented out).
//@Service
@Slf4j
@RequiredArgsConstructor
public class S3Service implements StorageService {

    // User-metadata keys persisted on the fake metadata object (legacy naming).
    private static final String FILE_DATA_ID = "x-rbkmoney-file-data-id";
    private static final String FILE_ID = "x-rbkmoney-file-id";
    private static final String CREATED_AT = "x-rbkmoney-created-at";
    private static final String METADATA = "x-rbkmoney-metadata-";
    private static final String FILENAME_PARAM = "filename=";
    public static final String METHOD_NOT_SUPPORTED = "Method not supported";

    private final TransferManager transferManager;
    private final AmazonS3 s3Client;
    private final S3Properties s3Properties;
    // Resolved once in init(); not final because it depends on injected properties.
    private String bucketName;

    // Resolves the bucket name and creates the bucket if it does not exist yet.
    @PostConstruct
    public void init() {
        this.bucketName = s3Properties.getBucketName();
        bucketInit();
    }

    // Creates the file registration: writes an immutable fake metadata object keyed
    // by fileDataId that points to the real file (fileId), then presigns a PUT URL
    // for uploading the real content directly to storage.
    @Override
    public NewFileResult createNewFile(Map metadata, Instant expirationTime) {
        String fileDataId = id();
        String fileId = id();

        log.info("Trying to create NewFileResult, fileDataId='{}', bucketId='{}'", fileDataId, bucketName);

        FileDto fileDto = fileDto(fileDataId, fileId, metadata);

        // An immutable fake file with metadata is written; it holds the link to the real file
        uploadEmptyFileWithMetadata(fileDataId, fileDto);

        // A presigned URL is generated for uploading the file directly to storage (Ceph) under the fileId key
        log.info("Generate Upload Url, fileDataId='{}', bucketId='{}'", fileDataId, bucketName);
        URL uploadUrl = generatePresignedUrl(fileDataId, fileId, expirationTime, HttpMethod.PUT);

        log.info("NewFileResult has been successfully created, fileDataId='{}', bucketId='{}'",
                fileDataId, bucketName);

        return new NewFileResult(fileDataId, uploadUrl.toString());
    }

    // Presigns a GET URL for the real file referenced by the fake metadata object.
    @Override
    public URL generateDownloadUrl(String fileDataId, Instant expirationTime) {
        log.info("Trying to generate Download Url, fileDataId='{}', bucketId='{}'", fileDataId, bucketName);

        // The immutable fake file with metadata is fetched
        log.info("Extract file, fileDataId='{}', bucketId='{}'", fileDataId, bucketName);
        FileDto fileDto = getFileDto(fileDataId);

        // A presigned URL is generated for downloading the file directly from storage (Ceph) under the fileId key
        log.info("Generate Download Url, fileDataId='{}', bucketId='{}'", fileDataId, bucketName);
        URL generatePresignedUrl = generatePresignedUrl(
                fileDto.getFileDataId(),
                fileDto.getFileId(),
                expirationTime,
                HttpMethod.GET);

        log.info("Download Url has been successfully generate, fileDataId='{}', bucketId='{}'", fileDataId, bucketName);

        return generatePresignedUrl;
    }

    // Returns file data (id, name, createdAt, metadata) assembled from the fake
    // metadata object and the real object's Content-Disposition.
    @Override
    public FileData getFileData(String fileDataId) {
        log.info("Trying to get FileData, fileDataId='{}', bucketId='{}'", fileDataId, bucketName);

        // The immutable fake file with metadata is fetched
        log.info("Extract file, fileDataId='{}', bucketId='{}'", fileDataId, bucketName);
        FileDto fileDto = getFileDto(fileDataId);

        // The real S3 file is fetched
        String fileName = getFileName(fileDataId, fileDto);

        log.info("FileData has been successfully got, fileDataId='{}', bucketId='{}'", fileDataId, bucketName);

        return new FileData(fileDto.getFileDataId(), fileName, fileDto.getCreatedAt(), fileDto.getMetadata());
    }

    // Multipart operations were never supported by this legacy implementation.
    @Override
    public FileData getMultipartFileData(String fileDataId) {
        throw new UnsupportedOperationException(METHOD_NOT_SUPPORTED);
    }

    @Override
    public CreateMultipartUploadResult createMultipartUpload(Map metadata) {
        throw new UnsupportedOperationException(METHOD_NOT_SUPPORTED);
    }

    @Override
    public UploadMultipartResult uploadMultipart(UploadMultipartRequestData requestData) {
        throw new UnsupportedOperationException(METHOD_NOT_SUPPORTED);
    }

    @Override
    public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request) {
        throw new UnsupportedOperationException(METHOD_NOT_SUPPORTED);
    }

    @Override
    public URL generateMultipartDownloadUrl(String fileDataId, Instant expirationTime) {
        throw new UnsupportedOperationException(METHOD_NOT_SUPPORTED);
    }

    // Shuts down the TransferManager (and, per SDK v1 semantics, its client) on bean destruction.
    @PreDestroy
    public void terminate() {
        transferManager.shutdownNow(true);
    }

    // Creates the bucket lazily on startup if absent.
    private void bucketInit() {
        try {
            if (!s3Client.doesBucketExistV2(bucketName)) {
                s3Client.createBucket(bucketName);
            }
        } catch (SdkBaseException ex) {
            throw new StorageException(
                    format("Failed to create bucket, bucketName=%s", bucketName),
                    ex
            );
        }
    }

    // Uploads the zero-byte fake object carrying the metadata, blocking until done.
    private void uploadEmptyFileWithMetadata(String fileDataId, FileDto fileDto) {
        try {
            PutObjectRequest putObjectRequest = putObjectRequest(fileDataId, fileDto, byteArrayInputStream());

            Upload upload = transferManager.upload(putObjectRequest);

            upload.waitForUploadResult();
        } catch (InterruptedException e) {
            // Re-interrupt so callers can observe the interruption.
            Thread.currentThread().interrupt();
            throw new WaitingUploadException(
                    format(
                            "Thread is interrupted while waiting for the file upload to complete, " +
                                    "fileDataId=%s, bucketId=%s",
                            fileDataId, bucketName
                    )
            );
        } catch (SdkBaseException ex) {
            throw new StorageException(
                    format("Failed to upload file, fileDataId=%s, bucketId=%s", fileDataId, bucketName),
                    ex
            );
        }
    }

    private URL generatePresignedUrl(String fileDataId, String fileId, Instant expirationTime, HttpMethod httpMethod) {
        try {
            GeneratePresignedUrlRequest request = generatePresignedUrlRequest(fileId, expirationTime, httpMethod);

            URL url = s3Client.generatePresignedUrl(request);

            checkNotNull("PresignedUrl", fileDataId, url);

            return url;
        } catch (SdkBaseException ex) {
            throw new StorageException(
                    format("Failed to generate PresignedUrl, fileDataId=%s, bucketId=%s", fileDataId, bucketName),
                    ex
            );
        }
    }

    private FileDto getFileDto(String fileDataId) {
        S3Object s3Object = getS3Object(fileDataId, fileDataId);

        checkRealFileStatus(fileDataId, s3Object);

        return getFileDto(fileDataId, s3Object.getObjectMetadata());
    }

    // Decodes the fake object's user metadata back into a FileDto.
    private FileDto getFileDto(String fileDataId, ObjectMetadata objectMetadata) {
        String id = getUserMetadataParameter(fileDataId, objectMetadata, FILE_DATA_ID);
        String fileId = getFileIdFromObjectMetadata(fileDataId, objectMetadata);
        String createdAt = getUserMetadataParameter(fileDataId, objectMetadata, CREATED_AT);
        Map metadata = objectMetadata.getUserMetadata().entrySet().stream()
                .filter(entry -> entry.getKey().startsWith(METADATA) && entry.getValue() != null)
                .collect(
                        Collectors.toMap(
                                o -> o.getKey().substring(METADATA.length()),
                                o -> DamselUtil.fromJson(o.getValue(), Value.class)
                        )
                );
        return new FileDto(id, fileId, createdAt, metadata);
    }

    private String getFileName(String fileDataId, FileDto fileDto) {
        S3Object s3Object = getS3Object(fileDataId, fileDto.getFileId());

        return extractFileName(s3Object);
    }

    // NOTE(review): returns the S3Object after try-with-resources has closed it;
    // callers only read its already-fetched ObjectMetadata, so this appeared to
    // work, but the content stream is unusable past this point.
    private S3Object getS3Object(String fileDataId, String id) {
        try (S3Object object = s3Client.getObject(new GetObjectRequest(bucketName, id))) {

            checkNotNull("S3Object", fileDataId, object);

            return object;
        } catch (SdkBaseException ex) {
            throw new StorageException(
                    format("Failed to get S3Object, fileDataId=%s, bucketId=%s", fileDataId, bucketName),
                    ex
            );
        } catch (IOException ex) {
            throw new StorageException(
                    format("Unable to close S3Object, fileDataId=%s, bucketId=%s", fileDataId, bucketName),
                    ex);
        }
    }

    // Blocks access to the registration if the real file was never uploaded.
    private void checkRealFileStatus(String fileDataId, S3Object s3Object) {
        try {
            ObjectMetadata objectMetadata = s3Object.getObjectMetadata();

            String fileId = getFileIdFromObjectMetadata(fileDataId, objectMetadata);
            if (s3Client.doesObjectExist(bucketName, fileId)) {
                return;
            }
        } catch (SdkBaseException ex) {
            throw new StorageException(
                    format("Failed to check on exist the file, fileDataId=%s, bucketId=%s", fileDataId, bucketName),
                    ex
            );
        }

        // If the file does not satisfy the conditions, block access to it
        throw new FileNotFoundException(format("S3Object is null, fileDataId=%s, bucketId=%s", fileDataId, bucketName));
    }

    private String getFileIdFromObjectMetadata(String fileDataId, ObjectMetadata objectMetadata) {
        return getUserMetadataParameter(fileDataId, objectMetadata, FILE_ID);
    }

    private String getUserMetadataParameter(String fileDataId, ObjectMetadata objectMetadata, String key) {
        return Optional.ofNullable(objectMetadata.getUserMetaDataOf(key))
                .orElseThrow(
                        () -> new ExtractMetadataException(
                                format(
                                        "Failed to extract metadata parameter, fileDataId=%s, bucketId=%s, key=%s",
                                        fileDataId, bucketName, key
                                )
                        )
                );
    }

    // Pulls the original filename out of the Content-Disposition header
    // ("...filename=<name>"); assumes the parameter is present and last.
    private String extractFileName(S3Object s3Object) {
        String contentDisposition = s3Object.getObjectMetadata().getContentDisposition();
        int fileNameIndex = contentDisposition.lastIndexOf(FILENAME_PARAM) + FILENAME_PARAM.length();
        return contentDisposition.substring(fileNameIndex);
    }

    private void checkNotNull(String objectType, String fileDataId, Object object) {
        if (Objects.isNull(object)) {
            throw new FileNotFoundException(
                    format("%s is null, fileDataId=%s, bucketId=%s", objectType, fileDataId, bucketName));
        }
    }

    private String id() {
        return UUID.randomUUID().toString();
    }

    private ByteArrayInputStream byteArrayInputStream() {
        return new ByteArrayInputStream(new byte[0]);
    }

    private FileDto fileDto(String fileDataId, String fileId, Map metadata) {
        return new FileDto(fileDataId, fileId, Instant.now().toString(), metadata);
    }

    private PutObjectRequest putObjectRequest(String fileDataId, FileDto fileDto, InputStream inputStream) {
        return new PutObjectRequest(
                bucketName,
                fileDataId,
                inputStream,
                objectMetadata(fileDto)
        );
    }

    // Serializes a FileDto into S3 user metadata (values JSON-encoded).
    private ObjectMetadata objectMetadata(FileDto fileDto) {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.addUserMetadata(FILE_DATA_ID, fileDto.getFileDataId());
        objectMetadata.addUserMetadata(FILE_ID, fileDto.getFileId());
        objectMetadata.addUserMetadata(CREATED_AT, fileDto.getCreatedAt());
        fileDto.getMetadata().forEach(
                (key, value) -> objectMetadata.addUserMetadata(METADATA + key, DamselUtil.toJsonString(value))
        );
        return objectMetadata;
    }

    private GeneratePresignedUrlRequest generatePresignedUrlRequest(
            String fileId,
            Instant expirationTime,
            HttpMethod httpMethod) {
        return new GeneratePresignedUrlRequest(bucketName, fileId)
                .withMethod(httpMethod)
                .withExpiration(Date.from(expirationTime));
    }

    // Internal value holder linking the registration (fileDataId) to the real
    // object key (fileId) plus creation time and user metadata.
    @RequiredArgsConstructor
    @Getter
    @ToString
    private class FileDto {

        private final String fileDataId;
        private final String fileId;
        private final String createdAt;
        private final Map metadata;

    }
}
software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest; +import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; +import software.amazon.awssdk.services.s3.presigner.model.PutObjectPresignRequest; + +import java.net.URL; +import java.time.Duration; +import java.time.Instant; +import java.util.*; +import java.util.function.Predicate; +import java.util.stream.Collectors; + +@Service +@Slf4j +@RequiredArgsConstructor +public class S3StorageService { + + private static final String CREATED_AT = "x-vality-created-at"; + private static final String METADATA = "x-vality-metadata-"; + private static final String FILENAME_PARAM = "filename="; + private static final Duration MIN_PRESIGN_DURATION = Duration.ofSeconds(1); + + private final S3SdkV2Properties s3SdkV2Properties; + private final S3Client s3SdkV2Client; + private final S3Presigner s3Presigner; + private final S3FileMetadataService s3FileMetadataService; + + @PostConstruct + public void init() { + if (!doesBucketExist()) { + createBucket(); + enableBucketVersioning(); + } + } + + public NewFileResult createNewFile(Map metadata, Instant expirationTime) { + var fileId = UUID.randomUUID().toString(); + s3FileMetadataService.uploadFileMetadata(metadata, fileId); + var url = presignUploadUrl(expirationTime, fileId); + return new NewFileResult(fileId, url.toString()); + } + + public URL generateDownloadUrl(String fileId, Instant expirationTime) { + var versions = getObjectVersions(fileId); + var responseCache = new HashMap(); + var fileVersionId = getFileVersionId(fileId, versions, responseCache); + PresignedGetObjectRequest presignedRequest = getPresignedRequest(fileId, expirationTime, fileVersionId); + return presignedRequest.url(); + } + + public FileData getFileData(String fileId) { + var versions = getObjectVersions(fileId); + var responseCache = new HashMap(); + var fileMetadataVersionId = getFileMetadataVersionId(fileId, versions, responseCache); + var 
metadataObjectResponse = getObject(fileId, fileMetadataVersionId, responseCache); + var fileMetadata = getFileMetadata(fileId, fileMetadataVersionId, metadataObjectResponse); + var fileVersionId = getFileVersionId(fileId, versions, responseCache); + var fileObjectResponse = getObject(fileId, fileVersionId, responseCache); + String fileName = getFileName(fileId, fileVersionId, fileObjectResponse); + return new FileData( + fileMetadata.fileId(), + fileName, + fileMetadata.createdAt(), + fileMetadata.metadata()); + } + + // единственный доступный вариант проверки существования бакета на данный момент через catch + // в репе сдк висит таска https://github.com/aws/aws-sdk-java-v2/issues/392#issuecomment-880224831 + // в первой версии сдк тоже через catch проверка на существование + // разница только в том, что проверка идет через метод S3Client#getBucketAcl + // во второй версии тоже есть этот метод, не уверен в чем разница с выбранным вариантом, + // но везде советуют его + private boolean doesBucketExist() { + try { + var request = HeadBucketRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .build(); + var headBucketResponse = s3SdkV2Client.headBucket(request); + var response = headBucketResponse.sdkHttpResponse(); + log.info(String.format("Check exist bucket result %d:%s", + response.statusCode(), response.statusText())); + if (response.isSuccessful()) { + log.info("Bucket is exist, bucketName={}", s3SdkV2Properties.getBucketName()); + } else { + throw new IllegalStateException(String.format( + "Failed to check bucket on exist, bucketName=%s", s3SdkV2Properties.getBucketName())); + } + return true; + } catch (NoSuchBucketException ex) { + log.info("Bucket does not exist, bucketName={}", s3SdkV2Properties.getBucketName()); + return false; + } catch (S3Exception ex) { + throw new IllegalStateException( + String.format("Failed to check bucket on exist, bucketName=%s", s3SdkV2Properties.getBucketName()), + ex); + } + } + + private void createBucket() { + 
try { + var s3Waiter = s3SdkV2Client.waiter(); + var createBucketRequest = CreateBucketRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .build(); + s3SdkV2Client.createBucket(createBucketRequest); + var headBucketRequest = HeadBucketRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .build(); + // Wait until the bucket is created and print out the response. + s3Waiter.waitUntilBucketExists(headBucketRequest) + .matched() + .response() + .ifPresent(headBucketResponse -> { + var response = headBucketResponse.sdkHttpResponse(); + log.info("Check created bucket result {}:{}", response.statusCode(), response.statusText()); + if (response.isSuccessful()) { + log.info("Bucket has been created, bucketName={}", s3SdkV2Properties.getBucketName()); + } else { + throw new IllegalStateException(String.format( + "Failed to create bucket, bucketName=%s", s3SdkV2Properties.getBucketName())); + } + }); + } catch (S3Exception ex) { + throw new IllegalStateException( + String.format("Failed to create bucket, bucketName=%s", s3SdkV2Properties.getBucketName()), + ex); + } + } + + private void enableBucketVersioning() { + try { + var request = PutBucketVersioningRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .versioningConfiguration(VersioningConfiguration.builder() + .status(BucketVersioningStatus.ENABLED) + .build()) + .build(); + var putBucketVersioningResponse = s3SdkV2Client.putBucketVersioning(request); + var response = putBucketVersioningResponse.sdkHttpResponse(); + log.info("Check enable versioning bucket result {}:{}", response.statusCode(), response.statusText()); + if (response.isSuccessful()) { + log.info("Versioning bucket has been enabled, bucketName={}", s3SdkV2Properties.getBucketName()); + } else { + throw new IllegalStateException(String.format( + "Failed to enable bucket versioning, bucketName=%s", s3SdkV2Properties.getBucketName())); + } + } catch (S3Exception ex) { + throw new IllegalStateException( + 
String.format("Failed to enable bucket versioning, " + + "bucketName=%s", s3SdkV2Properties.getBucketName()), + ex); + } + } + + private URL presignUploadUrl(Instant expirationTime, String fileId) { + var presignRequest = PutObjectPresignRequest.builder() + .signatureDuration(resolvePresignDuration(expirationTime)) + .putObjectRequest(PutObjectRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .key(fileId) + .build()) + .build(); + var presignedRequest = s3Presigner.presignPutObject(presignRequest); + log.info("Upload url was presigned, fileId={}, bucketName={}", fileId, s3SdkV2Properties.getBucketName()); + log.debug("Presigned http request={}", presignedRequest.httpRequest().toString()); + return presignedRequest.url(); + } + + private List getObjectVersions(String fileId) { + try { + var request = ListObjectVersionsRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .prefix(fileId) + .build(); + var listObjectVersionsResponse = s3SdkV2Client.listObjectVersions(request); + var response = listObjectVersionsResponse.sdkHttpResponse(); + log.info("Check list object versions result {}:{}", response.statusCode(), response.statusText()); + if (response.isSuccessful()) { + var versions = listObjectVersionsResponse.versions(); + log.info("List object versions has been got, fileId={}, bucketName={}, objectVersions={}, ", + fileId, + s3SdkV2Properties.getBucketName(), + versions.stream().map(ObjectVersion::toString).collect(Collectors.joining(","))); + return versions; + } else { + throw new IllegalStateException(String.format( + "Failed to get list object versions, fileId=%s, bucketName=%s", + fileId, s3SdkV2Properties.getBucketName())); + } + } catch (S3Exception ex) { + throw new IllegalStateException( + String.format( + "Failed to get list object versions, fileId=%s, bucketName=%s", + fileId, s3SdkV2Properties.getBucketName()), + ex); + } + } + + private String getFileMetadataVersionId(String fileId, + List versions, + Map responseCache) { + 
return findVersionId( + fileId, + versions, + responseCache, + this::isMetadataVersion, + "file metadata"); + } + + private FileMetadata getFileMetadata(String fileId, + String versionId, + GetObjectResponse objectResponse) { + if (objectResponse.hasMetadata() && !objectResponse.metadata().isEmpty()) { + var s3Metadata = objectResponse.metadata(); + var metadata = s3Metadata.entrySet().stream() + .filter(entry -> entry.getKey().startsWith(METADATA) + && entry.getValue() != null) + .collect(Collectors.toMap( + o -> o.getKey().substring(METADATA.length()), + o -> DamselUtil.fromJson(o.getValue(), Value.class))); + return new FileMetadata(fileId, s3Metadata.get(CREATED_AT), metadata); + } else { + throw new IllegalStateException(String.format( + "Object version with file metadata is empty, " + + "fileId=%s, fileMetadataVersionId=%s, bucketName=%s", + fileId, versionId, s3SdkV2Properties.getBucketName())); + } + } + + private String getFileVersionId(String fileId, + List versions, + Map responseCache) { + return findVersionId( + fileId, + versions, + responseCache, + this::isUploadedFileVersion, + "uploaded file"); + } + + private String findVersionId(String fileId, + List versions, + Map responseCache, + Predicate responsePredicate, + String objectKind) { + return versions.stream() + .sorted(Comparator.comparing(ObjectVersion::lastModified).reversed()) + .map(ObjectVersion::versionId) + .filter(versionId -> responsePredicate.test(getObject(fileId, versionId, responseCache))) + .findFirst() + .orElseThrow(() -> new NoSuchElementException(String.format( + "Object version with %s not found, fileId=%s, bucketName=%s, objectVersions=%s", + objectKind, + fileId, + s3SdkV2Properties.getBucketName(), + versions.stream().map(ObjectVersion::toString).collect(Collectors.joining(","))))); + } + + private boolean isMetadataVersion(GetObjectResponse objectResponse) { + return objectResponse.hasMetadata() && objectResponse.metadata().containsKey(CREATED_AT); + } + + private boolean 
isUploadedFileVersion(GetObjectResponse objectResponse) { + return Optional.ofNullable(objectResponse.contentDisposition()) + .or(() -> objectResponse.sdkHttpResponse().firstMatchingHeader("Content-Disposition")) + .isPresent(); + } + + private GetObjectResponse getObject(String fileId, + String versionId, + Map responseCache) { + return responseCache.computeIfAbsent(versionId, id -> loadObject(fileId, id)); + } + + private GetObjectResponse loadObject(String fileId, String versionId) { + try { + var request = GetObjectRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .key(fileId) + .versionId(versionId) + .build(); + GetObjectResponse objectResponse = s3SdkV2Client.getObject( + request, + (getObjectResponse, inputStream) -> getObjectResponse); + SdkHttpResponse response = objectResponse.sdkHttpResponse(); + log.info(String.format("Check get object result %d:%s", + response.statusCode(), response.statusText())); + if (response.isSuccessful()) { + log.info("Object version with file has been got, " + + "fileId={}, fileVersionId={}, bucketName={}", + fileId, versionId, s3SdkV2Properties.getBucketName()); + return objectResponse; + } else { + throw new IllegalStateException(String.format( + "Failed to get object version with file, " + + "fileId=%s, fileVersionId=%s, bucketName=%s", + fileId, versionId, s3SdkV2Properties.getBucketName())); + } + } catch (S3Exception ex) { + throw new IllegalStateException( + String.format( + "Failed to get object version with file, " + + "fileId=%s, fileVersionId=%s, bucketName=%s", + fileId, versionId, s3SdkV2Properties.getBucketName()), + ex); + } + } + + private String getFileName(String fileId, + String fileVersionId, + GetObjectResponse getObjectResponse) { + var response = getObjectResponse.sdkHttpResponse(); + return Optional.ofNullable(getObjectResponse.contentDisposition()) + .map(this::extractFileName) + .or(() -> response.firstMatchingHeader("Content-Disposition") + .map(this::extractFileName)) + 
.orElseThrow(() -> new IllegalStateException(String.format( + "Header 'Content-Disposition' in object version with file is empty, " + + "fileId=%s, fileVersionId=%s, bucketName=%s", + fileId, fileVersionId, s3SdkV2Properties.getBucketName()))); + } + + private String extractFileName(String contentDisposition) { + int fileNameIndex = contentDisposition.lastIndexOf(FILENAME_PARAM) + FILENAME_PARAM.length(); + return contentDisposition.substring(fileNameIndex).replaceAll("\"", ""); + } + + private PresignedGetObjectRequest getPresignedRequest(String fileId, Instant expirationTime, String fileVersionId) { + var presignRequest = GetObjectPresignRequest.builder() + .signatureDuration(resolvePresignDuration(expirationTime)) + .getObjectRequest(GetObjectRequest.builder() + .bucket(s3SdkV2Properties.getBucketName()) + .key(fileId) + .versionId(fileVersionId) + .build()) + .build(); + var presignedRequest = s3Presigner.presignGetObject(presignRequest); + log.info("Download url was presigned, fileId={}, bucketName={}, isBrowserExecutable={}", + fileId, s3SdkV2Properties.getBucketName(), presignedRequest.isBrowserExecutable()); + log.debug("Presigned http request={}", presignedRequest.httpRequest().toString()); + return presignedRequest; + } + + private Duration resolvePresignDuration(Instant expirationTime) { + var signatureDuration = Duration.between(Instant.now(), expirationTime); + if (signatureDuration.isNegative() || signatureDuration.isZero()) { + throw new IllegalArgumentException(String.format( + "Expiration time must be in the future, expirationTime=%s", expirationTime)); + } + if (signatureDuration.compareTo(MIN_PRESIGN_DURATION) < 0) { + return MIN_PRESIGN_DURATION; + } + return signatureDuration; + } + + private record FileMetadata(String fileId, String createdAt, Map metadata) { + + } +} diff --git a/src/main/java/dev/vality/file/storage/service/S3V2Service.java b/src/main/java/dev/vality/file/storage/service/S3V2Service.java deleted file mode 100644 index 
b9409009..00000000 --- a/src/main/java/dev/vality/file/storage/service/S3V2Service.java +++ /dev/null @@ -1,574 +0,0 @@ -package dev.vality.file.storage.service; - -import dev.vality.file.storage.*; -import dev.vality.file.storage.CompleteMultipartUploadRequest; -import dev.vality.file.storage.configuration.properties.S3SdkV2Properties; -import dev.vality.file.storage.service.exception.FileNotFoundException; -import dev.vality.file.storage.service.exception.StorageException; -import dev.vality.file.storage.util.DamselUtil; -import dev.vality.msgpack.Value; -import lombok.Getter; -import lombok.RequiredArgsConstructor; -import lombok.ToString; -import lombok.extern.slf4j.Slf4j; -import org.springframework.stereotype.Service; -import org.springframework.util.CollectionUtils; -import software.amazon.awssdk.core.exception.SdkException; -import software.amazon.awssdk.core.sync.RequestBody; -import software.amazon.awssdk.http.SdkHttpResponse; -import software.amazon.awssdk.services.s3.S3Client; -import software.amazon.awssdk.services.s3.model.*; -import software.amazon.awssdk.services.s3.presigner.S3Presigner; -import software.amazon.awssdk.services.s3.presigner.model.GetObjectPresignRequest; -import software.amazon.awssdk.services.s3.presigner.model.PresignedGetObjectRequest; -import software.amazon.awssdk.services.s3.presigner.model.PutObjectPresignRequest; - -import jakarta.annotation.PostConstruct; -import java.net.URL; -import java.net.URLEncoder; -import java.nio.charset.StandardCharsets; -import java.time.Duration; -import java.time.Instant; -import java.util.*; -import java.util.function.Predicate; -import java.util.stream.Collectors; - -@Service -@Slf4j -@RequiredArgsConstructor -public class S3V2Service implements StorageService { - - private static final String FILE_ID = "x-vality-file-id"; - private static final String CREATED_AT = "x-vality-created-at"; - private static final String METADATA = "x-vality-metadata-"; - private static final String 
FILENAME_METADATA = "filename"; - private static final String FILENAME_PARAM = "filename="; - - private final S3SdkV2Properties s3SdkV2Properties; - private final S3Client s3SdkV2Client; - private final S3Presigner s3Presigner; - - @PostConstruct - public void init() { - if (!doesBucketExist()) { - createBucket(); - enableBucketVersioning(); - } - } - - @Override - public NewFileResult createNewFile(Map metadata, Instant expirationTime) { - var fileId = UUID.randomUUID().toString(); - uploadFileMetadata(metadata, fileId); - var url = presignUploadUrl(expirationTime, fileId); - return new NewFileResult(fileId, url.toString()); - } - - @Override - public URL generateDownloadUrl(String fileId, Instant expirationTime) { - var versions = getObjectVersions(fileId); - checkFileExist(fileId, versions); - var fileVersionId = getFileVersionId(fileId, versions); - PresignedGetObjectRequest presignedRequest = getPresignedRequest(fileId, expirationTime, fileVersionId); - return presignedRequest.url(); - } - - @Override - public FileData getFileData(String fileId) { - var versions = getObjectVersions(fileId); - checkFileExist(fileId, versions); - var fileMetadataVersionId = getFileMetadataVersionId(fileId, versions); - var metadataObjectResponse = getObject(fileId, fileMetadataVersionId); - var fileMetadata = getFileMetadata(fileId, fileMetadataVersionId, metadataObjectResponse); - var fileVersionId = getFileVersionId(fileId, versions); - var fileObjectResponse = getObject(fileId, fileVersionId); - String fileName = getFileName(fileId, fileVersionId, fileObjectResponse); - return new FileData( - fileMetadata.getFileId(), - fileName, - fileMetadata.getCreatedAt(), - fileMetadata.getMetadata()); - } - - @Override - public FileData getMultipartFileData(String fileId) { - var versions = getObjectVersions(fileId); - if (CollectionUtils.isEmpty(versions)) { - throw new FileNotFoundException(String.format( - "Failed to check object version with file on exist, fileId=%s, bucketName=%s 
", - fileId, - s3SdkV2Properties.getBucketName())); - } - var fileVersionId = getFileVersionId(fileId, versions); - var objectResponse = getObject(fileId, fileVersionId); - var fileMetadata = getFileMetadata(fileId, fileVersionId, objectResponse); - Map metadata = fileMetadata.getMetadata(); - String fileName = metadata.get(FILENAME_METADATA).getStr(); - return new FileData( - fileId, - fileName, - fileMetadata.getCreatedAt(), - metadata); - } - - // единственный доступный вариант проверки существования бакета на данный момент через catch - // в репе сдк висит таска https://github.com/aws/aws-sdk-java-v2/issues/392#issuecomment-880224831 - // в первой версии сдк тоже через catch проверка на существование - // разница только в том, что проверка идет через метод S3Client#getBucketAcl - // во второй версии тоже есть этот метод, не уверен в чем разница с выбранным вариантом, - // но везде советуют его - private boolean doesBucketExist() { - try { - var request = HeadBucketRequest.builder() - .bucket(s3SdkV2Properties.getBucketName()) - .build(); - var headBucketResponse = s3SdkV2Client.headBucket(request); - var response = headBucketResponse.sdkHttpResponse(); - log.info(String.format("Check exist bucket result %d:%s", - response.statusCode(), response.statusText())); - if (response.isSuccessful()) { - log.info("Bucket is exist, bucketName={}", s3SdkV2Properties.getBucketName()); - } else { - throw new StorageException(String.format( - "Failed to check bucket on exist, bucketName=%s", s3SdkV2Properties.getBucketName())); - } - return true; - } catch (NoSuchBucketException ex) { - log.info("Bucket does not exist, bucketName={}", s3SdkV2Properties.getBucketName()); - return false; - } catch (S3Exception ex) { - throw new StorageException( - String.format("Failed to check bucket on exist, bucketName=%s", s3SdkV2Properties.getBucketName()), - ex); - } - } - - private void createBucket() { - try { - var s3Waiter = s3SdkV2Client.waiter(); - var createBucketRequest = 
CreateBucketRequest.builder() - .bucket(s3SdkV2Properties.getBucketName()) - .build(); - s3SdkV2Client.createBucket(createBucketRequest); - var headBucketRequest = HeadBucketRequest.builder() - .bucket(s3SdkV2Properties.getBucketName()) - .build(); - // Wait until the bucket is created and print out the response. - s3Waiter.waitUntilBucketExists(headBucketRequest) - .matched() - .response() - .ifPresent(headBucketResponse -> { - var response = headBucketResponse.sdkHttpResponse(); - log.info(String.format("Check created bucket result %d:%s", - response.statusCode(), response.statusText())); - if (response.isSuccessful()) { - log.info("Bucket has been created, bucketName={}", s3SdkV2Properties.getBucketName()); - } else { - throw new StorageException(String.format( - "Failed to create bucket, bucketName=%s", s3SdkV2Properties.getBucketName())); - } - }); - } catch (S3Exception ex) { - throw new StorageException( - String.format("Failed to create bucket, bucketName=%s", s3SdkV2Properties.getBucketName()), - ex); - } - } - - private void enableBucketVersioning() { - try { - var request = PutBucketVersioningRequest.builder() - .bucket(s3SdkV2Properties.getBucketName()) - .versioningConfiguration(VersioningConfiguration.builder() - .status(BucketVersioningStatus.ENABLED) - .build()) - .build(); - var putBucketVersioningResponse = s3SdkV2Client.putBucketVersioning(request); - var response = putBucketVersioningResponse.sdkHttpResponse(); - log.info(String.format("Check enable versioning bucket result %d:%s", - response.statusCode(), response.statusText())); - if (response.isSuccessful()) { - log.info("Versioning bucket has been enabled, bucketName={}", s3SdkV2Properties.getBucketName()); - } else { - throw new StorageException(String.format( - "Failed to enable bucket versioning, bucketName=%s", s3SdkV2Properties.getBucketName())); - } - } catch (S3Exception ex) { - throw new StorageException( - String.format("Failed to enable bucket versioning, " + - "bucketName=%s", 
s3SdkV2Properties.getBucketName()), - ex); - } - } - - private void uploadFileMetadata(Map metadata, String fileId) { - try { - HashMap s3Metadata = buildS3Metadata(metadata, fileId); - var request = PutObjectRequest.builder() - .bucket(s3SdkV2Properties.getBucketName()) - .key(fileId) - .metadata(s3Metadata) - .build(); - var putObjectResponse = s3SdkV2Client.putObject(request, RequestBody.empty()); - var response = putObjectResponse.sdkHttpResponse(); - log.info(String.format("Check upload object version with file metadata result %d:%s", - response.statusCode(), response.statusText())); - if (response.isSuccessful()) { - log.info("Object version with file metadata was uploaded, fileId={}, bucketName={}", - fileId, s3SdkV2Properties.getBucketName()); - } else { - throw new StorageException(String.format( - "Failed to upload object version with file metadata, fileId=%s, bucketName=%s", - fileId, s3SdkV2Properties.getBucketName())); - } - } catch (S3Exception ex) { - throw new StorageException( - String.format("Failed to upload object version with file metadata, fileId=%s, bucketName=%s", - fileId, s3SdkV2Properties.getBucketName()), - ex); - } - } - - private URL presignUploadUrl(Instant expirationTime, String fileId) { - var presignRequest = PutObjectPresignRequest.builder() - .signatureDuration(Duration.between(Instant.now(), expirationTime)) - .putObjectRequest(PutObjectRequest.builder() - .bucket(s3SdkV2Properties.getBucketName()) - .key(fileId) - .build()) - .build(); - var presignedRequest = s3Presigner.presignPutObject(presignRequest); - log.info("Upload url was presigned, fileId={}, bucketName={}", fileId, s3SdkV2Properties.getBucketName()); - log.debug("Presigned http request={}", presignedRequest.httpRequest().toString()); - return presignedRequest.url(); - } - - private List getObjectVersions(String fileId) { - try { - var request = ListObjectVersionsRequest.builder() - .bucket(s3SdkV2Properties.getBucketName()) - .prefix(fileId) - .build(); - var 
listObjectVersionsResponse = s3SdkV2Client.listObjectVersions(request); - var response = listObjectVersionsResponse.sdkHttpResponse(); - log.info(String.format("Check list object versions result %d:%s", - response.statusCode(), response.statusText())); - if (response.isSuccessful()) { - var versions = listObjectVersionsResponse.versions(); - log.info("List object versions has been got, fileId={}, bucketName={}, objectVersions={}, ", - fileId, - s3SdkV2Properties.getBucketName(), - versions.stream().map(ObjectVersion::toString).collect(Collectors.joining(","))); - return versions; - } else { - throw new StorageException(String.format( - "Failed to get list object versions, fileId=%s, bucketName=%s", - fileId, s3SdkV2Properties.getBucketName())); - } - } catch (S3Exception ex) { - throw new StorageException( - String.format( - "Failed to get list object versions, fileId=%s, bucketName=%s", - fileId, s3SdkV2Properties.getBucketName()), - ex); - } - } - - private void checkFileExist(String fileId, List versions) { - if (!doesFileExist(versions)) { - throw new FileNotFoundException(String.format( - "Failed to check object version with file on exist, fileId=%s, bucketName=%s, objectVersions=%s, ", - fileId, - s3SdkV2Properties.getBucketName(), - versions.stream().map(ObjectVersion::toString).collect(Collectors.joining(",")))); - } - } - - private Boolean doesFileExist(List versions) { - // должно быть 2 ревизии — 1я это метаданные, 2ая это сам загруженный файл - return versions.size() == 2; -// && versions.stream() -// .filter(v -> v.size() > 0) -// .map(v -> true) -// .findFirst() -// .orElse(false); - } - - private String getFileMetadataVersionId(String fileId, List versions) { - return versions.stream() - .filter(Predicate.not(ObjectVersion::isLatest)) - .findFirst() - .orElseThrow(() -> new StorageException(String.format( - "Object version with file metadata not found, fileId=%s, bucketName=%s, objectVersions=%s, ", - fileId, - s3SdkV2Properties.getBucketName(), - 
versions.stream().map(ObjectVersion::toString).collect(Collectors.joining(","))))) - .versionId(); - } - - private FileMetadata getFileMetadata(String fileId, - String versionId, - GetObjectResponse objectResponse) { - if (objectResponse.hasMetadata() && !objectResponse.metadata().isEmpty()) { - var s3Metadata = objectResponse.metadata(); - var metadata = s3Metadata.entrySet().stream() - .filter(entry -> entry.getKey().startsWith(METADATA) - && entry.getValue() != null) - .collect(Collectors.toMap( - o -> o.getKey().substring(METADATA.length()), - o -> DamselUtil.fromJson(o.getValue(), Value.class))); - return new FileMetadata(fileId, s3Metadata.get(CREATED_AT), metadata); - } else { - throw new StorageException(String.format( - "Object version with file metadata is empty, " + - "fileId=%s, fileMetadataVersionId=%s, bucketName=%s", - fileId, versionId, s3SdkV2Properties.getBucketName())); - } - } - - private String getFileVersionId(String fileId, List versions) { - return versions.stream() - .filter(ObjectVersion::isLatest) - .findFirst() - .orElseThrow(() -> new StorageException(String.format( - "Object version with file not found, fileId=%s, bucketName=%s, objectVersions=%s, ", - fileId, - s3SdkV2Properties.getBucketName(), - versions.stream().map(ObjectVersion::toString).collect(Collectors.joining(","))))) - .versionId(); - } - - private GetObjectResponse getObject(String fileId, String versionId) { - try { - var request = GetObjectRequest.builder() - .bucket(s3SdkV2Properties.getBucketName()) - .key(fileId) - .versionId(versionId) - .build(); - GetObjectResponse objectResponse = s3SdkV2Client.getObject( - request, - (getObjectResponse, inputStream) -> getObjectResponse); - SdkHttpResponse response = objectResponse.sdkHttpResponse(); - log.info(String.format("Check get object result %d:%s", - response.statusCode(), response.statusText())); - if (response.isSuccessful()) { - log.info("Object version with file has been got, " + - "fileId={}, fileVersionId={}, 
bucketName={}", - fileId, versionId, s3SdkV2Properties.getBucketName()); - return objectResponse; - } else { - throw new StorageException(String.format( - "Failed to get object version with file, " + - "fileId=%s, fileVersionId=%s, bucketName=%s", - fileId, versionId, s3SdkV2Properties.getBucketName())); - } - } catch (S3Exception ex) { - throw new StorageException( - String.format( - "Failed to get object version with file, " + - "fileId=%s, fileVersionId=%s, bucketName=%s", - fileId, versionId, s3SdkV2Properties.getBucketName()), - ex); - } - } - - private String getFileName(String fileId, - String fileVersionId, - GetObjectResponse getObjectResponse) { - var response = getObjectResponse.sdkHttpResponse(); - return Optional.ofNullable(getObjectResponse.contentDisposition()) - .map(this::extractFileName) - .or(() -> response.firstMatchingHeader("Content-Disposition") - .map(this::extractFileName)) - .orElseThrow(() -> new StorageException(String.format( - "Header 'Content-Disposition' in object version with file is empty, " + - "fileId=%s, fileVersionId=%s, bucketName=%s", - fileId, fileVersionId, s3SdkV2Properties.getBucketName()))); - } - - private String extractFileName(String contentDisposition) { - int fileNameIndex = contentDisposition.lastIndexOf(FILENAME_PARAM) + FILENAME_PARAM.length(); - return contentDisposition.substring(fileNameIndex).replaceAll("\"", ""); - } - - - @Override - public CreateMultipartUploadResult createMultipartUpload(Map metadata) { - if (!metadata.containsKey(FILENAME_METADATA)) { - throw new StorageException("Can't create multipart upload object without fileName"); - } - var fileId = UUID.randomUUID().toString(); - uploadFileMetadata(metadata, fileId); - try { - HashMap s3Metadata = buildS3Metadata(metadata, fileId); - String filename = URLEncoder.encode(metadata.get(FILENAME_METADATA).getStr(), StandardCharsets.UTF_8); - var createRequest = CreateMultipartUploadRequest.builder() - .bucket(s3SdkV2Properties.getBucketName()) - 
.key(fileId) - .metadata(s3Metadata) - .contentDisposition("attachment;filename=" + filename) - .build(); - CreateMultipartUploadResponse createResponse = s3SdkV2Client.createMultipartUpload(createRequest); - var response = createResponse.sdkHttpResponse(); - log.info("Check create multipart upload object with file metadata result {}:{}", - response.statusCode(), response.statusText()); - if (response.isSuccessful()) { - log.info("Multipart upload was created, fileId={}, bucketName={}, uploadId={}", - fileId, s3SdkV2Properties.getBucketName(), createResponse.uploadId()); - } else { - throw new StorageException(String.format( - "Failed to create multipart upload, fileId=%s, bucketName=%s", - fileId, s3SdkV2Properties.getBucketName())); - } - return new CreateMultipartUploadResult() - .setFileDataId(fileId) - .setMultipartUploadId(createResponse.uploadId()); - } catch (SdkException ex) { - throw new StorageException( - String.format("Failed to create multipart upload, fileId=%s, bucketName=%s", - fileId, s3SdkV2Properties.getBucketName()), - ex); - } - } - - private static HashMap buildS3Metadata(Map metadata, String fileId) { - var s3Metadata = new HashMap(); - s3Metadata.put(FILE_ID, fileId); - s3Metadata.put(CREATED_AT, Instant.now().toString()); - metadata.forEach((key, value) -> s3Metadata.put(METADATA + key, DamselUtil.toJsonString(value))); - return s3Metadata; - } - - @Override - public UploadMultipartResult uploadMultipart(UploadMultipartRequestData requestData) { - String fileId = requestData.getFileDataId(); - String multipartUploadId = requestData.getMultipartUploadId(); - try { - var uploadPartRequest = UploadPartRequest.builder() - .bucket(s3SdkV2Properties.getBucketName()) - .key(fileId) - .uploadId(multipartUploadId) - .partNumber(requestData.getSequencePart()) - .contentLength((long) requestData.getContentLength()) - .build(); - RequestBody requestBody = RequestBody.fromBytes(requestData.getContent()); - UploadPartResponse uploadPartResponse = 
s3SdkV2Client.uploadPart(uploadPartRequest, requestBody); - var response = uploadPartResponse.sdkHttpResponse(); - log.info("Check file part upload result {}:{}", - response.statusCode(), response.statusText()); - if (response.isSuccessful()) { - log.info("File part was uploaded, fileId={}, bucketName={}, uploadId={}, partId={}", - fileId, s3SdkV2Properties.getBucketName(), multipartUploadId, uploadPartResponse.eTag()); - } else { - throw new StorageException(String.format( - "Failed to upload file part, fileId=%s, bucketName=%s, uploadId=%s", - fileId, s3SdkV2Properties.getBucketName(), multipartUploadId)); - } - return new UploadMultipartResult() - .setPartId(uploadPartResponse.eTag()) - .setSequencePart(requestData.getSequencePart()); - } catch (SdkException ex) { - throw new StorageException( - String.format("Failed to upload file part, fileId=%s, bucketName=%s, uploadId=%s", - fileId, s3SdkV2Properties.getBucketName(), multipartUploadId), - ex); - } - } - - @Override - public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request) { - String fileId = request.getFileDataId(); - String multipartUploadId = request.getMultipartUploadId(); - try { - var completeRequest = buildRequest(request, fileId, multipartUploadId); - CompleteMultipartUploadResponse completeResponse = s3SdkV2Client.completeMultipartUpload(completeRequest); - var response = completeResponse.sdkHttpResponse(); - log.info("Check complete multipart upload result {}:{}", - response.statusCode(), response.statusText()); - if (response.isSuccessful()) { - log.info("Multipart upload was completed, fileId={}, bucketName={}, uploadId={}", - fileId, s3SdkV2Properties.getBucketName(), multipartUploadId); - } else { - throw new StorageException(String.format( - "Failed to complete multipart upload, fileId=%s, bucketName=%s, uploadId=%s", - fileId, s3SdkV2Properties.getBucketName(), multipartUploadId)); - } - String objectUrl = 
s3SdkV2Client.utilities().getUrl(GetUrlRequest.builder() - .bucket(s3SdkV2Properties.getBucketName()) - .key(fileId) - .build()) - .toExternalForm(); - log.info("Create url for multipart uploaded file, url={}, fileId={}, bucketName={}, uploadId={}", - objectUrl, fileId, s3SdkV2Properties.getBucketName(), multipartUploadId); - return new CompleteMultipartUploadResult() - .setUploadUrl(objectUrl); - } catch (SdkException ex) { - throw new StorageException( - String.format("Failed to complete multipart upload, fileId=%s, bucketName=%s, uploadId=%s", - fileId, s3SdkV2Properties.getBucketName(), multipartUploadId), - ex); - } - } - - @Override - public URL generateMultipartDownloadUrl(String fileId, Instant expirationTime) { - var versions = getObjectVersions(fileId); - if (CollectionUtils.isEmpty(versions)) { - throw new FileNotFoundException(String.format( - "Failed to check object version with file on exist, fileId=%s, bucketName=%s ", - fileId, - s3SdkV2Properties.getBucketName())); - } - var fileVersionId = getFileVersionId(fileId, versions); - PresignedGetObjectRequest presignedRequest = getPresignedRequest(fileId, expirationTime, fileVersionId); - return presignedRequest.url(); - } - - private PresignedGetObjectRequest getPresignedRequest(String fileId, Instant expirationTime, String fileVersionId) { - var presignRequest = GetObjectPresignRequest.builder() - .signatureDuration(Duration.between(Instant.now(), expirationTime)) - .getObjectRequest(GetObjectRequest.builder() - .bucket(s3SdkV2Properties.getBucketName()) - .key(fileId) - .versionId(fileVersionId) - .build()) - .build(); - var presignedRequest = s3Presigner.presignGetObject(presignRequest); - log.info("Download url was presigned, fileId={}, bucketName={}, isBrowserExecutable={}", - fileId, s3SdkV2Properties.getBucketName(), presignedRequest.isBrowserExecutable()); - log.debug("Presigned http request={}", presignedRequest.httpRequest().toString()); - return presignedRequest; - } - - private 
software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest buildRequest( - CompleteMultipartUploadRequest request, - String fileId, - String multipartUploadId) { - List completedParts = request.getCompletedParts().stream() - .map(completedMultipart -> CompletedPart.builder() - .eTag(completedMultipart.getPartId()) - .partNumber(completedMultipart.getSequencePart()) - .build()) - .toList(); - CompletedMultipartUpload completedUpload = CompletedMultipartUpload.builder() - .parts(completedParts) - .build(); - return software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest.builder() - .bucket(s3SdkV2Properties.getBucketName()) - .key(fileId) - .uploadId(multipartUploadId) - .multipartUpload(completedUpload) - .build(); - } - - @RequiredArgsConstructor - @Getter - @ToString - private static class FileMetadata { - - private final String fileId; - private final String createdAt; - private final Map metadata; - - } -} diff --git a/src/main/java/dev/vality/file/storage/service/StorageService.java b/src/main/java/dev/vality/file/storage/service/StorageService.java deleted file mode 100644 index 70c7dee5..00000000 --- a/src/main/java/dev/vality/file/storage/service/StorageService.java +++ /dev/null @@ -1,28 +0,0 @@ -package dev.vality.file.storage.service; - -import dev.vality.file.storage.*; -import dev.vality.msgpack.Value; - -import java.net.URL; -import java.time.Instant; -import java.util.Map; - -public interface StorageService { - - NewFileResult createNewFile(Map metadata, Instant expirationTime); - - URL generateDownloadUrl(String fileDataId, Instant expirationTime); - - FileData getFileData(String fileDataId); - - FileData getMultipartFileData(String fileId); - - CreateMultipartUploadResult createMultipartUpload(Map metadata); - - UploadMultipartResult uploadMultipart(UploadMultipartRequestData requestData); - - CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest request); - - URL 
generateMultipartDownloadUrl(String fileDataId, Instant expirationTime); - -} diff --git a/src/main/java/dev/vality/file/storage/service/exception/ExtractMetadataException.java b/src/main/java/dev/vality/file/storage/service/exception/ExtractMetadataException.java deleted file mode 100644 index 9d740902..00000000 --- a/src/main/java/dev/vality/file/storage/service/exception/ExtractMetadataException.java +++ /dev/null @@ -1,12 +0,0 @@ -package dev.vality.file.storage.service.exception; - -public class ExtractMetadataException extends StorageException { - - public ExtractMetadataException(String message) { - super(message); - } - - public ExtractMetadataException(String message, Throwable cause) { - super(message, cause); - } -} diff --git a/src/main/java/dev/vality/file/storage/service/exception/FileNotFoundException.java b/src/main/java/dev/vality/file/storage/service/exception/FileNotFoundException.java deleted file mode 100644 index f96cc6fd..00000000 --- a/src/main/java/dev/vality/file/storage/service/exception/FileNotFoundException.java +++ /dev/null @@ -1,12 +0,0 @@ -package dev.vality.file.storage.service.exception; - -public class FileNotFoundException extends StorageException { - - public FileNotFoundException(String message) { - super(message); - } - - public FileNotFoundException(String message, Throwable cause) { - super(message, cause); - } -} diff --git a/src/main/java/dev/vality/file/storage/service/exception/StorageException.java b/src/main/java/dev/vality/file/storage/service/exception/StorageException.java deleted file mode 100644 index b5249b10..00000000 --- a/src/main/java/dev/vality/file/storage/service/exception/StorageException.java +++ /dev/null @@ -1,12 +0,0 @@ -package dev.vality.file.storage.service.exception; - -public class StorageException extends RuntimeException { - - public StorageException(String message) { - super(message); - } - - public StorageException(String message, Throwable cause) { - super(message, cause); - } -} diff --git 
a/src/main/java/dev/vality/file/storage/service/exception/WaitingUploadException.java b/src/main/java/dev/vality/file/storage/service/exception/WaitingUploadException.java deleted file mode 100644 index d7fe1dc6..00000000 --- a/src/main/java/dev/vality/file/storage/service/exception/WaitingUploadException.java +++ /dev/null @@ -1,12 +0,0 @@ -package dev.vality.file.storage.service.exception; - -public class WaitingUploadException extends StorageException { - - public WaitingUploadException(String message) { - super(message); - } - - public WaitingUploadException(String message, Throwable cause) { - super(message, cause); - } -} diff --git a/src/main/java/dev/vality/file/storage/servlet/FileStoragePresignedMultipartServlet.java b/src/main/java/dev/vality/file/storage/servlet/FileStoragePresignedMultipartServlet.java new file mode 100644 index 00000000..69190c81 --- /dev/null +++ b/src/main/java/dev/vality/file/storage/servlet/FileStoragePresignedMultipartServlet.java @@ -0,0 +1,30 @@ +package dev.vality.file.storage.servlet; + +import dev.vality.file.storage.FileStoragePresignedMultipartSrv; +import dev.vality.woody.thrift.impl.http.THServiceBuilder; +import jakarta.servlet.*; +import jakarta.servlet.annotation.WebServlet; +import lombok.RequiredArgsConstructor; + +import java.io.IOException; + +@WebServlet("/file_storage/presigned-multipart/v1") +@RequiredArgsConstructor +public class FileStoragePresignedMultipartServlet extends GenericServlet { + + private final FileStoragePresignedMultipartSrv.Iface fileStoragePresignedMultipartHandler; + + private Servlet thriftServlet; + + @Override + public void init(ServletConfig config) throws ServletException { + super.init(config); + thriftServlet = new THServiceBuilder() + .build(FileStoragePresignedMultipartSrv.Iface.class, fileStoragePresignedMultipartHandler); + } + + @Override + public void service(ServletRequest req, ServletResponse res) throws ServletException, IOException { + thriftServlet.service(req, res); + } +} 
diff --git a/src/main/resources/application.yml b/src/main/resources/application.yml index d010113a..9930afd1 100644 --- a/src/main/resources/application.yml +++ b/src/main/resources/application.yml @@ -41,6 +41,7 @@ s3-sdk-v2: region: 'RU' access-key: 'minio' secret-key: 'minio123' + multipart-url-ttl: '1h' testcontainers: ceph: diff --git a/src/test/java/dev/vality/file/storage/CatchExceptionsTest.java b/src/test/java/dev/vality/file/storage/CatchExceptionsTest.java deleted file mode 100644 index 4a976e3f..00000000 --- a/src/test/java/dev/vality/file/storage/CatchExceptionsTest.java +++ /dev/null @@ -1,48 +0,0 @@ -package dev.vality.file.storage; - -import com.amazonaws.SdkClientException; -import com.amazonaws.services.s3.AmazonS3; -import com.amazonaws.services.s3.transfer.TransferManager; -import dev.vality.woody.api.flow.error.WRuntimeException; -import dev.vality.woody.thrift.impl.http.THSpawnClientBuilder; -import org.apache.thrift.TException; -import org.junit.jupiter.api.Disabled; -import org.junit.jupiter.api.Test; -import org.mockito.Mockito; -import org.springframework.boot.test.context.SpringBootTest; -import org.springframework.test.context.bean.override.mockito.MockitoBean; -import org.springframework.boot.test.web.server.LocalServerPort; - -import java.net.URI; -import java.net.URISyntaxException; -import java.util.UUID; - -import static org.junit.jupiter.api.Assertions.assertThrows; - -@Disabled -@SpringBootTest(webEnvironment = SpringBootTest.WebEnvironment.RANDOM_PORT) -public class CatchExceptionsTest { - - @LocalServerPort - protected int port; - - @MockitoBean - private TransferManager transferManager; - - @MockitoBean - private AmazonS3 s3Client; - - @Test - public void shouldThrowException() throws URISyntaxException, TException { - FileStorageSrv.Iface fileStorageCli = new THSpawnClientBuilder() - .withAddress(new URI("http://localhost:" + port + "/file_storage/v2")) - .withNetworkTimeout(555000) - .build(FileStorageSrv.Iface.class); - - 
Mockito.when(s3Client.getObject(Mockito.any())).thenThrow(SdkClientException.class); - - assertThrows( - WRuntimeException.class, - () -> fileStorageCli.getFileData(UUID.randomUUID().toString())); - } -} diff --git a/src/test/java/dev/vality/file/storage/FileStorageTest.java b/src/test/java/dev/vality/file/storage/FileStorageTest.java index 3634df68..4f90eb2d 100644 --- a/src/test/java/dev/vality/file/storage/FileStorageTest.java +++ b/src/test/java/dev/vality/file/storage/FileStorageTest.java @@ -4,9 +4,11 @@ import dev.vality.woody.thrift.impl.http.THSpawnClientBuilder; import org.apache.http.HttpEntity; import org.apache.http.client.HttpResponseException; +import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpGet; import org.apache.http.client.methods.HttpPut; import org.apache.http.entity.ByteArrayEntity; +import org.apache.http.entity.ContentType; import org.apache.http.entity.FileEntity; import org.apache.http.impl.client.AbstractResponseHandler; import org.apache.http.impl.client.BasicResponseHandler; @@ -33,7 +35,6 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.StandardCopyOption; -import java.time.Instant; import java.util.*; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executors; @@ -54,6 +55,7 @@ public abstract class FileStorageTest { private static final String FILE_NAME = "asd123.asd"; protected FileStorageSrv.Iface fileStorageClient; + protected FileStoragePresignedMultipartSrv.Iface fileStoragePresignedMultipartClient; @Value("${local.server.port}") private int port; @@ -64,6 +66,10 @@ public void setUp() throws Exception { .withAddress(new URI("http://localhost:" + port + "/file_storage/v2")) .withNetworkTimeout(TIMEOUT) .build(FileStorageSrv.Iface.class); + fileStoragePresignedMultipartClient = new THSpawnClientBuilder() + .withAddress(new URI("http://localhost:" + port + "/file_storage/presigned-multipart/v1")) + 
.withNetworkTimeout(TIMEOUT) + .build(FileStoragePresignedMultipartSrv.Iface.class); } @Test @@ -276,66 +282,132 @@ public void s3ConnectionPoolTest() throws Exception { } @Test - public void multipartUploadTest() throws Exception { - var exception = assertThrows(WRuntimeException.class, - () -> fileStorageClient.createMultipartUpload(Collections.emptyMap())); - assertThat(exception.getErrorDefinition().getErrorReason(), - containsString("Can't create multipart upload object without fileName")); - - dev.vality.msgpack.Value fileName = str("fileName"); - var metadata = Map.of("filename", fileName); - CreateMultipartUploadResult createResult = fileStorageClient.createMultipartUpload(metadata); + public void presignedMultipartUploadFlowTest() throws Exception { + dev.vality.msgpack.Value fileName = str("test_registry.csv"); + Map metadata = Map.of("filename", fileName); + PresignedMultipartUpload createResult = fileStoragePresignedMultipartClient.createMultipartUpload(metadata); assertNotNull(createResult.getFileDataId()); assertNotNull(createResult.getMultipartUploadId()); - - List completedParts = new ArrayList<>(); - processMultipartUpload(createResult, completedParts); - - var completeRequest = new CompleteMultipartUploadRequest() + assertEquals(FileUploadStatus.pending_upload, createResult.getUploadStatus()); + assertEquals(fileName.getStr(), createResult.getFileName()); + assertFalse(createResult.isSetFileUrl()); + + PresignedMultipartUpload pendingUpload = fileStoragePresignedMultipartClient + .getMultipartUpload(createResult.getFileDataId()); + assertEquals(FileUploadStatus.pending_upload, pendingUpload.getUploadStatus()); + assertEquals(createResult.getMultipartUploadId(), pendingUpload.getMultipartUploadId()); + assertEquals(fileName.getStr(), pendingUpload.getFileName()); + + List completedParts = processPresignedMultipartUpload(createResult); + var completeRequest = new CompletePresignedMultipartUploadRequest() 
.setMultipartUploadId(createResult.getMultipartUploadId()) .setFileDataId(createResult.getFileDataId()) .setCompletedParts(completedParts); - - CompleteMultipartUploadResult result = fileStorageClient.completeMultipartUpload(completeRequest); + CompletePresignedMultipartUploadResult result = fileStoragePresignedMultipartClient + .completeMultipartUpload(completeRequest); assertNotNull(result); - assertNotNull(result.getUploadUrl()); + assertEquals(createResult.getFileDataId(), result.getFileDataId()); + assertEquals(createResult.getMultipartUploadId(), result.getMultipartUploadId()); + assertEquals(FileUploadStatus.uploaded, result.getUploadStatus()); + assertNotNull(result.getFileUrl()); + + PresignedMultipartUpload uploadedState = fileStoragePresignedMultipartClient + .getMultipartUpload(createResult.getFileDataId()); + assertEquals(FileUploadStatus.uploaded, uploadedState.getUploadStatus()); + assertEquals(result.getFileUrl(), uploadedState.getFileUrl()); + + FileData multipartFileData = fileStorageClient.getFileData(createResult.getFileDataId()); + assertEquals(createResult.getFileDataId(), multipartFileData.getFileDataId()); + assertEquals(fileName.getStr(), multipartFileData.getFileName()); + assertNotNull(multipartFileData.getCreatedAt()); + assertNotNull(multipartFileData.getMetadata()); + + String downloadUrl = fileStorageClient.generateDownloadUrl( + createResult.getFileDataId(), generateCurrentTimePlusDay().toString()); + downloadTestData(downloadUrl, getFileFromResources("test_registry.csv")); } @Test - public void getMultipartFileData() throws Exception { - dev.vality.msgpack.Value value = new dev.vality.msgpack.Value(); - String fileName = "test_registry.csv"; - value.setStr(fileName); - Map metadata = Map.of("filename", value); - CreateMultipartUploadResult createResult = fileStorageClient.createMultipartUpload(metadata); - assertNotNull(createResult.getFileDataId()); - assertNotNull(createResult.getMultipartUploadId()); - - List completedParts = new 
ArrayList<>(); - processMultipartUpload(createResult, completedParts); + public void presignedMultipartAbortFlowTest() throws Exception { + dev.vality.msgpack.Value fileName = str("test_registry.csv"); + Map metadata = Map.of("filename", fileName); + PresignedMultipartUpload createResult = fileStoragePresignedMultipartClient.createMultipartUpload(metadata); - var completeRequest = new CompleteMultipartUploadRequest() - .setMultipartUploadId(createResult.getMultipartUploadId()) + fileStoragePresignedMultipartClient.abortMultipartUpload(new AbortMultipartUploadRequest() .setFileDataId(createResult.getFileDataId()) - .setCompletedParts(completedParts); + .setMultipartUploadId(createResult.getMultipartUploadId())); - fileStorageClient.completeMultipartUpload(completeRequest); + PresignedMultipartUpload abortedState = fileStoragePresignedMultipartClient + .getMultipartUpload(createResult.getFileDataId()); + assertEquals(FileUploadStatus.aborted, abortedState.getUploadStatus()); + assertFalse(abortedState.isSetFileUrl()); - FileData multipartFileData = fileStorageClient.getFileData(createResult.getFileDataId()); + assertThrows(WRuntimeException.class, () -> fileStoragePresignedMultipartClient.presignMultipartUploadPart( + new PresignMultipartUploadPartRequest() + .setFileDataId(createResult.getFileDataId()) + .setMultipartUploadId(createResult.getMultipartUploadId()) + .setSequencePart(1) + .setContentLength((long) FILE_DATA.getBytes(StandardCharsets.UTF_8).length))); + } - assertEquals(createResult.getFileDataId(), multipartFileData.getFileDataId()); - assertEquals(fileName, multipartFileData.getFileName()); - assertNotNull(multipartFileData.getCreatedAt()); - assertNotNull(multipartFileData.getMetadata()); + @Test + public void presignedMultipartRejectsWrongUploadIdTest() throws Exception { + dev.vality.msgpack.Value fileName = str("test_registry.csv"); + Map metadata = Map.of("filename", fileName); + PresignedMultipartUpload createResult = 
fileStoragePresignedMultipartClient.createMultipartUpload(metadata); + + var exception = assertThrows(WRuntimeException.class, () -> fileStoragePresignedMultipartClient + .presignMultipartUploadPart(new PresignMultipartUploadPartRequest() + .setFileDataId(createResult.getFileDataId()) + .setMultipartUploadId("wrong-upload-id") + .setSequencePart(1) + .setContentLength((long) FILE_DATA.getBytes(StandardCharsets.UTF_8).length))); + assertThat(exception.getErrorDefinition().getErrorReason(), + containsString("Multipart upload id does not match stored state")); + } + + @Test + public void presignedMultipartRejectsDuplicateCompletedPartsTest() throws Exception { + dev.vality.msgpack.Value fileName = str("test_registry.csv"); + Map metadata = Map.of("filename", fileName); + PresignedMultipartUpload createResult = fileStoragePresignedMultipartClient.createMultipartUpload(metadata); + + PresignMultipartUploadPartResult presignedPart = fileStoragePresignedMultipartClient + .presignMultipartUploadPart(new PresignMultipartUploadPartRequest() + .setFileDataId(createResult.getFileDataId()) + .setMultipartUploadId(createResult.getMultipartUploadId()) + .setSequencePart(1) + .setContentLength((long) FILE_DATA.getBytes(StandardCharsets.UTF_8).length)); + + String etag = uploadPresignedPart( + presignedPart.getUploadUrl(), + FILE_DATA.getBytes(StandardCharsets.UTF_8), + presignedPart.getRequiredHeaders()); + + List duplicateParts = List.of( + new CompletedPresignedMultipart().setSequencePart(1).setEtag(etag), + new CompletedPresignedMultipart().setSequencePart(1).setEtag(etag) + ); + + var exception = assertThrows(WRuntimeException.class, () -> fileStoragePresignedMultipartClient + .completeMultipartUpload(new CompletePresignedMultipartUploadRequest() + .setFileDataId(createResult.getFileDataId()) + .setMultipartUploadId(createResult.getMultipartUploadId()) + .setCompletedParts(duplicateParts))); + assertThat(exception.getErrorDefinition().getErrorReason(), + 
containsString("Completed parts must contain unique sequencePart values")); } @Test - public void generateMultipartDownloadUrl() throws Exception { - dev.vality.msgpack.Value value = new dev.vality.msgpack.Value(); - String fileName = "test_registry.csv"; - value.setStr(fileName); - Map metadata = Map.of("filename", value); + public void multipartUploadTest() throws Exception { + var exception = assertThrows(WRuntimeException.class, + () -> fileStorageClient.createMultipartUpload(Collections.emptyMap())); + assertThat(exception.getErrorDefinition().getErrorReason(), + containsString("Can't create multipart upload object without fileName")); + + dev.vality.msgpack.Value fileName = str("fileName"); + var metadata = Map.of("filename", fileName); CreateMultipartUploadResult createResult = fileStorageClient.createMultipartUpload(metadata); assertNotNull(createResult.getFileDataId()); assertNotNull(createResult.getMultipartUploadId()); @@ -348,12 +420,10 @@ public void generateMultipartDownloadUrl() throws Exception { .setFileDataId(createResult.getFileDataId()) .setCompletedParts(completedParts); - fileStorageClient.completeMultipartUpload(completeRequest); - - String expiredTime = Instant.now().toString(); - String url = fileStorageClient.generateDownloadUrl(createResult.getFileDataId(), expiredTime); + CompleteMultipartUploadResult result = fileStorageClient.completeMultipartUpload(completeRequest); - assertNotNull(url); + assertNotNull(result); + assertNotNull(result.getUploadUrl()); } private void uploadTestData(NewFileResult fileResult, String fileName, Path testData) throws IOException { @@ -472,4 +542,59 @@ private void processMultipartUpload(CreateMultipartUploadResult createResult, e.printStackTrace(); } } + + private List processPresignedMultipartUpload(PresignedMultipartUpload createResult) + throws Exception { + int partNumber = 1; + ByteBuffer buffer = ByteBuffer.allocate(5 * 1024 * 1024); + List completedParts = new ArrayList<>(); + Path path = 
getFileFromResources("test_registry.csv"); + try (RandomAccessFile file = new RandomAccessFile(path.toFile(), "r")) { + long fileSize = file.length(); + long position = 0; + while (position < fileSize) { + file.seek(position); + int bytesRead = file.getChannel().read(buffer); + buffer.flip(); + byte[] content = new byte[bytesRead]; + buffer.get(content); + + PresignMultipartUploadPartResult presignedPart = fileStoragePresignedMultipartClient + .presignMultipartUploadPart(new PresignMultipartUploadPartRequest() + .setFileDataId(createResult.getFileDataId()) + .setMultipartUploadId(createResult.getMultipartUploadId()) + .setSequencePart(partNumber) + .setContentLength((long) bytesRead)); + + assertTrue(presignedPart.isSetExpiresAt()); + assertTrue(presignedPart.isSetRequiredHeaders()); + + String etag = uploadPresignedPart(presignedPart.getUploadUrl(), content, + presignedPart.getRequiredHeaders()); + completedParts.add(new CompletedPresignedMultipart() + .setSequencePart(partNumber) + .setEtag(etag)); + + buffer.clear(); + position += bytesRead; + partNumber++; + } + } + return completedParts; + } + + private String uploadPresignedPart(String uploadUrl, byte[] content, Map requiredHeaders) + throws IOException { + try (var client = HttpClients.createDefault()) { + var requestPut = new HttpPut(uploadUrl); + requiredHeaders.forEach(requestPut::setHeader); + requestPut.setEntity(new ByteArrayEntity(content, ContentType.APPLICATION_OCTET_STREAM)); + try (CloseableHttpResponse response = client.execute(requestPut)) { + assertEquals(HttpStatus.OK.value(), response.getStatusLine().getStatusCode()); + var etagHeader = response.getFirstHeader("ETag"); + assertNotNull(etagHeader); + return etagHeader.getValue(); + } + } + } }