docker: use minio for s3 storage

This commit is contained in:
Abdullah Atta
2022-12-31 14:56:00 +05:00
parent 78f8151828
commit 7ebbbb8004
4 changed files with 99 additions and 19 deletions

View File

@@ -34,10 +34,26 @@ using Streetwriters.Common;
namespace Notesnook.API.Services
{
// Selects which AmazonS3Client an S3 operation should use when the service
// runs behind a container network (see the comment on S3InternalClient).
enum S3ClientMode
{
INTERNAL = 0, // client configured with the container-internal service URL, when one is set
EXTERNAL = 1 // default client; its presigned URLs are valid from outside the container network
}
public class S3Service : IS3Service
{
private readonly string BUCKET_NAME = "nn-attachments";
private AmazonS3Client S3Client { get; }
// When running in a dockerized environment the sync server doesn't have access
// to the host's S3 Service URL. It can only talk to S3 server via its own internal
// network. This creates the issue where the client needs host-level access while
// the sync server needs only internal access.
// This wouldn't be a big issue (just map one to the other, right?) but the signed
// URLs generated by S3 are host-specific. Changing their hostname on the fly causes
// a SignatureDoesNotMatch error.
// That is why we create two separate S3 clients: one for internal traffic and one for external.
private AmazonS3Client S3InternalClient { get; }
private HttpClient httpClient = new HttpClient();
public S3Service()
@@ -59,6 +75,19 @@ namespace Notesnook.API.Services
#else
S3Client = new AmazonS3Client(Constants.S3_ACCESS_KEY_ID, Constants.S3_ACCESS_KEY, config);
#endif
if (!string.IsNullOrEmpty(Constants.S3_INTERNAL_SERVICE_URL))
{
S3InternalClient = new AmazonS3Client(Constants.S3_ACCESS_KEY_ID, Constants.S3_ACCESS_KEY, new AmazonS3Config
{
ServiceURL = Constants.S3_INTERNAL_SERVICE_URL,
AuthenticationRegion = Constants.S3_REGION,
ForcePathStyle = true,
SignatureMethod = SigningAlgorithm.HmacSHA256,
SignatureVersion = "4"
});
}
AWSConfigsS3.UseSignatureVersion4 = true;
}
@@ -67,7 +96,7 @@ namespace Notesnook.API.Services
var objectName = GetFullObjectName(userId, name);
if (objectName == null) throw new Exception("Invalid object name."); ;
var response = await S3Client.DeleteObjectAsync(BUCKET_NAME, objectName);
var response = await GetS3Client(S3ClientMode.INTERNAL).DeleteObjectAsync(BUCKET_NAME, objectName);
if (!IsSuccessStatusCode(((int)response.HttpStatusCode)))
throw new Exception("Could not delete object.");
@@ -85,7 +114,7 @@ namespace Notesnook.API.Services
var keys = new List<KeyVersion>();
do
{
response = await S3Client.ListObjectsV2Async(request);
response = await GetS3Client(S3ClientMode.INTERNAL).ListObjectsV2Async(request);
response.S3Objects.ForEach(obj => keys.Add(new KeyVersion
{
Key = obj.Key,
@@ -110,7 +139,7 @@ namespace Notesnook.API.Services
public async Task<long?> GetObjectSizeAsync(string userId, string name)
{
var url = this.GetPresignedURL(userId, name, HttpVerb.HEAD);
var url = this.GetPresignedURL(userId, name, HttpVerb.HEAD, S3ClientMode.INTERNAL);
if (url == null) return null;
var request = new HttpRequestMessage(HttpMethod.Head, url);
@@ -140,7 +169,7 @@ namespace Notesnook.API.Services
if (string.IsNullOrEmpty(uploadId))
{
var response = await S3Client.InitiateMultipartUploadAsync(BUCKET_NAME, objectName);
var response = await GetS3Client(S3ClientMode.INTERNAL).InitiateMultipartUploadAsync(BUCKET_NAME, objectName);
if (!IsSuccessStatusCode(((int)response.HttpStatusCode))) throw new Exception("Failed to initiate multipart upload.");
uploadId = response.UploadId;
@@ -164,7 +193,7 @@ namespace Notesnook.API.Services
var objectName = GetFullObjectName(userId, name);
if (userId == null || objectName == null) throw new Exception("Could not abort multipart upload.");
var response = await S3Client.AbortMultipartUploadAsync(BUCKET_NAME, objectName, uploadId);
var response = await GetS3Client(S3ClientMode.INTERNAL).AbortMultipartUploadAsync(BUCKET_NAME, objectName, uploadId);
if (!IsSuccessStatusCode(((int)response.HttpStatusCode))) throw new Exception("Failed to abort multipart upload.");
}
@@ -175,11 +204,11 @@ namespace Notesnook.API.Services
uploadRequest.Key = objectName;
uploadRequest.BucketName = BUCKET_NAME;
var response = await S3Client.CompleteMultipartUploadAsync(uploadRequest);
var response = await GetS3Client(S3ClientMode.INTERNAL).CompleteMultipartUploadAsync(uploadRequest);
if (!IsSuccessStatusCode(((int)response.HttpStatusCode))) throw new Exception("Failed to complete multipart upload.");
}
private string GetPresignedURL(string userId, string name, HttpVerb httpVerb)
private string GetPresignedURL(string userId, string name, HttpVerb httpVerb, S3ClientMode mode = S3ClientMode.EXTERNAL)
{
var objectName = GetFullObjectName(userId, name);
if (userId == null || objectName == null) return null;
@@ -193,15 +222,16 @@ namespace Notesnook.API.Services
#if DEBUG
Protocol = Protocol.HTTP,
#else
Protocol = Protocol.HTTPS,
Protocol = Constants.IS_SELF_HOSTED ? Protocol.HTTP : Protocol.HTTPS,
#endif
};
return S3Client.GetPreSignedURL(request);
return GetS3Client(mode).GetPreSignedURL(request);
}
private string GetPresignedURLForUploadPart(string objectName, string uploadId, int partNumber)
{
return S3Client.GetPreSignedURL(new GetPreSignedUrlRequest
return GetS3Client().GetPreSignedURL(new GetPreSignedUrlRequest
{
BucketName = BUCKET_NAME,
Expires = System.DateTime.Now.AddHours(1),
@@ -212,7 +242,7 @@ namespace Notesnook.API.Services
#if DEBUG
Protocol = Protocol.HTTP,
#else
Protocol = Protocol.HTTPS,
Protocol = Constants.IS_SELF_HOSTED ? Protocol.HTTP : Protocol.HTTPS,
#endif
});
}
@@ -227,5 +257,11 @@ namespace Notesnook.API.Services
{
return ((int)statusCode >= 200) && ((int)statusCode <= 299);
}
// Picks the S3 client for the requested traffic mode. Internal mode only
// applies when an internal client was actually configured; otherwise every
// call falls back to the default (external) client.
AmazonS3Client GetS3Client(S3ClientMode mode = S3ClientMode.EXTERNAL)
{
var useInternal = mode == S3ClientMode.INTERNAL && S3InternalClient != null;
return useInternal ? S3InternalClient : S3Client;
}
}
}