s3: add support for failover

Abdullah Atta
2025-12-17 09:06:26 +05:00
parent 347507f00a
commit 265b456c46
5 changed files with 494 additions and 99 deletions

View File

@@ -102,7 +102,7 @@ namespace Notesnook.API.Controllers
private async Task<long> UploadFileAsync(string userId, string name, long fileSize)
{
-var url = s3Service.GetInternalUploadObjectUrl(userId, name) ?? throw new Exception("Could not create signed url.");
+var url = await s3Service.GetInternalUploadObjectUrlAsync(userId, name) ?? throw new Exception("Could not create signed url.");
var httpClient = new HttpClient();
var content = new StreamContent(HttpContext.Request.BodyReader.AsStream());
@@ -168,7 +168,7 @@ namespace Notesnook.API.Controllers
try
{
var userId = this.User.GetUserId();
-var url = await s3Service.GetDownloadObjectUrl(userId, name);
+var url = await s3Service.GetDownloadObjectUrlAsync(userId, name);
if (url == null) return BadRequest("Could not create signed url.");
return Ok(url);
}

View File

@@ -0,0 +1,414 @@
/*
This file is part of the Notesnook Sync Server project (https://notesnook.com/)
Copyright (C) 2023 Streetwriters (Private) Limited
This program is free software: you can redistribute it and/or modify
it under the terms of the Affero GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
Affero GNU General Public License for more details.
You should have received a copy of the Affero GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading.Tasks;
using Amazon.S3;
using Amazon.S3.Model;
using Microsoft.Extensions.Logging;
namespace Notesnook.API.Helpers
{
/// <summary>
/// Configuration for S3 failover behavior
/// </summary>
public class S3FailoverConfig
{
/// <summary>
/// Maximum number of retry attempts per endpoint
/// </summary>
public int MaxRetries { get; set; } = 3;
/// <summary>
/// Delay between retries in milliseconds
/// </summary>
public int RetryDelayMs { get; set; } = 1000;
/// <summary>
/// Whether to use exponential backoff for retries
/// </summary>
public bool UseExponentialBackoff { get; set; } = true;
/// <summary>
/// Whether to allow failover for write operations (PUT, POST, DELETE).
/// Default is false to prevent data consistency issues.
/// </summary>
public bool AllowWriteFailover { get; set; } = false;
/// <summary>
/// List of exception types that should trigger failover
/// </summary>
public HashSet<Type> FailoverExceptions { get; set; } = new()
{
typeof(AmazonS3Exception),
typeof(System.Net.Http.HttpRequestException),
typeof(System.Net.Sockets.SocketException),
typeof(System.Threading.Tasks.TaskCanceledException),
typeof(TimeoutException)
};
/// <summary>
/// List of S3 error codes that should trigger failover
/// </summary>
public HashSet<string> FailoverErrorCodes { get; set; } = new()
{
"ServiceUnavailable",
"SlowDown",
"InternalError",
"RequestTimeout"
};
}
/// <summary>
/// Result of a failover operation
/// </summary>
public class S3FailoverResult<T>
{
public T? Result { get; set; }
public bool UsedFailover { get; set; }
public int ClientIndex { get; set; } = 0;
public int AttemptsUsed { get; set; }
public Exception? LastException { get; set; }
}
/// <summary>
/// Helper class for S3 operations with automatic failover to multiple endpoints
/// </summary>
public class S3FailoverHelper
{
private readonly List<AmazonS3Client> clients;
private readonly S3FailoverConfig config;
private readonly ILogger? logger;
/// <summary>
/// Initialize with a list of S3 clients (first is primary, rest are failover endpoints)
/// </summary>
public S3FailoverHelper(
IEnumerable<AmazonS3Client> clients,
S3FailoverConfig? config = null,
ILogger? logger = null)
{
if (clients == null) throw new ArgumentNullException(nameof(clients));
this.clients = new List<AmazonS3Client>(clients);
if (this.clients.Count == 0) throw new ArgumentException("At least one S3 client is required", nameof(clients));
this.config = config ?? new S3FailoverConfig();
this.logger = logger;
}
/// <summary>
/// Initialize with params array of S3 clients
/// </summary>
public S3FailoverHelper(
S3FailoverConfig? config = null,
ILogger? logger = null,
params AmazonS3Client[] clients)
{
if (clients == null || clients.Length == 0)
throw new ArgumentException("At least one S3 client is required", nameof(clients));
this.clients = new List<AmazonS3Client>(clients);
this.config = config ?? new S3FailoverConfig();
this.logger = logger;
}
/// <summary>
/// Execute an S3 operation with automatic failover
/// </summary>
/// <param name="operation">The S3 operation to execute</param>
/// <param name="operationName">Name of the operation for logging</param>
/// <param name="isWriteOperation">Whether this is a write operation (PUT/POST/DELETE). Write operations only use primary endpoint by default.</param>
public async Task<T> ExecuteWithFailoverAsync<T>(
Func<AmazonS3Client, Task<T>> operation,
string operationName = "S3Operation",
bool isWriteOperation = false)
{
var result = await ExecuteWithFailoverInternalAsync(operation, operationName, isWriteOperation);
if (result.Result == null)
{
throw result.LastException ?? new Exception($"Failed to execute {operationName} on all endpoints");
}
return result.Result;
}
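// Illustrative usage (hypothetical helper instance, bucket, and key;
// GetObjectMetadataAsync is a standard AmazonS3Client method): a read like
// this may fail over across endpoints, while passing isWriteOperation: true
// pins the call to the primary endpoint unless AllowWriteFailover is set.
//
// var metadata = await helper.ExecuteWithFailoverAsync(
//     client => client.GetObjectMetadataAsync("my-bucket", "my-key"),
//     operationName: "GetObjectMetadata");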
/// <summary>
/// Execute an S3 operation with automatic failover and return detailed result
/// </summary>
/// <param name="operation">The S3 operation to execute</param>
/// <param name="operationName">Name of the operation for logging</param>
/// <param name="isWriteOperation">Whether this is a write operation (PUT/POST/DELETE). Write operations only use primary endpoint by default.</param>
private async Task<S3FailoverResult<T>> ExecuteWithFailoverInternalAsync<T>(
Func<AmazonS3Client, Task<T>> operation,
string operationName = "S3Operation",
bool isWriteOperation = false)
{
var result = new S3FailoverResult<T>();
Exception? lastException = null;
// Determine max clients to try based on write operation flag
var maxClientsToTry = (isWriteOperation && !config.AllowWriteFailover) ? 1 : clients.Count;
if (isWriteOperation && !config.AllowWriteFailover && clients.Count > 1)
{
logger?.LogDebug(
"Write operation {Operation} will only use primary endpoint. Failover is disabled for write operations.",
operationName);
}
// Try each client in sequence (first is primary, rest are failovers)
for (int i = 0; i < maxClientsToTry; i++)
{
var client = clients[i];
var clientName = i == 0 ? "primary" : $"failover-{i}";
var isPrimary = i == 0;
if (!isPrimary && lastException != null)
{
logger?.LogWarning(lastException,
"Previous S3 endpoint failed for {Operation}. Attempting {ClientName} (endpoint {Index}/{Total}).",
operationName, clientName, i + 1, maxClientsToTry);
}
var (success, value, exception, attempts) = await TryExecuteAsync(client, operation, operationName, clientName);
result.AttemptsUsed += attempts;
if (success && value != null)
{
result.Result = value;
result.UsedFailover = !isPrimary;
result.ClientIndex = i;
if (!isPrimary)
{
logger?.LogInformation(
"Successfully failed over to {ClientName} S3 endpoint for {Operation}",
clientName, operationName);
}
return result;
}
lastException = exception;
// Fail fast on errors that failover cannot help with (e.g. access
// denied), which would fail identically on every endpoint.
if (!ShouldFailover(exception)) break;
// If this is not the last client, log and continue with the next one
if (i < maxClientsToTry - 1)
{
logger?.LogWarning(exception,
"Endpoint {ClientName} failed for {Operation}. {Remaining} endpoint(s) remaining.",
clientName, operationName, maxClientsToTry - i - 1);
}
}
// All clients failed
result.LastException = lastException;
logger?.LogError(lastException,
"All S3 endpoints failed for {Operation}. Total endpoints tried: {EndpointCount}, Total attempts: {Attempts}",
operationName, maxClientsToTry, result.AttemptsUsed);
return result;
}
/// <summary>
/// Try to execute an operation with retries
/// </summary>
private async Task<(bool success, T? value, Exception? exception, int attempts)> TryExecuteAsync<T>(
AmazonS3Client client,
Func<AmazonS3Client, Task<T>> operation,
string operationName,
string endpointName)
{
Exception? lastException = null;
int attempts = 0;
for (int retry = 0; retry <= config.MaxRetries; retry++)
{
attempts++;
try
{
var result = await operation(client);
return (true, result, null, attempts);
}
catch (Exception ex)
{
lastException = ex;
if (retry < config.MaxRetries && ShouldRetry(ex))
{
var delay = CalculateRetryDelay(retry);
logger?.LogWarning(ex,
"Attempt {Attempt}/{MaxAttempts} failed for {Operation} on {Endpoint}. Retrying in {Delay}ms",
retry + 1, config.MaxRetries + 1, operationName, endpointName, delay);
await Task.Delay(delay);
}
else
{
logger?.LogError(ex,
"Operation {Operation} failed on {Endpoint} after {Attempts} attempts",
operationName, endpointName, attempts);
break;
}
}
}
return (false, default, lastException, attempts);
}
/// <summary>
/// Determine if an exception should trigger a retry
/// </summary>
private bool ShouldRetry(Exception exception)
{
// Check if the exception type (or one of its base types) is in the retry list
var exceptionType = exception.GetType();
if (config.FailoverExceptions.Any(t => t.IsAssignableFrom(exceptionType)))
{
// For S3 exceptions, check error codes
if (exception is AmazonS3Exception s3Exception)
{
return config.FailoverErrorCodes.Contains(s3Exception.ErrorCode);
}
return true;
}
return false;
}
/// <summary>
/// Determine if an exception should trigger failover to secondary endpoint
/// </summary>
private bool ShouldFailover(Exception? exception)
{
if (exception == null) return false;
return ShouldRetry(exception);
}
/// <summary>
/// Calculate delay for retry based on retry attempt number
/// </summary>
private int CalculateRetryDelay(int retryAttempt)
{
if (!config.UseExponentialBackoff)
{
return config.RetryDelayMs;
}
// Exponential backoff: delay * 2^retryAttempt
return config.RetryDelayMs * (int)Math.Pow(2, retryAttempt);
}
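// Illustrative timing with the defaults (MaxRetries = 3, RetryDelayMs = 1000,
// UseExponentialBackoff = true): each endpoint is attempted up to 4 times,
// waiting 1000ms, 2000ms, and 4000ms between attempts before moving on to
// the next endpoint.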
/// <summary>
/// Execute a void operation with automatic failover
/// </summary>
/// <param name="operation">The S3 operation to execute</param>
/// <param name="operationName">Name of the operation for logging</param>
/// <param name="isWriteOperation">Whether this is a write operation (PUT/POST/DELETE). Write operations only use primary endpoint by default.</param>
public async Task ExecuteWithFailoverAsync(
Func<AmazonS3Client, Task> operation,
string operationName = "S3Operation",
bool isWriteOperation = false)
{
// Return a non-null sentinel: the generic overload treats a null result
// as failure, so returning null here would throw even on success.
await ExecuteWithFailoverAsync<object>(async (client) =>
{
await operation(client);
return new object();
}, operationName, isWriteOperation);
}
}
public static class S3ClientFactory
{
public static List<AmazonS3Client> CreateS3Clients(
string serviceUrls,
string regions,
string accessKeyIds,
string secretKeys,
bool forcePathStyle = true)
{
if (string.IsNullOrWhiteSpace(serviceUrls))
return new List<AmazonS3Client>();
var urls = SplitAndTrim(serviceUrls);
var regionList = SplitAndTrim(regions);
var keyIds = SplitAndTrim(accessKeyIds);
var secrets = SplitAndTrim(secretKeys);
if (urls.Length != regionList.Length ||
urls.Length != keyIds.Length ||
urls.Length != secrets.Length)
{
throw new ArgumentException("All S3 configuration parameters must have the same number of values");
}
var clients = new List<AmazonS3Client>();
for (int i = 0; i < urls.Length; i++)
{
var url = urls[i];
if (string.IsNullOrWhiteSpace(url))
continue;
// Get corresponding values from other arrays
var region = regionList[i];
var keyId = keyIds[i];
var secret = secrets[i];
// Validate that all required values are present
if (string.IsNullOrWhiteSpace(region) ||
string.IsNullOrWhiteSpace(keyId) ||
string.IsNullOrWhiteSpace(secret))
{
System.Diagnostics.Debug.WriteLine(
$"Skipping S3 client at index {i}: Missing required values (URL={url}, Region={region}, KeyId={keyId?.Length > 0}, Secret={secret?.Length > 0})");
continue;
}
try
{
var config = new AmazonS3Config
{
ServiceURL = url,
AuthenticationRegion = region,
ForcePathStyle = forcePathStyle,
SignatureMethod = Amazon.Runtime.SigningAlgorithm.HmacSHA256,
SignatureVersion = "4"
};
var client = new AmazonS3Client(keyId, secret, config);
clients.Add(client);
}
catch (Exception ex)
{
// Log configuration error but continue with other clients
System.Diagnostics.Debug.WriteLine($"Failed to create S3 client for URL {url}: {ex.Message}");
}
}
return clients;
}
private static string[] SplitAndTrim(string? input)
{
if (string.IsNullOrWhiteSpace(input))
return Array.Empty<string>();
return input.Split(';', StringSplitOptions.None)
.Select(s => s.Trim())
.ToArray();
}
}
}
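Taken together, S3ClientFactory builds one AmazonS3Client per semicolon-separated endpoint and S3FailoverHelper routes each operation through them in order, the first client acting as primary and the rest as failovers. A minimal sketch of how the pieces compose (the URLs, regions, credentials, bucket, and key below are placeholder values, not part of this commit):

var clients = S3ClientFactory.CreateS3Clients(
    serviceUrls: "https://s3-primary.example.com;https://s3-backup.example.com",
    regions: "us-east-1;us-east-1",
    accessKeyIds: "PRIMARYKEYID;BACKUPKEYID",
    secretKeys: "primarySecret;backupSecret");
var helper = new S3FailoverHelper(clients, new S3FailoverConfig { MaxRetries = 2 });
// Reads may fail over to the backup endpoint; writes stay on the primary
// unless AllowWriteFailover is enabled.
var metadata = await helper.ExecuteWithFailoverAsync(
    client => client.GetObjectMetadataAsync("my-bucket", "my-object"),
    operationName: "GetObjectMetadata");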

View File

@@ -31,9 +31,9 @@ namespace Notesnook.API.Interfaces
Task DeleteObjectAsync(string userId, string name);
Task DeleteDirectoryAsync(string userId);
Task<long> GetObjectSizeAsync(string userId, string name);
-string? GetUploadObjectUrl(string userId, string name);
-string? GetInternalUploadObjectUrl(string userId, string name);
-Task<string?> GetDownloadObjectUrl(string userId, string name);
+Task<string?> GetUploadObjectUrlAsync(string userId, string name);
+Task<string?> GetInternalUploadObjectUrlAsync(string userId, string name);
+Task<string?> GetDownloadObjectUrlAsync(string userId, string name);
Task<MultipartUploadMeta> StartMultipartUploadAsync(string userId, string name, int parts, string? uploadId = null);
Task AbortMultipartUploadAsync(string userId, string name, string uploadId);
Task CompleteMultipartUploadAsync(string userId, CompleteMultipartUploadRequest uploadRequest);

View File

@@ -27,6 +27,7 @@ using Amazon;
using Amazon.Runtime;
using Amazon.S3;
using Amazon.S3.Model;
+using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using MongoDB.Driver;
using Notesnook.API.Helpers;
@@ -47,9 +48,9 @@ namespace Notesnook.API.Services
public class S3Service : IS3Service
{
-private readonly string BUCKET_NAME = Constants.S3_BUCKET_NAME ?? "";
-private readonly string INTERNAL_BUCKET_NAME = Constants.S3_INTERNAL_BUCKET_NAME ?? "";
-private AmazonS3Client S3Client { get; }
+private readonly string BUCKET_NAME = Constants.S3_BUCKET_NAME;
+private readonly string INTERNAL_BUCKET_NAME = Constants.S3_INTERNAL_BUCKET_NAME ?? Constants.S3_BUCKET_NAME;
+private readonly S3FailoverHelper S3Client;
private ISyncItemsRepositoryAccessor Repositories { get; }
// When running in a dockerized environment the sync server doesn't have access
@@ -60,57 +61,48 @@ namespace Notesnook.API.Services
// URLs generated by S3 are host specific. Changing their hostname on the fly causes
// SignatureDoesNotMatch error.
// That is why we create 2 separate S3 clients. One for internal traffic and one for external.
-private AmazonS3Client? S3InternalClient { get; }
+private readonly S3FailoverHelper S3InternalClient;
private readonly HttpClient httpClient = new();
-public S3Service(ISyncItemsRepositoryAccessor syncItemsRepositoryAccessor)
+public S3Service(ISyncItemsRepositoryAccessor syncItemsRepositoryAccessor, ILogger<S3Service> logger)
{
Repositories = syncItemsRepositoryAccessor;
-var config = CreateConfig();
-#if (DEBUG || STAGING)
-S3Client = new AmazonS3Client("S3RVER", "S3RVER", config);
-#else
-S3Client = new AmazonS3Client(Constants.S3_ACCESS_KEY_ID, Constants.S3_ACCESS_KEY, config);
-#endif
+S3Client = new S3FailoverHelper(
+S3ClientFactory.CreateS3Clients(
+Constants.S3_SERVICE_URL,
+Constants.S3_REGION,
+Constants.S3_ACCESS_KEY_ID,
+Constants.S3_ACCESS_KEY,
+forcePathStyle: true
+),
+logger: logger
+);
-if (!string.IsNullOrEmpty(Constants.S3_INTERNAL_SERVICE_URL))
+if (!string.IsNullOrEmpty(Constants.S3_INTERNAL_SERVICE_URL) && !string.IsNullOrEmpty(Constants.S3_INTERNAL_BUCKET_NAME))
{
-S3InternalClient = new AmazonS3Client(Constants.S3_ACCESS_KEY_ID, Constants.S3_ACCESS_KEY, new AmazonS3Config
-{
-ServiceURL = Constants.S3_INTERNAL_SERVICE_URL,
-AuthenticationRegion = Constants.S3_REGION,
-ForcePathStyle = true,
-SignatureMethod = SigningAlgorithm.HmacSHA256,
-SignatureVersion = "4"
-});
+S3InternalClient = new S3FailoverHelper(
+S3ClientFactory.CreateS3Clients(
+Constants.S3_INTERNAL_SERVICE_URL,
+Constants.S3_REGION,
+Constants.S3_ACCESS_KEY_ID,
+Constants.S3_ACCESS_KEY,
+forcePathStyle: true
+),
+logger: logger
+);
}
+else S3InternalClient = S3Client;
AWSConfigsS3.UseSignatureVersion4 = true;
}
-public static AmazonS3Config CreateConfig()
-{
-return new AmazonS3Config
-{
-#if (DEBUG || STAGING)
-ServiceURL = Servers.S3Server.ToString(),
-#else
-ServiceURL = Constants.S3_SERVICE_URL,
-AuthenticationRegion = Constants.S3_REGION,
-#endif
-ForcePathStyle = true,
-SignatureMethod = SigningAlgorithm.HmacSHA256,
-SignatureVersion = "4"
-};
-}
public async Task DeleteObjectAsync(string userId, string name)
{
var objectName = GetFullObjectName(userId, name) ?? throw new Exception("Invalid object name.");
-var response = await GetS3Client(S3ClientMode.INTERNAL).DeleteObjectAsync(GetBucketName(S3ClientMode.INTERNAL), objectName);
+var response = await S3InternalClient.ExecuteWithFailoverAsync((client) => client.DeleteObjectAsync(INTERNAL_BUCKET_NAME, objectName), operationName: "DeleteObject", isWriteOperation: true);
-if (!IsSuccessStatusCode(((int)response.HttpStatusCode)))
+if (!IsSuccessStatusCode((int)response.HttpStatusCode))
throw new Exception("Could not delete object.");
}
@@ -118,7 +110,7 @@ namespace Notesnook.API.Services
{
var request = new ListObjectsV2Request
{
-BucketName = GetBucketName(S3ClientMode.INTERNAL),
+BucketName = INTERNAL_BUCKET_NAME,
Prefix = userId,
};
@@ -126,7 +118,7 @@ namespace Notesnook.API.Services
var keys = new List<KeyVersion>();
do
{
-response = await GetS3Client(S3ClientMode.INTERNAL).ListObjectsV2Async(request);
+response = await S3InternalClient.ExecuteWithFailoverAsync((client) => client.ListObjectsV2Async(request), operationName: "ListObjectsV2");
response.S3Objects.ForEach(obj => keys.Add(new KeyVersion
{
Key = obj.Key,
@@ -138,12 +130,11 @@ namespace Notesnook.API.Services
if (keys.Count <= 0) return;
-var deleteObjectsResponse = await GetS3Client(S3ClientMode.INTERNAL)
-.DeleteObjectsAsync(new DeleteObjectsRequest
+var deleteObjectsResponse = await S3InternalClient.ExecuteWithFailoverAsync((client) => client.DeleteObjectsAsync(new DeleteObjectsRequest
{
-BucketName = GetBucketName(S3ClientMode.INTERNAL),
+BucketName = INTERNAL_BUCKET_NAME,
Objects = keys,
-});
+}), operationName: "DeleteObjects", isWriteOperation: true);
if (!IsSuccessStatusCode((int)deleteObjectsResponse.HttpStatusCode))
throw new Exception("Could not delete directory.");
@@ -151,7 +142,7 @@ namespace Notesnook.API.Services
public async Task<long> GetObjectSizeAsync(string userId, string name)
{
-var url = this.GetPresignedURL(userId, name, HttpVerb.HEAD, S3ClientMode.INTERNAL);
+var url = await this.GetPresignedURLAsync(userId, name, HttpVerb.HEAD, S3ClientMode.INTERNAL);
if (url == null) return 0;
var request = new HttpRequestMessage(HttpMethod.Head, url);
@@ -160,17 +151,17 @@ namespace Notesnook.API.Services
}
-public string? GetUploadObjectUrl(string userId, string name)
+public async Task<string?> GetUploadObjectUrlAsync(string userId, string name)
{
-return this.GetPresignedURL(userId, name, HttpVerb.PUT);
+return await this.GetPresignedURLAsync(userId, name, HttpVerb.PUT);
}
-public string? GetInternalUploadObjectUrl(string userId, string name)
+public async Task<string?> GetInternalUploadObjectUrlAsync(string userId, string name)
{
-return this.GetPresignedURL(userId, name, HttpVerb.PUT, S3ClientMode.INTERNAL);
+return await this.GetPresignedURLAsync(userId, name, HttpVerb.PUT, S3ClientMode.INTERNAL);
}
-public async Task<string?> GetDownloadObjectUrl(string userId, string name)
+public async Task<string?> GetDownloadObjectUrlAsync(string userId, string name)
{
// var subscriptionService = await WampServers.SubscriptionServer.GetServiceAsync<IUserSubscriptionService>(SubscriptionServerTopics.UserSubscriptionServiceTopic);
// var subscription = await subscriptionService.GetUserSubscriptionAsync(Clients.Notesnook.Id, userId);
@@ -182,7 +173,7 @@ namespace Notesnook.API.Services
// throw new Exception($"You cannot download files larger than {StorageHelper.FormatBytes(fileSizeLimit)} on this plan.");
// }
-var url = this.GetPresignedURL(userId, name, HttpVerb.GET);
+var url = await this.GetPresignedURLAsync(userId, name, HttpVerb.GET);
if (url == null) return null;
return url;
}
@@ -194,8 +185,8 @@ namespace Notesnook.API.Services
if (string.IsNullOrEmpty(uploadId))
{
-var response = await GetS3Client(S3ClientMode.INTERNAL).InitiateMultipartUploadAsync(GetBucketName(S3ClientMode.INTERNAL), objectName);
-if (!IsSuccessStatusCode(((int)response.HttpStatusCode))) throw new Exception("Failed to initiate multipart upload.");
+var response = await S3InternalClient.ExecuteWithFailoverAsync((client) => client.InitiateMultipartUploadAsync(INTERNAL_BUCKET_NAME, objectName), operationName: "InitiateMultipartUpload", isWriteOperation: true);
+if (!IsSuccessStatusCode((int)response.HttpStatusCode)) throw new Exception("Failed to initiate multipart upload.");
uploadId = response.UploadId;
}
@@ -203,7 +194,7 @@ namespace Notesnook.API.Services
var signedUrls = new string[parts];
for (var i = 0; i < parts; ++i)
{
-signedUrls[i] = GetPresignedURLForUploadPart(objectName, uploadId, i + 1);
+signedUrls[i] = await GetPresignedURLForUploadPartAsync(objectName, uploadId, i + 1);
}
return new MultipartUploadMeta
@@ -218,14 +209,14 @@ namespace Notesnook.API.Services
var objectName = GetFullObjectName(userId, name);
if (userId == null || objectName == null) throw new Exception("Could not abort multipart upload.");
-var response = await GetS3Client(S3ClientMode.INTERNAL).AbortMultipartUploadAsync(GetBucketName(S3ClientMode.INTERNAL), objectName, uploadId);
+var response = await S3InternalClient.ExecuteWithFailoverAsync((client) => client.AbortMultipartUploadAsync(INTERNAL_BUCKET_NAME, objectName, uploadId), operationName: "AbortMultipartUpload", isWriteOperation: true);
if (!IsSuccessStatusCode(((int)response.HttpStatusCode))) throw new Exception("Failed to abort multipart upload.");
}
private async Task<long> GetMultipartUploadSizeAsync(string userId, string key, string uploadId)
{
var objectName = GetFullObjectName(userId, key);
-var parts = await GetS3Client(S3ClientMode.INTERNAL).ListPartsAsync(GetBucketName(S3ClientMode.INTERNAL), objectName, uploadId);
+var parts = await S3InternalClient.ExecuteWithFailoverAsync((client) => client.ListPartsAsync(INTERNAL_BUCKET_NAME, objectName, uploadId), operationName: "ListParts");
long totalSize = 0;
foreach (var part in parts.Parts)
{
@@ -268,9 +259,9 @@ namespace Notesnook.API.Services
}
uploadRequest.Key = objectName;
-uploadRequest.BucketName = GetBucketName(S3ClientMode.INTERNAL);
-var response = await GetS3Client(S3ClientMode.INTERNAL).CompleteMultipartUploadAsync(uploadRequest);
-if (!IsSuccessStatusCode(((int)response.HttpStatusCode))) throw new Exception("Failed to complete multipart upload.");
+uploadRequest.BucketName = INTERNAL_BUCKET_NAME;
+var response = await S3InternalClient.ExecuteWithFailoverAsync((client) => client.CompleteMultipartUploadAsync(uploadRequest), operationName: "CompleteMultipartUpload", isWriteOperation: true);
+if (!IsSuccessStatusCode((int)response.HttpStatusCode)) throw new Exception("Failed to complete multipart upload.");
if (!Constants.IS_SELF_HOSTED)
{
@@ -282,34 +273,41 @@ namespace Notesnook.API.Services
}
}
-private string? GetPresignedURL(string userId, string name, HttpVerb httpVerb, S3ClientMode mode = S3ClientMode.EXTERNAL)
+private async Task<string?> GetPresignedURLAsync(string userId, string name, HttpVerb httpVerb, S3ClientMode mode = S3ClientMode.EXTERNAL)
{
var objectName = GetFullObjectName(userId, name);
if (userId == null || objectName == null) return null;
-var client = GetS3Client(mode);
-var request = new GetPreSignedUrlRequest
+var client = mode == S3ClientMode.INTERNAL ? S3InternalClient : S3Client;
+var bucketName = mode == S3ClientMode.INTERNAL ? INTERNAL_BUCKET_NAME : BUCKET_NAME;
+return await client.ExecuteWithFailoverAsync(c =>
{
-BucketName = GetBucketName(mode),
-Expires = System.DateTime.Now.AddHours(1),
-Verb = httpVerb,
-Key = objectName,
+var request = new GetPreSignedUrlRequest
+{
+BucketName = bucketName,
+Expires = System.DateTime.Now.AddHours(1),
+Verb = httpVerb,
+Key = objectName,
#if (DEBUG || STAGING)
-Protocol = Protocol.HTTP,
+Protocol = Protocol.HTTP,
#else
-Protocol = client.Config.ServiceURL.StartsWith("http://") ? Protocol.HTTP : Protocol.HTTPS,
+Protocol = c.Config.ServiceURL.StartsWith("http://") ? Protocol.HTTP : Protocol.HTTPS,
#endif
-};
-return client.GetPreSignedURL(request);
+};
+return c.GetPreSignedURLAsync(request);
+}, operationName: "GetPreSignedURL");
}
-private string GetPresignedURLForUploadPart(string objectName, string uploadId, int partNumber, S3ClientMode mode = S3ClientMode.EXTERNAL)
+private Task<string> GetPresignedURLForUploadPartAsync(string objectName, string uploadId, int partNumber, S3ClientMode mode = S3ClientMode.EXTERNAL)
{
-var client = GetS3Client(mode);
-return client.GetPreSignedURL(new GetPreSignedUrlRequest
+var client = mode == S3ClientMode.INTERNAL ? S3InternalClient : S3Client;
+var bucketName = mode == S3ClientMode.INTERNAL ? INTERNAL_BUCKET_NAME : BUCKET_NAME;
+return client.ExecuteWithFailoverAsync(c => c.GetPreSignedURLAsync(new GetPreSignedUrlRequest
{
-BucketName = GetBucketName(mode),
+BucketName = bucketName,
Expires = System.DateTime.Now.AddHours(1),
Verb = HttpVerb.PUT,
Key = objectName,
@@ -318,9 +316,9 @@ namespace Notesnook.API.Services
#if (DEBUG || STAGING)
Protocol = Protocol.HTTP,
#else
-Protocol = client.Config.ServiceURL.StartsWith("http://") ? Protocol.HTTP : Protocol.HTTPS,
+Protocol = c.Config.ServiceURL.StartsWith("http://") ? Protocol.HTTP : Protocol.HTTPS,
#endif
-});
+}), operationName: "GetPreSignedURL");
}
private static string? GetFullObjectName(string userId, string name)
@@ -333,17 +331,5 @@ namespace Notesnook.API.Services
{
return ((int)statusCode >= 200) && ((int)statusCode <= 299);
}
-AmazonS3Client GetS3Client(S3ClientMode mode = S3ClientMode.EXTERNAL)
-{
-if (mode == S3ClientMode.INTERNAL && S3InternalClient != null) return S3InternalClient;
-return S3Client;
-}
-string GetBucketName(S3ClientMode mode = S3ClientMode.EXTERNAL)
-{
-if (mode == S3ClientMode.INTERNAL && S3InternalClient != null) return INTERNAL_BUCKET_NAME;
-return BUCKET_NAME;
-}
}
}
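With S3ClientFactory splitting every setting on ';', failover endpoints are configured by supplying semicolon-separated lists of equal length for the existing S3 settings. An illustrative configuration, assuming the Constants fields used above (S3_SERVICE_URL, S3_REGION, S3_ACCESS_KEY_ID, S3_ACCESS_KEY) map to identically named environment variables; all values are placeholders:

S3_SERVICE_URL=https://s3-primary.example.com;https://s3-backup.example.com
S3_REGION=us-east-1;us-east-1
S3_ACCESS_KEY_ID=primaryKeyId;backupKeyId
S3_ACCESS_KEY=primarySecret;backupSecret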

View File

@@ -200,12 +200,7 @@ namespace Notesnook.API
services.AddControllers();
-services.AddHealthChecks().AddS3((options) =>
-{
-options.Credentials = new BasicAWSCredentials(Constants.S3_ACCESS_KEY_ID, Constants.S3_ACCESS_KEY);
-options.S3Config = S3Service.CreateConfig();
-options.BucketName = Constants.S3_BUCKET_NAME;
-}, "s3-check", HealthStatus.Degraded).AddMongoDb(Constants.MONGODB_CONNECTION_STRING, "mongodb-check", HealthStatus.Unhealthy);
+services.AddHealthChecks();
services.AddSignalR((hub) =>
{