Mirror of https://github.com/FoggedLens/deflock-app.git
Synced 2026-02-13 09:12:56 +00:00

Compare commits: v2.2.0-rc...v2.3.1-rel (10 commits)
| Author | SHA1 | Date |
|---|---|---|
| | ae354c43a4 | |
| | 34eac41a96 | |
| | 816dadfbd1 | |
| | 607ecbafaf | |
| | 8b44b3abf5 | |
| | a675cf185a | |
| | 26b479bf20 | |
| | ae795a7607 | |
| | a05e03567e | |
| | da6887f7d3 | |
DEVELOPER.md
@@ -202,15 +202,24 @@ Deletions don't need position dragging or tag editing - they just need confirmation
 - Retries: Exponential backoff up to 59 minutes
 - Failures: OSM auto-closes after 60 minutes, so we eventually give up

-**Queue processing workflow:**
+**Queue processing workflow (v2.3.0+ concurrent processing):**
 1. User action (add/edit/delete) → `PendingUpload` created with `UploadState.pending`
 2. Immediate visual feedback (cache updated with temp markers)
-3. Background uploader processes queue when online:
+3. Background uploader starts new uploads every 5 seconds (configurable via `kUploadQueueProcessingInterval`):
+   - **Concurrency limit**: Maximum 5 uploads processing simultaneously (`kMaxConcurrentUploads`)
+   - **Individual lifecycles**: Each upload processes through all three stages independently
+   - **Timer role**: Only used to start new pending uploads, not control stage progression
+4. Each upload processes through stages without waiting for other uploads:
+   - **Pending** → Create changeset → **CreatingChangeset** → **Uploading**
+   - **Uploading** → Upload node → **ClosingChangeset**
+   - **ClosingChangeset** → Close changeset → **Complete**
-4. Success → cache updated with real data, temp markers removed
-5. Failures → appropriate retry logic based on which stage failed
+5. Success → cache updated with real data, temp markers removed
+6. Failures → appropriate retry logic based on which stage failed
+
+**Performance improvement (v2.3.0):**
+- **Before**: Sequential processing with 10-second delays between each stage of each upload
+- **After**: Concurrent processing with uploads completing in 10-30 seconds regardless of queue size
+- **User benefit**: 3-5x faster upload processing for users with good internet connections

 **Why three explicit stages:**
 The previous implementation conflated changeset creation + node operation as one step, making error handling unclear. The new approach:
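
The workflow above amounts to a small per-upload state machine. A minimal, self-contained Dart sketch of that lifecycle follows; the `UploadState` names mirror the doc, while `FakeOsmApi`, `processUpload`, and all signatures are illustrative stand-ins rather than the app's actual code.

```dart
// Illustrative sketch of the three-stage lifecycle described above.
// FakeOsmApi and processUpload are hypothetical stand-ins; only the
// UploadState names are taken from the doc.
enum UploadState { pending, creatingChangeset, uploading, closingChangeset, complete, error }

class FakeOsmApi {
  Future<int> createChangeset() async => 42; // pretend changeset id
  Future<int> uploadNode(int changesetId) async => 7; // pretend node id
  Future<void> closeChangeset(int changesetId) async {}
}

Future<UploadState> processUpload(FakeOsmApi osm) async {
  var state = UploadState.pending;
  try {
    state = UploadState.creatingChangeset;
    final changesetId = await osm.createChangeset(); // stage 1
    state = UploadState.uploading;
    await osm.uploadNode(changesetId); // stage 2
    state = UploadState.closingChangeset;
    await osm.closeChangeset(changesetId); // stage 3
    state = UploadState.complete;
  } catch (_) {
    state = UploadState.error; // retry policy depends on which stage failed
  }
  return state;
}

Future<void> main() async {
  print(await processUpload(FakeOsmApi())); // UploadState.complete
}
```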

@@ -98,12 +98,7 @@ cp lib/keys.dart.example lib/keys.dart
## Roadmap

### Needed Bugfixes
- Add cancel button to submission guide
- When not logged in, submit button should take users to settings>account to log in.
- Ensure GPS/follow-me works after recent revamp (loses lock? have to move map for button state to update?)
- Add new tags to top of a profile so they're visible immediately
- Allow arbitrary entry on refine tags page
- Don't show NSI suggestions that aren't sufficiently popular (image=)
- Clean cache when nodes have been deleted by others
- Are offline areas preferred for fast loading even when online? Check working.

@@ -1,4 +1,24 @@
 {
+  "2.3.1": {
+    "content": [
+      "• Follow-me mode now automatically restores when add/edit/tag sheets are closed",
+      "• Follow-me button is greyed out while node sheets are open (add/edit/tag) since following doesn't make sense during node operations",
+      "• Drop support for approximate location since I can't get it to work reliably; apologies"
+    ]
+  },
+  "2.3.0": {
+    "content": [
+      "• Concurrent upload queue processing",
+      "• Each submission is now much faster"
+    ]
+  },
+  "2.2.1": {
+    "content": [
+      "• Fixed network status indicator timing out prematurely",
+      "• Improved GPS follow-me reliability - fixed sync issues that could cause tracking to stop working",
+      "• Network status now accurately shows 'taking a while' when requests split or backoff, and only shows 'timed out' for actual network failures"
+    ]
+  },
   "2.2.0": {
     "content": [
       "• Fixed follow-me sync issues where tracking would sometimes stop working after mode changes",
@@ -259,4 +279,4 @@
       "• New suspected locations feature"
     ]
   }
 }
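
For context, a minimal sketch of consuming a changelog map shaped like the JSON above (illustrative only; the app's real changelog loader is not part of this diff, and the embedded JSON here is a stand-in):

```dart
import 'dart:convert';

// Parse a {version: {"content": [lines]}} changelog map like the one above.
void main() {
  const raw = '''
{
  "2.3.1": {"content": ["• Follow-me mode now automatically restores when sheets close"]},
  "2.3.0": {"content": ["• Concurrent upload queue processing"]}
}''';
  final changelog = jsonDecode(raw) as Map<String, dynamic>;
  changelog.forEach((version, entry) {
    print('Version $version:');
    for (final line in (entry['content'] as List)) {
      print('  $line');
    }
  });
}
```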

@@ -55,13 +55,18 @@ const String kClientName = 'DeFlock';

 // Upload and changeset configuration
 const Duration kUploadHttpTimeout = Duration(seconds: 30); // HTTP request timeout for uploads
+const Duration kUploadQueueProcessingInterval = Duration(seconds: 5); // How often to check for new uploads to start
+const int kMaxConcurrentUploads = 5; // Maximum number of uploads processing simultaneously
 const Duration kChangesetCloseInitialRetryDelay = Duration(seconds: 10);
 const Duration kChangesetCloseMaxRetryDelay = Duration(minutes: 5); // Cap at 5 minutes
 const Duration kChangesetAutoCloseTimeout = Duration(minutes: 59); // Give up and trust OSM auto-close
 const double kChangesetCloseBackoffMultiplier = 2.0;

 // Navigation routing configuration
-const Duration kNavigationRoutingTimeout = Duration(seconds: 120); // HTTP timeout for routing requests
+const Duration kNavigationRoutingTimeout = Duration(seconds: 90); // HTTP timeout for routing requests
+
+// Overpass API configuration
+const Duration kOverpassQueryTimeout = Duration(seconds: 45); // Timeout for Overpass API queries (was 25s hardcoded)

 // Suspected locations CSV URL
 const String kSuspectedLocationsCsvUrl = 'https://alprwatch.org/suspected-locations/deflock-latest.csv';
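
Taken together, the changeset-close constants above imply a capped exponential backoff schedule: 10s, 20s, 40s, 80s, 160s, then clamped at 5 minutes until the 59-minute auto-close cutoff. A small runnable sketch of that schedule (the `closeRetryDelay` helper is hypothetical; only the constant values come from the diff):

```dart
import 'dart:math';

// Hypothetical helper showing the retry schedule implied by the constants
// above; the app's actual retry code lives in the upload queue.
const initialDelay = Duration(seconds: 10); // kChangesetCloseInitialRetryDelay
const maxDelay = Duration(minutes: 5);      // kChangesetCloseMaxRetryDelay
const backoffMultiplier = 2.0;              // kChangesetCloseBackoffMultiplier

Duration closeRetryDelay(int attempt) {
  final ms = (initialDelay.inMilliseconds * pow(backoffMultiplier, attempt)).round();
  return ms > maxDelay.inMilliseconds ? maxDelay : Duration(milliseconds: ms);
}

void main() {
  for (var attempt = 0; attempt < 7; attempt++) {
    print('retry $attempt → ${closeRetryDelay(attempt).inSeconds}s');
  }
  // 10s, 20s, 40s, 80s, 160s, 300s (capped), 300s
}
```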

@@ -9,6 +9,7 @@ import '../../widgets/add_node_sheet.dart';
 import '../../widgets/edit_node_sheet.dart';
 import '../../widgets/navigation_sheet.dart';
 import '../../widgets/measured_sheet.dart';
+import '../../state/settings_state.dart' show FollowMeMode;

 /// Coordinates all bottom sheet operations including opening, closing, height tracking,
 /// and sheet-related validation logic.
@@ -25,6 +26,9 @@ class SheetCoordinator {

   // Flag to prevent map bounce when transitioning from tag sheet to edit sheet
   bool _transitioningToEdit = false;

+  // Follow-me state restoration
+  FollowMeMode? _followMeModeBeforeSheet;
+
   // Getters for accessing heights
   double get addSheetHeight => _addSheetHeight;
@@ -88,7 +92,8 @@ class SheetCoordinator {
       return;
     }

-    // Disable follow-me when adding a node so the map doesn't jump around
+    // Save current follow-me mode and disable it while sheet is open
+    _followMeModeBeforeSheet = appState.followMeMode;
     appState.setFollowMeMode(FollowMeMode.off);

     appState.startAddSession();
@@ -120,6 +125,9 @@ class SheetCoordinator {
         debugPrint('[SheetCoordinator] AddNodeSheet dismissed - canceling session');
         appState.cancelSession();
       }
+
+      // Restore follow-me mode that was active before sheet opened
+      _restoreFollowMeMode(appState);
     });
   }
@@ -132,7 +140,8 @@ class SheetCoordinator {
   }) {
     final appState = context.read<AppState>();

-    // Disable follow-me when editing a node so the map doesn't jump around
+    // Save current follow-me mode and disable it while sheet is open
+    _followMeModeBeforeSheet = appState.followMeMode;
     appState.setFollowMeMode(FollowMeMode.off);

     final session = appState.editSession!; // should be non-null when this is called
@@ -185,6 +194,9 @@ class SheetCoordinator {
         debugPrint('[SheetCoordinator] EditNodeSheet dismissed - canceling edit session');
         appState.cancelEditSession();
       }
+
+      // Restore follow-me mode that was active before sheet opened
+      _restoreFollowMeMode(appState);
     });
   }
@@ -250,4 +262,16 @@ class SheetCoordinator {
     _tagSheetHeight = 0.0;
     onStateChanged();
   }
+
+  /// Restore the follow-me mode that was active before opening a node sheet
+  void _restoreFollowMeMode(AppState appState) {
+    if (_followMeModeBeforeSheet != null) {
+      debugPrint('[SheetCoordinator] Restoring follow-me mode: ${_followMeModeBeforeSheet}');
+      appState.setFollowMeMode(_followMeModeBeforeSheet!);
+      _followMeModeBeforeSheet = null; // Clear stored state
+    }
+  }
+
+  /// Check if any node editing/viewing sheet is currently open
+  bool get hasActiveNodeSheet => _addSheetHeight > 0 || _editSheetHeight > 0 || _tagSheetHeight > 0;
 }

@@ -433,7 +433,7 @@ class _HomeScreenState extends State<HomeScreen> with TickerProviderStateMixin {
   IconButton(
     tooltip: _getFollowMeTooltip(appState.followMeMode),
     icon: Icon(_getFollowMeIcon(appState.followMeMode)),
-    onPressed: _mapViewKey.currentState?.hasLocation == true
+    onPressed: (_mapViewKey.currentState?.hasLocation == true && !_sheetCoordinator.hasActiveNodeSheet)
       ? () {
           final oldMode = appState.followMeMode;
           final newMode = _getNextFollowMeMode(oldMode);
@@ -444,7 +444,7 @@ class _HomeScreenState extends State<HomeScreen> with TickerProviderStateMixin {
             _mapViewKey.currentState?.retryLocationInit();
           }
         }
-      : null, // Grey out when no location
+      : null, // Grey out when no location or when node sheet is open
   ),
   AnimatedBuilder(
     animation: LocalizationService.instance,

@@ -78,6 +78,11 @@ Future<List<OsmNode>> _fetchOverpassNodesWithSplitting({
   // Rate limits should NOT be split - just fail with extended backoff
   debugPrint('[fetchOverpassNodes] Rate limited - using extended backoff, not splitting');

+  // Report slow progress when backing off
+  if (reportStatus) {
+    NetworkStatus.instance.reportSlowProgress();
+  }
+
   // Wait longer for rate limits before giving up entirely
   await Future.delayed(const Duration(seconds: 30));
   return []; // Return empty rather than rethrowing - let caller handle error reporting
@@ -88,6 +93,11 @@ Future<List<OsmNode>> _fetchOverpassNodesWithSplitting({
     return []; // Return empty - let caller handle error reporting
   }

+  // Report slow progress when we start splitting (only at the top level)
+  if (reportStatus) {
+    NetworkStatus.instance.reportSlowProgress();
+  }
+
   // Split the bounds into 4 quadrants and try each separately
   debugPrint('[fetchOverpassNodes] Splitting area into quadrants (depth: $splitDepth)');
   final quadrants = _splitBounds(bounds);
@@ -218,7 +228,7 @@ String _buildOverpassQuery(LatLngBounds bounds, List<NodeProfile> profiles, int
   }).join('\n  ');

   return '''
-[out:json][timeout:25];
+[out:json][timeout:${kOverpassQueryTimeout.inSeconds}];
(
  $nodeClauses
);
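
The splitting path above relies on `_splitBounds`, which this diff doesn't show. A plausible self-contained sketch of splitting a bounding box into four quadrants (the `Bounds` type and all logic here are assumptions, not the app's actual implementation):

```dart
// Hypothetical sketch of the quadrant splitting referenced above; the app's
// real _splitBounds (and its LatLngBounds type) are not shown in this diff.
class Bounds {
  final double south, west, north, east;
  const Bounds(this.south, this.west, this.north, this.east);
  @override
  String toString() => '($south, $west) → ($north, $east)';
}

List<Bounds> splitBounds(Bounds b) {
  final midLat = (b.south + b.north) / 2;
  final midLon = (b.west + b.east) / 2;
  return [
    Bounds(b.south, b.west, midLat, midLon), // SW quadrant
    Bounds(b.south, midLon, midLat, b.east), // SE quadrant
    Bounds(midLat, b.west, b.north, midLon), // NW quadrant
    Bounds(midLat, midLon, b.north, b.east), // NE quadrant
  ];
}

void main() {
  for (final q in splitBounds(const Bounds(40.0, -75.0, 41.0, -74.0))) {
    print(q); // each quadrant can be retried as a smaller Overpass query
  }
}
```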

@@ -19,7 +19,6 @@ class NetworkStatus extends ChangeNotifier {
   bool _hasSuccess = false;
   int _recentOfflineMisses = 0;
   Timer? _overpassRecoveryTimer;
-  Timer? _waitingTimer;
   Timer? _noDataResetTimer;
   Timer? _successResetTimer;

   // Getters
@@ -72,7 +71,25 @@
     }
   }

-  /// Set waiting status (show when loading tiles/cameras)
+  /// Report that requests are taking longer than usual (splitting, backoffs, etc.)
+  void reportSlowProgress() {
+    if (!_overpassHasIssues) {
+      _overpassHasIssues = true;
+      _isWaitingForData = false; // Transition from waiting to slow progress
+      notifyListeners();
+      debugPrint('[NetworkStatus] Surveillance data requests taking longer than usual');
+    }
+
+    // Reset recovery timer - we'll clear this when the operation actually completes
+    _overpassRecoveryTimer?.cancel();
+    _overpassRecoveryTimer = Timer(const Duration(minutes: 2), () {
+      _overpassHasIssues = false;
+      notifyListeners();
+      debugPrint('[NetworkStatus] Slow progress status cleared');
+    });
+  }
+
+  /// Set waiting status (show when loading surveillance data)
   void setWaiting() {
     // Clear any previous timeout/no-data state when starting new wait
     _isTimedOut = false;
@@ -83,17 +100,7 @@
     if (!_isWaitingForData) {
       _isWaitingForData = true;
       notifyListeners();
       // Don't log routine waiting - only log if we stay waiting too long
     }
-
-    // Set timeout for genuine network issues (not 404s)
-    _waitingTimer?.cancel();
-    _waitingTimer = Timer(const Duration(seconds: 8), () {
-      _isWaitingForData = false;
-      _isTimedOut = true;
-      debugPrint('[NetworkStatus] Request timed out - likely network issues');
-      notifyListeners();
-    });
   }

   /// Show success status briefly when data loads
@@ -103,7 +110,6 @@
     _hasNoData = false;
     _hasSuccess = true;
     _recentOfflineMisses = 0;
-    _waitingTimer?.cancel();
     _noDataResetTimer?.cancel();
     notifyListeners();
@@ -123,7 +129,6 @@
     _isTimedOut = false;
     _hasSuccess = false;
     _hasNoData = true;
-    _waitingTimer?.cancel();
     _successResetTimer?.cancel();
     notifyListeners();
@@ -145,7 +150,6 @@
     _hasNoData = false;
     _hasSuccess = false;
     _recentOfflineMisses = 0;
-    _waitingTimer?.cancel();
     _noDataResetTimer?.cancel();
     _successResetTimer?.cancel();
     notifyListeners();
@@ -158,7 +162,6 @@
     _isTimedOut = true;
     _hasNoData = false;
     _hasSuccess = false;
-    _waitingTimer?.cancel();
     _noDataResetTimer?.cancel();
     _successResetTimer?.cancel();
     notifyListeners();
@@ -179,7 +182,6 @@
     _isTimedOut = false;
     _hasNoData = false;
     _hasSuccess = false;
-    _waitingTimer?.cancel();
     _noDataResetTimer?.cancel();
     _successResetTimer?.cancel();
@@ -200,7 +202,6 @@
     _isWaitingForData = false;
     _isTimedOut = false;
     _hasNoData = true;
-    _waitingTimer?.cancel();
     notifyListeners();
     debugPrint('[NetworkStatus] No offline data available for this area');
   }
@@ -217,7 +218,6 @@
   @override
   void dispose() {
     _overpassRecoveryTimer?.cancel();
-    _waitingTimer?.cancel();
     _noDataResetTimer?.cancel();
     _successResetTimer?.cancel();
     super.dispose();

@@ -10,16 +10,19 @@ import '../models/node_profile.dart';
 import '../services/node_cache.dart';
 import '../services/uploader.dart';
 import '../widgets/node_provider_with_cache.dart';
+import '../dev_config.dart';
 import 'settings_state.dart';
 import 'session_state.dart';

 class UploadQueueState extends ChangeNotifier {
   final List<PendingUpload> _queue = [];
   Timer? _uploadTimer;
+  int _activeUploadCount = 0;

   // Getters
   int get pendingCount => _queue.length;
   List<PendingUpload> get pendingUploads => List.unmodifiable(_queue);
+  int get activeUploadCount => _activeUploadCount;

   // Initialize by loading queue from storage and repopulate cache with pending nodes
   Future<void> init() async {

@@ -321,19 +324,22 @@
     // No uploads if queue is empty, offline mode is enabled, or queue processing is paused
     if (_queue.isEmpty || offlineMode || pauseQueueProcessing) return;

-    _uploadTimer = Timer.periodic(const Duration(seconds: 10), (t) async {
+    _uploadTimer = Timer.periodic(kUploadQueueProcessingInterval, (t) async {
       if (_queue.isEmpty || offlineMode || pauseQueueProcessing) {
         _uploadTimer?.cancel();
         return;
       }

-      // Find next item to process based on state
-      final pendingItems = _queue.where((pu) => pu.uploadState == UploadState.pending).toList();
-      final creatingChangesetItems = _queue.where((pu) => pu.uploadState == UploadState.creatingChangeset).toList();
+      // Check if we can start more uploads (concurrency limit check)
+      if (_activeUploadCount >= kMaxConcurrentUploads) {
+        debugPrint('[UploadQueue] At concurrency limit ($_activeUploadCount/$kMaxConcurrentUploads), waiting for uploads to complete');
+        return;
+      }
+
+      // Process any expired items
       final uploadingItems = _queue.where((pu) => pu.uploadState == UploadState.uploading).toList();
       final closingItems = _queue.where((pu) => pu.uploadState == UploadState.closingChangeset).toList();

-      // Process any expired items
       for (final uploadingItem in uploadingItems) {
         if (uploadingItem.hasChangesetExpired) {
           debugPrint('[UploadQueue] Changeset expired during node submission - marking as failed');
@@ -347,73 +353,109 @@
         if (closingItem.hasChangesetExpired) {
           debugPrint('[UploadQueue] Changeset expired during close - trusting OSM auto-close (node was submitted successfully)');
           _markAsCompleting(closingItem, submittedNodeId: closingItem.submittedNodeId!);
           // Continue processing loop - don't return here
         }
       }

+      // Find next pending item to start
+      final pendingItems = _queue.where((pu) => pu.uploadState == UploadState.pending).toList();
+
-      // Find next item to process (process in stage order)
-      PendingUpload? item;
-      if (pendingItems.isNotEmpty) {
-        item = pendingItems.first;
-      } else if (creatingChangesetItems.isNotEmpty) {
-        // Already in progress, skip
-        return;
-      } else if (uploadingItems.isNotEmpty) {
-        // Check if any uploading items are ready for retry
-        final readyToRetry = uploadingItems.where((ui) =>
-          !ui.hasChangesetExpired && ui.isReadyForNodeSubmissionRetry
-        ).toList();
-
-        if (readyToRetry.isNotEmpty) {
-          item = readyToRetry.first;
-        }
-      } else {
-        // No active items, check if any changeset close items are ready for retry
-        final readyToRetry = closingItems.where((ci) =>
-          !ci.hasChangesetExpired && ci.isReadyForChangesetCloseRetry
-        ).toList();
-
-        if (readyToRetry.isNotEmpty) {
-          item = readyToRetry.first;
-        }
-      }
-
-      if (item == null) {
-        // No items ready for processing - check if queue is effectively empty
+      if (pendingItems.isEmpty) {
+        // Check if queue is effectively empty
         final hasActiveItems = _queue.any((pu) =>
           pu.uploadState == UploadState.pending ||
           pu.uploadState == UploadState.creatingChangeset ||
-          (pu.uploadState == UploadState.uploading && !pu.hasChangesetExpired) ||
-          (pu.uploadState == UploadState.closingChangeset && !pu.hasChangesetExpired)
+          pu.uploadState == UploadState.uploading ||
+          pu.uploadState == UploadState.closingChangeset
         );

         if (!hasActiveItems) {
           debugPrint('[UploadQueue] No active items remaining, stopping uploader');
           _uploadTimer?.cancel();
         }
-        return; // Nothing to process right now
+        return;
       }

-      // Retrieve access after every tick (accounts for re-login)
+      // Retrieve access token
       final access = await getAccessToken();
       if (access == null) return; // not logged in

-      debugPrint('[UploadQueue] Processing item in state: ${item.uploadState} with uploadMode: ${item.uploadMode}');
+      // Start processing the next pending upload
+      final item = pendingItems.first;
+      debugPrint('[UploadQueue] Starting new upload processing for item at ${item.coord} ($_activeUploadCount/$kMaxConcurrentUploads active)');

-      if (item.uploadState == UploadState.pending) {
-        await _processCreateChangeset(item, access);
-      } else if (item.uploadState == UploadState.creatingChangeset) {
-        // Already in progress, skip (shouldn't happen due to filtering above)
-        debugPrint('[UploadQueue] Changeset creation already in progress, skipping');
-        return;
-      } else if (item.uploadState == UploadState.uploading) {
-        await _processNodeOperation(item, access);
-      } else if (item.uploadState == UploadState.closingChangeset) {
-        await _processChangesetClose(item, access);
-      }
+      _activeUploadCount++;
+      _processIndividualUpload(item, access);
     });
   }

+  // Process an individual upload through all three stages
+  Future<void> _processIndividualUpload(PendingUpload item, String accessToken) async {
+    try {
+      debugPrint('[UploadQueue] Starting individual upload processing for ${item.operation.name} at ${item.coord}');
+
+      // Stage 1: Create changeset
+      await _processCreateChangeset(item, accessToken);
+      if (item.uploadState == UploadState.error) return;
+
+      // Stage 2: Node operation with retry logic
+      bool nodeOperationCompleted = false;
+      while (!nodeOperationCompleted && !item.hasChangesetExpired && item.uploadState != UploadState.error) {
+        await _processNodeOperation(item, accessToken);
+
+        if (item.uploadState == UploadState.closingChangeset) {
+          // Node operation succeeded
+          nodeOperationCompleted = true;
+        } else if (item.uploadState == UploadState.uploading && !item.isReadyForNodeSubmissionRetry) {
+          // Need to wait before retry
+          final delay = item.nextNodeSubmissionRetryDelay;
+          debugPrint('[UploadQueue] Waiting ${delay.inSeconds}s before node submission retry');
+          await Future.delayed(delay);
+        } else if (item.uploadState == UploadState.error) {
+          // Failed permanently
+          return;
+        }
+      }
+
+      if (!nodeOperationCompleted) return; // Failed or expired
+
+      // Stage 3: Close changeset with retry logic
+      bool changesetClosed = false;
+      while (!changesetClosed && !item.hasChangesetExpired && item.uploadState != UploadState.error) {
+        await _processChangesetClose(item, accessToken);
+
+        if (item.uploadState == UploadState.complete) {
+          // Changeset close succeeded
+          changesetClosed = true;
+        } else if (item.uploadState == UploadState.closingChangeset && !item.isReadyForChangesetCloseRetry) {
+          // Need to wait before retry
+          final delay = item.nextChangesetCloseRetryDelay;
+          debugPrint('[UploadQueue] Waiting ${delay.inSeconds}s before changeset close retry');
+          await Future.delayed(delay);
+        } else if (item.uploadState == UploadState.error) {
+          // Failed permanently
+          return;
+        }
+      }
+
+      if (!changesetClosed && item.hasChangesetExpired) {
+        // Trust OSM auto-close if we ran out of time
+        debugPrint('[UploadQueue] Upload completed but changeset close timed out - trusting OSM auto-close');
+        if (item.submittedNodeId != null) {
+          _markAsCompleting(item, submittedNodeId: item.submittedNodeId!);
+        }
+      }
+
+    } catch (e) {
+      debugPrint('[UploadQueue] Unexpected error in individual upload processing: $e');
+      item.setError('Unexpected error: $e');
+      _saveQueue();
+      notifyListeners();
+    } finally {
+      // Always decrement the active upload count
+      _activeUploadCount--;
+      debugPrint('[UploadQueue] Individual upload processing finished ($_activeUploadCount/$kMaxConcurrentUploads active)');
+    }
+  }

   // Process changeset creation (step 1 of 3)
   Future<void> _processCreateChangeset(PendingUpload item, String access) async {
     item.markAsCreatingChangeset();

@@ -10,11 +10,11 @@ import '../../services/proximity_alert_service.dart';
 import '../../models/osm_node.dart';
 import '../../models/node_profile.dart';

-/// Simple GPS controller that respects permissions and provides location updates.
+/// Simple GPS controller that handles precise location permissions only.
 /// Key principles:
 /// - Respect "denied forever" - stop trying
-/// - Retry "denied" - user might enable later
-/// - Accept whatever accuracy is available once granted
+/// - Retry "denied" - user might enable later
+/// - Only works with precise location permissions
 class GpsController {
   StreamSubscription<Position>? _positionSub;
   Timer? _retryTimer;

@@ -1,7 +1,7 @@
 name: deflockapp
 description: Map public surveillance infrastructure with OpenStreetMap
 publish_to: "none"
-version: 2.2.0+36 # The thing after the + is the version code, incremented with each release
+version: 2.3.1+38 # The thing after the + is the version code, incremented with each release

 environment:
   sdk: ">=3.5.0 <4.0.0" # oauth2_client 4.x needs Dart 3.5+