mirror of https://github.com/FoggedLens/deflock-app.git
synced 2026-02-12 16:52:51 +00:00
Node fetch rework
@@ -15,7 +15,7 @@ import 'models/suspected_location.dart';
import 'models/tile_provider.dart';
import 'models/search_result.dart';
import 'services/offline_area_service.dart';
import 'services/node_cache.dart';
import 'services/map_data_provider.dart';
import 'services/tile_preview_service.dart';
import 'services/changelog_service.dart';
import 'services/operator_profile_service.dart';
@@ -658,7 +658,7 @@ class AppState extends ChangeNotifier {

  Future<void> setUploadMode(UploadMode mode) async {
    // Clear node cache when switching upload modes to prevent mixing production/sandbox data
    NodeCache.instance.clear();
    MapDataProvider().clearCache();
    debugPrint('[AppState] Cleared node cache due to upload mode change');

    await _settingsState.setUploadMode(mode);
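A minimal usage sketch of the mode switch (hypothetical call site; `appState` is assumed to be the app's `AppState` instance):

```dart
// Hypothetical settings handler: switching to sandbox drops all cached
// nodes, so the next map fetch comes entirely from the selected API.
await appState.setUploadMode(UploadMode.sandbox);
```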
@@ -5,13 +5,9 @@ import 'package:flutter/foundation.dart';
import '../models/node_profile.dart';
import '../models/osm_node.dart';
import '../app_state.dart';
import 'map_data_submodules/nodes_from_overpass.dart';
import 'map_data_submodules/nodes_from_osm_api.dart';
import 'map_data_submodules/tiles_from_remote.dart';
import 'map_data_submodules/nodes_from_local.dart';
import 'map_data_submodules/tiles_from_local.dart';
import 'network_status.dart';
import 'prefetch_area_service.dart';
import 'node_data_manager.dart';

enum MapSource { local, remote, auto } // For future use

@@ -27,103 +23,31 @@ class MapDataProvider {
  factory MapDataProvider() => _instance;
  MapDataProvider._();

  // REMOVED: AppState get _appState => AppState();
  final NodeDataManager _nodeDataManager = NodeDataManager();

  bool get isOfflineMode => AppState.instance.offlineMode;
  void setOfflineMode(bool enabled) {
    AppState.instance.setOfflineMode(enabled);
  }

  /// Fetch surveillance nodes from OSM/Overpass or local storage.
  /// Remote is default. If source is MapSource.auto, remote is tried first unless offline.
  /// Fetch surveillance nodes using the new simplified system.
  /// Returns cached data immediately if available, otherwise fetches from appropriate source.
  Future<List<OsmNode>> getNodes({
    required LatLngBounds bounds,
    required List<NodeProfile> profiles,
    UploadMode uploadMode = UploadMode.production,
    MapSource source = MapSource.auto,
    bool isUserInitiated = false,
  }) async {
    final offline = AppState.instance.offlineMode;

    // Explicit remote request: error if offline, else always remote
    if (source == MapSource.remote) {
      if (offline) {
        throw OfflineModeException("Cannot fetch remote nodes in offline mode.");
      }
      return _fetchRemoteNodes(
        bounds: bounds,
        profiles: profiles,
        uploadMode: uploadMode,
        maxResults: 0, // No limit - fetch all available data
      );
    }

    // Explicit local request: always use local
    if (source == MapSource.local) {
      return fetchLocalNodes(
        bounds: bounds,
        profiles: profiles,
      );
    }

    // AUTO: In offline mode, behavior depends on upload mode
    if (offline) {
      if (uploadMode == UploadMode.sandbox) {
        // Offline + Sandbox = no nodes (local cache is production data)
        debugPrint('[MapDataProvider] Offline + Sandbox mode: returning no nodes (local cache is production data)');
        return <OsmNode>[];
      } else {
        // Offline + Production = use local cache
        return fetchLocalNodes(
          bounds: bounds,
          profiles: profiles,
          maxNodes: 0, // No limit - get all available data
        );
      }
    } else if (uploadMode == UploadMode.sandbox) {
      // Sandbox mode: Only fetch from sandbox API, ignore local production nodes
      debugPrint('[MapDataProvider] Sandbox mode: fetching only from sandbox API, ignoring local cache');
      return _fetchRemoteNodes(
        bounds: bounds,
        profiles: profiles,
        uploadMode: uploadMode,
        maxResults: 0, // No limit - fetch all available data
      );
    } else {
      // Production mode: use pre-fetch service for efficient area loading
      final preFetchService = PrefetchAreaService();

      // Always get local nodes first (fast, from cache)
      final localNodes = await fetchLocalNodes(
        bounds: bounds,
        profiles: profiles,
        maxNodes: AppState.instance.maxNodes,
      );

      // Check if we need to trigger a new pre-fetch (spatial or temporal)
      final needsFetch = !preFetchService.isWithinPreFetchedArea(bounds, profiles, uploadMode) ||
          preFetchService.isDataStale();

      if (needsFetch) {
        // Outside area OR data stale - start pre-fetch with loading state
        debugPrint('[MapDataProvider] Starting pre-fetch with loading state');
        NetworkStatus.instance.setWaiting();
        preFetchService.requestPreFetchIfNeeded(
          viewBounds: bounds,
          profiles: profiles,
          uploadMode: uploadMode,
        );
      } else {
        debugPrint('[MapDataProvider] Using existing fresh pre-fetched area cache');
      }

      // Return all local nodes without any rendering limit
      // Rendering limits are applied at the UI layer
      return localNodes;
    }
    return _nodeDataManager.getNodesFor(
      bounds: bounds,
      profiles: profiles,
      uploadMode: uploadMode,
      isUserInitiated: isUserInitiated,
    );
  }
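For context, a sketch of how a map screen might call the new entry point. Names like `onMapMoved`, `visibleBounds`, and `enabledProfiles` are illustrative, not from this commit:

```dart
// Hypothetical call site, e.g. a map-move handler.
Future<void> onMapMoved(LatLngBounds visibleBounds, List<NodeProfile> enabledProfiles) async {
  final nodes = await MapDataProvider().getNodes(
    bounds: visibleBounds,     // current viewport
    profiles: enabledProfiles, // enabled NodeProfiles
    isUserInitiated: true,     // drives the NetworkStatus waiting/success states
  );
  debugPrint('Rendering ${nodes.length} nodes');
}
```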

  /// Bulk/paged node fetch for offline downloads (handling paging, dedup, and Overpass retries)
  /// Only use for offline area download, not for map browsing! Ignores maxNodes config.
  /// Bulk node fetch for offline downloads using new system
  Future<List<OsmNode>> getAllNodesForDownload({
    required LatLngBounds bounds,
    required List<NodeProfile> profiles,
@@ -131,16 +55,12 @@ class MapDataProvider {
    int maxResults = 0, // 0 = no limit for offline downloads
    int maxTries = 3,
  }) async {
    final offline = AppState.instance.offlineMode;
    if (offline) {
    if (AppState.instance.offlineMode) {
      throw OfflineModeException("Cannot fetch remote nodes for offline area download in offline mode.");
    }
    return _fetchRemoteNodes(
      bounds: bounds,
      profiles: profiles,
      uploadMode: uploadMode,
      maxResults: maxResults, // Pass 0 for unlimited
    );

    // For downloads, always fetch fresh data (don't use cache)
    return _nodeDataManager.fetchWithSplitting(bounds, profiles);
  }

  /// Fetch tile image bytes. Default is to try local first, then remote if not offline. Honors explicit source.
@@ -202,57 +122,44 @@ class MapDataProvider {
    clearRemoteTileQueueSelective(currentBounds);
  }

  /// Fetch remote nodes with Overpass first, OSM API fallback
  Future<List<OsmNode>> _fetchRemoteNodes({
  /// Add or update nodes in cache (for upload queue integration)
  void addOrUpdateNodes(List<OsmNode> nodes) {
    _nodeDataManager.addOrUpdateNodes(nodes);
  }

  /// NodeCache compatibility - alias for addOrUpdateNodes
  void addOrUpdate(List<OsmNode> nodes) {
    addOrUpdateNodes(nodes);
  }

  /// Remove node from cache (for deletions)
  void removeNodeById(int nodeId) {
    _nodeDataManager.removeNodeById(nodeId);
  }

  /// Clear cache (when profiles change)
  void clearCache() {
    _nodeDataManager.clearCache();
  }

  /// Force refresh current area (manual retry)
  Future<void> refreshArea({
    required LatLngBounds bounds,
    required List<NodeProfile> profiles,
    UploadMode uploadMode = UploadMode.production,
    required int maxResults,
  }) async {
    // For sandbox mode, skip Overpass and go directly to OSM API
    // (Overpass doesn't have sandbox data)
    if (uploadMode == UploadMode.sandbox) {
      debugPrint('[MapDataProvider] Sandbox mode detected, using OSM API directly');
      return fetchOsmApiNodes(
        bounds: bounds,
        profiles: profiles,
        uploadMode: uploadMode,
        maxResults: maxResults,
      );
    }

    // For production mode, try Overpass first, then fallback to OSM API
    try {
      final nodes = await fetchOverpassNodes(
        bounds: bounds,
        profiles: profiles,
        uploadMode: uploadMode,
        maxResults: maxResults,
      );

      // If Overpass returns nodes, we're good
      if (nodes.isNotEmpty) {
        return nodes;
      }

      // If Overpass returns empty (could be no data or could be an issue),
      // try OSM API as well to be thorough
      debugPrint('[MapDataProvider] Overpass returned no nodes, trying OSM API fallback');
      return fetchOsmApiNodes(
        bounds: bounds,
        profiles: profiles,
        uploadMode: uploadMode,
        maxResults: maxResults,
      );

    } catch (e) {
      debugPrint('[MapDataProvider] Overpass failed ($e), trying OSM API fallback');
      return fetchOsmApiNodes(
        bounds: bounds,
        profiles: profiles,
        uploadMode: uploadMode,
        maxResults: maxResults,
      );
    }
    return _nodeDataManager.refreshArea(
      bounds: bounds,
      profiles: profiles,
      uploadMode: uploadMode,
    );
  }

  /// NodeCache compatibility methods for upload queue
  OsmNode? getNodeById(int nodeId) => _nodeDataManager.getNodeById(nodeId);
  void removePendingEditMarker(int nodeId) => _nodeDataManager.removePendingEditMarker(nodeId);
  void removePendingDeletionMarker(int nodeId) => _nodeDataManager.removePendingDeletionMarker(nodeId);
  void removeTempNodeById(int tempNodeId) => _nodeDataManager.removeTempNodeById(tempNodeId);
  List<OsmNode> findNodesWithinDistance(LatLng coord, double distanceMeters, {int? excludeNodeId}) =>
      _nodeDataManager.findNodesWithinDistance(coord, distanceMeters, excludeNodeId: excludeNodeId);
}
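The compatibility surface above lets existing callers keep the old NodeCache idioms. A sketch of the proximity check (the coordinate and radius are illustrative, not from the source):

```dart
// Hypothetical pre-submit check: warn if another node is within 25 m.
void warnIfTooClose(LatLng candidate) {
  final nearby = MapDataProvider().findNodesWithinDistance(
    candidate,
    25.0, // metres (illustrative threshold)
  );
  if (nearby.isNotEmpty) {
    debugPrint('Warning: ${nearby.length} existing node(s) within 25 m');
  }
}
```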

@@ -1,419 +0,0 @@
import 'dart:convert';
import 'package:http/http.dart' as http;
import 'package:flutter/foundation.dart';
import 'package:latlong2/latlong.dart';
import 'package:flutter_map/flutter_map.dart';

import '../../models/node_profile.dart';
import '../../models/osm_node.dart';
import '../../models/pending_upload.dart';
import '../../app_state.dart';
import '../../dev_config.dart';
import '../network_status.dart';
import '../overpass_node_limit_exception.dart';

/// Fetches surveillance nodes from the Overpass OSM API for the given bounds and profiles.
/// If the query fails due to too many nodes, automatically splits the area and retries.
Future<List<OsmNode>> fetchOverpassNodes({
  required LatLngBounds bounds,
  required List<NodeProfile> profiles,
  UploadMode uploadMode = UploadMode.production,
  required int maxResults,
}) async {
  // Check if this is a user-initiated fetch (indicated by loading state)
  final wasUserInitiated = NetworkStatus.instance.currentStatus == NetworkStatusType.waiting;

  try {
    final nodes = await _fetchOverpassNodesWithSplitting(
      bounds: bounds,
      profiles: profiles,
      uploadMode: uploadMode,
      maxResults: maxResults,
      splitDepth: 0,
      reportStatus: wasUserInitiated, // Only top level reports status
    );

    // Only report success at the top level if this was user-initiated
    if (wasUserInitiated) {
      NetworkStatus.instance.setSuccess();
    }

    return nodes;
  } catch (e) {
    // Only report errors at the top level if this was user-initiated
    if (wasUserInitiated) {
      if (e.toString().contains('timeout') || e.toString().contains('timed out')) {
        NetworkStatus.instance.setTimeoutError();
      } else {
        NetworkStatus.instance.setNetworkError();
      }
    }

    debugPrint('[fetchOverpassNodes] Top-level operation failed: $e');
    return [];
  }
}

/// Internal method that handles splitting when node limit is exceeded.
Future<List<OsmNode>> _fetchOverpassNodesWithSplitting({
  required LatLngBounds bounds,
  required List<NodeProfile> profiles,
  UploadMode uploadMode = UploadMode.production,
  required int maxResults,
  required int splitDepth,
  required bool reportStatus, // Only true for top level
}) async {
  if (profiles.isEmpty) return [];

  const int maxSplitDepth = kMaxPreFetchSplitDepth; // Maximum times we'll split (4^3 = 64 max sub-areas)

  try {
    return await _fetchSingleOverpassQuery(
      bounds: bounds,
      profiles: profiles,
      maxResults: maxResults,
      reportStatus: reportStatus,
    );
  } on OverpassRateLimitException catch (e) {
    // Rate limits should NOT be split - just fail with extended backoff
    debugPrint('[fetchOverpassNodes] Rate limited - using extended backoff, not splitting');

    // Report slow progress when backing off
    if (reportStatus) {
      NetworkStatus.instance.reportSlowProgress();
    }

    // Wait longer for rate limits before giving up entirely
    await Future.delayed(const Duration(seconds: 30));
    return []; // Return empty rather than rethrowing - let caller handle error reporting
  } on OverpassNodeLimitException {
    // If we've hit max split depth, give up to avoid infinite recursion
    if (splitDepth >= maxSplitDepth) {
      debugPrint('[fetchOverpassNodes] Max split depth reached, giving up on area: $bounds');
      return []; // Return empty - let caller handle error reporting
    }

    // Report slow progress when we start splitting (only at the top level)
    if (reportStatus) {
      NetworkStatus.instance.reportSlowProgress();
    }

    // Split the bounds into 4 quadrants and try each separately
    debugPrint('[fetchOverpassNodes] Splitting area into quadrants (depth: $splitDepth)');
    final quadrants = _splitBounds(bounds);
    final List<OsmNode> allNodes = [];

    for (final quadrant in quadrants) {
      final nodes = await _fetchOverpassNodesWithSplitting(
        bounds: quadrant,
        profiles: profiles,
        uploadMode: uploadMode,
        maxResults: 0, // No limit on individual quadrants to avoid double-limiting
        splitDepth: splitDepth + 1,
        reportStatus: false, // Sub-requests don't report status
      );
      allNodes.addAll(nodes);
    }

    debugPrint('[fetchOverpassNodes] Collected ${allNodes.length} nodes from ${quadrants.length} quadrants');
    return allNodes;
  }
}

/// Perform a single Overpass query without splitting logic.
Future<List<OsmNode>> _fetchSingleOverpassQuery({
  required LatLngBounds bounds,
  required List<NodeProfile> profiles,
  required int maxResults,
  required bool reportStatus,
}) async {
  const String overpassEndpoint = 'https://overpass-api.de/api/interpreter';

  // Build the Overpass query
  final query = _buildOverpassQuery(bounds, profiles, maxResults);

  try {
    debugPrint('[fetchOverpassNodes] Querying Overpass for surveillance nodes...');
    debugPrint('[fetchOverpassNodes] Query:\n$query');

    final response = await http.post(
      Uri.parse(overpassEndpoint),
      body: {'data': query.trim()}
    );

    if (response.statusCode != 200) {
      final errorBody = response.body;
      debugPrint('[fetchOverpassNodes] Overpass API error: $errorBody');

      // Check if it's specifically the 50k node limit error (HTTP 400)
      // Exact message: "You requested too many nodes (limit is 50000)"
      if (errorBody.contains('too many nodes') &&
          errorBody.contains('50000')) {
        debugPrint('[fetchOverpassNodes] Detected 50k node limit error, will attempt splitting');
        throw OverpassNodeLimitException('Query exceeded node limit', serverResponse: errorBody);
      }

      // Check for timeout errors that indicate query complexity (should split)
      // Common timeout messages from Overpass
      if (errorBody.contains('timeout') ||
          errorBody.contains('runtime limit exceeded') ||
          errorBody.contains('Query timed out')) {
        debugPrint('[fetchOverpassNodes] Detected timeout error, will attempt splitting to reduce complexity');
        throw OverpassNodeLimitException('Query timed out', serverResponse: errorBody);
      }

      // Check for rate limiting (should NOT split - needs longer backoff)
      if (errorBody.contains('rate limited') ||
          errorBody.contains('too many requests') ||
          response.statusCode == 429) {
        debugPrint('[fetchOverpassNodes] Rate limited by Overpass API - needs extended backoff');
        throw OverpassRateLimitException('Rate limited by server', serverResponse: errorBody);
      }

      // Don't report status here - let the top level handle it
      throw Exception('Overpass API error: $errorBody');
    }

    final data = await compute(jsonDecode, response.body) as Map<String, dynamic>;
    final elements = data['elements'] as List<dynamic>;

    if (elements.length > 20) {
      debugPrint('[fetchOverpassNodes] Retrieved ${elements.length} elements (nodes + ways/relations)');
    }

    // Don't report success here - let the top level handle it

    // Parse response to determine which nodes are constrained
    final nodes = _parseOverpassResponseWithConstraints(elements);

    // Clean up any pending uploads that now appear in Overpass results
    _cleanupCompletedUploads(nodes);

    return nodes;

  } catch (e) {
    // Re-throw OverpassNodeLimitException so splitting logic can catch it
    if (e is OverpassNodeLimitException) rethrow;

    debugPrint('[fetchOverpassNodes] Exception: $e');

    // Don't report status here - let the top level handle it
    throw e; // Re-throw to let caller handle
  }
}

/// Builds an Overpass API query for surveillance nodes matching the given profiles within bounds.
/// Also fetches ways and relations that reference these nodes to determine constraint status.
String _buildOverpassQuery(LatLngBounds bounds, List<NodeProfile> profiles, int maxResults) {
  // Deduplicate profiles to reduce query complexity - broader profiles subsume more specific ones
  final deduplicatedProfiles = _deduplicateProfilesForQuery(profiles);

  // Safety check: if deduplication removed all profiles (edge case), fall back to original list
  final profilesToQuery = deduplicatedProfiles.isNotEmpty ? deduplicatedProfiles : profiles;

  if (deduplicatedProfiles.length < profiles.length) {
    debugPrint('[Overpass] Deduplicated ${profiles.length} profiles to ${deduplicatedProfiles.length} for query efficiency');
  }

  // Build node clauses for deduplicated profiles only
  final nodeClauses = profilesToQuery.map((profile) {
    // Convert profile tags to Overpass filter format, excluding empty values
    final tagFilters = profile.tags.entries
        .where((entry) => entry.value.trim().isNotEmpty) // Skip empty values
        .map((entry) => '["${entry.key}"="${entry.value}"]')
        .join();

    // Build the node query with tag filters and bounding box
    return 'node$tagFilters(${bounds.southWest.latitude},${bounds.southWest.longitude},${bounds.northEast.latitude},${bounds.northEast.longitude});';
  }).join('\n ');

  return '''
[out:json][timeout:${kOverpassQueryTimeout.inSeconds}];
(
  $nodeClauses
);
out body ${maxResults > 0 ? maxResults : ''};
(
  way(bn);
  rel(bn);
);
out meta;
''';
}
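For a single profile tagged `man_made=surveillance` over an example bounding box, and assuming `kOverpassQueryTimeout` is 25 s (the real constant lives in dev_config.dart), the template above renders to roughly:

```
[out:json][timeout:25];
(
  node["man_made"="surveillance"](47.60,-122.40,47.70,-122.30);
);
out body ;
(
  way(bn);
  rel(bn);
);
out meta;
```

Note that with `maxResults == 0` the interpolation leaves `out body ;` (no count, i.e. unlimited), and the `way(bn)`/`rel(bn)` block pulls in anything referencing the returned nodes, which is how constraint status gets determined downstream.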

/// Deduplicate profiles for Overpass queries by removing profiles that are subsumed by others.
/// A profile A subsumes profile B if all of A's non-empty tags exist in B with identical values.
/// This optimization reduces query complexity while returning the same nodes (since broader
/// profiles capture all nodes that more specific profiles would).
List<NodeProfile> _deduplicateProfilesForQuery(List<NodeProfile> profiles) {
  if (profiles.length <= 1) return profiles;

  final result = <NodeProfile>[];

  for (final candidate in profiles) {
    // Skip profiles that only have empty tags - they would match everything and break queries
    final candidateNonEmptyTags = candidate.tags.entries
        .where((entry) => entry.value.trim().isNotEmpty)
        .toList();

    if (candidateNonEmptyTags.isEmpty) continue;

    // Check if any existing profile in our result subsumes this candidate
    bool isSubsumed = false;
    for (final existing in result) {
      if (_profileSubsumes(existing, candidate)) {
        isSubsumed = true;
        break;
      }
    }

    if (!isSubsumed) {
      // This candidate is not subsumed, so add it
      // But first, remove any existing profiles that this candidate subsumes
      result.removeWhere((existing) => _profileSubsumes(candidate, existing));
      result.add(candidate);
    }
  }

  return result;
}

/// Check if broaderProfile subsumes specificProfile.
/// Returns true if all non-empty tags in broaderProfile exist in specificProfile with identical values.
bool _profileSubsumes(NodeProfile broaderProfile, NodeProfile specificProfile) {
  // Get non-empty tags from both profiles
  final broaderTags = Map.fromEntries(
    broaderProfile.tags.entries.where((entry) => entry.value.trim().isNotEmpty)
  );
  final specificTags = Map.fromEntries(
    specificProfile.tags.entries.where((entry) => entry.value.trim().isNotEmpty)
  );

  // If broader has no non-empty tags, it doesn't subsume anything (would match everything)
  if (broaderTags.isEmpty) return false;

  // If broader has more non-empty tags than specific, it can't subsume
  if (broaderTags.length > specificTags.length) return false;

  // Check if all broader tags exist in specific with same values
  for (final entry in broaderTags.entries) {
    if (specificTags[entry.key] != entry.value) return false;
  }

  return true;
}
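A minimal, self-contained sketch of the subsumption rule over plain tag maps (NodeProfile internals are assumed; this mirrors the logic above, and the example tags are illustrative):

```dart
// Mirrors _profileSubsumes, but over bare Map<String, String> tag sets.
bool subsumes(Map<String, String> broader, Map<String, String> specific) {
  final b = {
    for (final e in broader.entries)
      if (e.value.trim().isNotEmpty) e.key: e.value
  };
  final s = {
    for (final e in specific.entries)
      if (e.value.trim().isNotEmpty) e.key: e.value
  };
  if (b.isEmpty || b.length > s.length) return false;
  return b.entries.every((e) => s[e.key] == e.value);
}

void main() {
  const broad = {'man_made': 'surveillance'};
  const narrow = {'man_made': 'surveillance', 'surveillance:type': 'camera'};
  print(subsumes(broad, narrow)); // true: the broader query already matches these nodes
  print(subsumes(narrow, broad)); // false: narrow requires a tag broad lacks
}
```

Since a broader clause returns a superset of what the narrower one would, dropping the narrower profile shrinks the query without changing the result set.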

/// Split a LatLngBounds into 4 quadrants (SW, SE, NW, NE).
List<LatLngBounds> _splitBounds(LatLngBounds bounds) {
  final centerLat = (bounds.north + bounds.south) / 2;
  final centerLng = (bounds.east + bounds.west) / 2;

  return [
    // Southwest quadrant (bottom-left)
    LatLngBounds(
      LatLng(bounds.south, bounds.west),
      LatLng(centerLat, centerLng),
    ),
    // Southeast quadrant (bottom-right)
    LatLngBounds(
      LatLng(bounds.south, centerLng),
      LatLng(centerLat, bounds.east),
    ),
    // Northwest quadrant (top-left)
    LatLngBounds(
      LatLng(centerLat, bounds.west),
      LatLng(bounds.north, centerLng),
    ),
    // Northeast quadrant (top-right)
    LatLngBounds(
      LatLng(centerLat, centerLng),
      LatLng(bounds.north, bounds.east),
    ),
  ];
}

/// Parse Overpass response elements to create OsmNode objects with constraint information.
List<OsmNode> _parseOverpassResponseWithConstraints(List<dynamic> elements) {
  final nodeElements = <Map<String, dynamic>>[];
  final constrainedNodeIds = <int>{};

  // First pass: collect surveillance nodes and identify constrained nodes
  for (final element in elements.whereType<Map<String, dynamic>>()) {
    final type = element['type'] as String?;

    if (type == 'node') {
      // This is a surveillance node - collect it
      nodeElements.add(element);
    } else if (type == 'way' || type == 'relation') {
      // This is a way/relation that references some of our nodes
      final refs = element['nodes'] as List<dynamic>? ??
          element['members']?.where((m) => m['type'] == 'node').map((m) => m['ref']) ?? [];

      // Mark all referenced nodes as constrained
      for (final ref in refs) {
        if (ref is int) {
          constrainedNodeIds.add(ref);
        } else if (ref is String) {
          final nodeId = int.tryParse(ref);
          if (nodeId != null) constrainedNodeIds.add(nodeId);
        }
      }
    }
  }

  // Second pass: create OsmNode objects with constraint info
  final nodes = nodeElements.map((element) {
    final nodeId = element['id'] as int;
    final isConstrained = constrainedNodeIds.contains(nodeId);

    return OsmNode(
      id: nodeId,
      coord: LatLng(element['lat'], element['lon']),
      tags: Map<String, String>.from(element['tags'] ?? {}),
      isConstrained: isConstrained,
    );
  }).toList();

  final constrainedCount = nodes.where((n) => n.isConstrained).length;
  if (constrainedCount > 0) {
    debugPrint('[fetchOverpassNodes] Found $constrainedCount constrained nodes out of ${nodes.length} total');
  }

  return nodes;
}

/// Clean up pending uploads that now appear in Overpass results
void _cleanupCompletedUploads(List<OsmNode> overpassNodes) {
  try {
    final appState = AppState.instance;
    final pendingUploads = appState.pendingUploads;

    if (pendingUploads.isEmpty) return;

    final overpassNodeIds = overpassNodes.map((n) => n.id).toSet();

    // Find pending uploads whose submitted node IDs now appear in Overpass results
    final uploadsToRemove = <PendingUpload>[];

    for (final upload in pendingUploads) {
      if (upload.submittedNodeId != null &&
          overpassNodeIds.contains(upload.submittedNodeId!)) {
        uploadsToRemove.add(upload);
        debugPrint('[OverpassCleanup] Found submitted node ${upload.submittedNodeId} in Overpass results, removing from pending queue');
      }
    }

    // Remove the completed uploads from the queue
    for (final upload in uploadsToRemove) {
      appState.removeFromQueue(upload);
    }

    if (uploadsToRemove.isNotEmpty) {
      debugPrint('[OverpassCleanup] Cleaned up ${uploadsToRemove.length} completed uploads');
    }

  } catch (e) {
    debugPrint('[OverpassCleanup] Error during cleanup: $e');
    // Don't let cleanup errors break the main functionality
  }
}
236 lib/services/node_data_manager.dart Normal file
@@ -0,0 +1,236 @@
import 'dart:async';
import 'package:flutter/foundation.dart';
import 'package:latlong2/latlong.dart';
import 'package:flutter_map/flutter_map.dart';

import '../models/node_profile.dart';
import '../models/osm_node.dart';
import '../app_state.dart';
import 'overpass_service.dart';
import 'node_spatial_cache.dart';
import 'network_status.dart';
import 'map_data_submodules/nodes_from_osm_api.dart';
import 'map_data_submodules/nodes_from_local.dart';

/// Coordinates node data fetching between cache, Overpass, and OSM API.
/// Simple interface: give me nodes for this view with proper caching and error handling.
class NodeDataManager extends ChangeNotifier {
  static final NodeDataManager _instance = NodeDataManager._();
  factory NodeDataManager() => _instance;
  NodeDataManager._();

  final OverpassService _overpassService = OverpassService();
  final NodeSpatialCache _cache = NodeSpatialCache();

  /// Get nodes for the given bounds and profiles.
  /// Returns cached data immediately if available, otherwise fetches from appropriate source.
  Future<List<OsmNode>> getNodesFor({
    required LatLngBounds bounds,
    required List<NodeProfile> profiles,
    UploadMode uploadMode = UploadMode.production,
    bool isUserInitiated = false,
  }) async {
    if (profiles.isEmpty) return [];

    // Handle offline mode
    if (AppState.instance.offlineMode) {
      if (uploadMode == UploadMode.sandbox) {
        // Offline + Sandbox = no nodes (local cache is production data)
        debugPrint('[NodeDataManager] Offline + Sandbox mode: returning no nodes');
        return [];
      } else {
        // Offline + Production = use local cache only
        return fetchLocalNodes(bounds: bounds, profiles: profiles);
      }
    }

    // Handle sandbox mode (always fetch from OSM API, no caching)
    if (uploadMode == UploadMode.sandbox) {
      debugPrint('[NodeDataManager] Sandbox mode: fetching from OSM API');
      return fetchOsmApiNodes(
        bounds: bounds,
        profiles: profiles,
        uploadMode: uploadMode,
        maxResults: 0,
      );
    }

    // Production mode: check cache first
    if (_cache.hasDataFor(bounds)) {
      debugPrint('[NodeDataManager] Using cached data for bounds');
      return _cache.getNodesFor(bounds);
    }

    // Not cached - need to fetch
    if (isUserInitiated) {
      NetworkStatus.instance.setWaiting();
    }

    try {
      final nodes = await fetchWithSplitting(bounds, profiles);

      if (isUserInitiated) {
        NetworkStatus.instance.setSuccess();
      }

      notifyListeners();
      return nodes;

    } catch (e) {
      debugPrint('[NodeDataManager] Fetch failed: $e');

      if (isUserInitiated) {
        if (e is RateLimitError) {
          NetworkStatus.instance.reportOverpassIssue();
        } else {
          NetworkStatus.instance.setNetworkError();
        }
      }

      // Return whatever we have in cache for this area
      return _cache.getNodesFor(bounds);
    }
  }

  /// Fetch nodes with automatic area splitting if needed
  Future<List<OsmNode>> fetchWithSplitting(
    LatLngBounds bounds,
    List<NodeProfile> profiles, {
    int splitDepth = 0,
  }) async {
    const maxSplitDepth = 3; // 4^3 = 64 max sub-areas

    try {
      // Expand bounds slightly to reduce edge effects
      final expandedBounds = _expandBounds(bounds, 1.2);

      final nodes = await _overpassService.fetchNodes(
        bounds: expandedBounds,
        profiles: profiles,
      );

      // Success - cache the data for the expanded area
      _cache.markAreaAsFetched(expandedBounds, nodes);
      return nodes;

    } on NodeLimitError {
      // Hit node limit or timeout - split area if not too deep
      if (splitDepth >= maxSplitDepth) {
        debugPrint('[NodeDataManager] Max split depth reached, giving up');
        return [];
      }

      debugPrint('[NodeDataManager] Splitting area (depth: $splitDepth)');
      NetworkStatus.instance.reportSlowProgress();

      return _fetchSplitAreas(bounds, profiles, splitDepth + 1);

    } on RateLimitError {
      // Rate limited - wait and return empty
      debugPrint('[NodeDataManager] Rate limited, backing off');
      await Future.delayed(const Duration(seconds: 30));
      return [];
    }
  }

  /// Fetch data by splitting area into quadrants
  Future<List<OsmNode>> _fetchSplitAreas(
    LatLngBounds bounds,
    List<NodeProfile> profiles,
    int splitDepth,
  ) async {
    final quadrants = _splitBounds(bounds);
    final allNodes = <OsmNode>[];

    for (final quadrant in quadrants) {
      try {
        final nodes = await fetchWithSplitting(quadrant, profiles, splitDepth: splitDepth);
        allNodes.addAll(nodes);
      } catch (e) {
        debugPrint('[NodeDataManager] Quadrant fetch failed: $e');
        // Continue with other quadrants
      }
    }

    debugPrint('[NodeDataManager] Split fetch complete: ${allNodes.length} total nodes');
    return allNodes;
  }

  /// Split bounds into 4 quadrants
  List<LatLngBounds> _splitBounds(LatLngBounds bounds) {
    final centerLat = (bounds.north + bounds.south) / 2;
    final centerLng = (bounds.east + bounds.west) / 2;

    return [
      // Southwest
      LatLngBounds(LatLng(bounds.south, bounds.west), LatLng(centerLat, centerLng)),
      // Southeast
      LatLngBounds(LatLng(bounds.south, centerLng), LatLng(centerLat, bounds.east)),
      // Northwest
      LatLngBounds(LatLng(centerLat, bounds.west), LatLng(bounds.north, centerLng)),
      // Northeast
      LatLngBounds(LatLng(centerLat, centerLng), LatLng(bounds.north, bounds.east)),
    ];
  }

  /// Expand bounds by given factor around center point
  LatLngBounds _expandBounds(LatLngBounds bounds, double factor) {
    final centerLat = (bounds.north + bounds.south) / 2;
    final centerLng = (bounds.east + bounds.west) / 2;

    final latSpan = (bounds.north - bounds.south) * factor / 2;
    final lngSpan = (bounds.east - bounds.west) * factor / 2;

    return LatLngBounds(
      LatLng(centerLat - latSpan, centerLng - lngSpan),
      LatLng(centerLat + latSpan, centerLng + lngSpan),
    );
  }
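A quick standalone check of the expansion math: with factor 1.2 the total span grows by 20%, which gives small pans a chance to stay inside the already-cached area (coordinates are illustrative):

```dart
void main() {
  const south = 47.60, north = 47.70, factor = 1.2; // illustrative values
  final center = (north + south) / 2;               // 47.65
  final halfSpan = (north - south) * factor / 2;    // 0.06
  print('${(center - halfSpan).toStringAsFixed(2)} .. '
      '${(center + halfSpan).toStringAsFixed(2)}'); // 47.59 .. 47.71
}
```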

  /// Add or update nodes in cache (for upload queue integration)
  void addOrUpdateNodes(List<OsmNode> nodes) {
    _cache.addOrUpdateNodes(nodes);
    notifyListeners();
  }

  /// Remove node from cache (for deletions)
  void removeNodeById(int nodeId) {
    _cache.removeNodeById(nodeId);
    notifyListeners();
  }

  /// Clear cache (when profiles change significantly)
  void clearCache() {
    _cache.clear();
    notifyListeners();
  }

  /// Force refresh for current view (manual retry)
  Future<void> refreshArea({
    required LatLngBounds bounds,
    required List<NodeProfile> profiles,
    UploadMode uploadMode = UploadMode.production,
  }) async {
    // Clear any cached data for this area
    _cache.clear(); // Simple: clear everything for now

    // Re-fetch
    await getNodesFor(
      bounds: bounds,
      profiles: profiles,
      uploadMode: uploadMode,
      isUserInitiated: true,
    );
  }

  /// NodeCache compatibility methods
  OsmNode? getNodeById(int nodeId) => _cache.getNodeById(nodeId);
  void removePendingEditMarker(int nodeId) => _cache.removePendingEditMarker(nodeId);
  void removePendingDeletionMarker(int nodeId) => _cache.removePendingDeletionMarker(nodeId);
  void removeTempNodeById(int tempNodeId) => _cache.removeTempNodeById(tempNodeId);
  List<OsmNode> findNodesWithinDistance(LatLng coord, double distanceMeters, {int? excludeNodeId}) =>
      _cache.findNodesWithinDistance(coord, distanceMeters, excludeNodeId: excludeNodeId);

  /// Get cache statistics
  String get cacheStats => _cache.stats.toString();
}
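A sketch of the manual-retry hook this enables (hypothetical call site, e.g. behind a "Retry" button):

```dart
// refreshArea clears the cache and re-fetches with isUserInitiated: true,
// so the NetworkStatus indicator reflects the retry.
Future<void> onRetryPressed(LatLngBounds view, List<NodeProfile> profiles) =>
    NodeDataManager().refreshArea(bounds: view, profiles: profiles);
```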
190 lib/services/node_spatial_cache.dart Normal file
@@ -0,0 +1,190 @@
import 'package:flutter/foundation.dart';
import 'package:latlong2/latlong.dart';
import 'package:flutter_map/flutter_map.dart';

import '../models/osm_node.dart';

const Distance _distance = Distance();

/// Simple spatial cache that tracks which areas have been successfully fetched.
/// No temporal expiration - data stays cached until app restart or explicit clear.
class NodeSpatialCache {
  static final NodeSpatialCache _instance = NodeSpatialCache._();
  factory NodeSpatialCache() => _instance;
  NodeSpatialCache._();

  final List<CachedArea> _fetchedAreas = [];
  final Map<int, OsmNode> _nodes = {}; // nodeId -> node

  /// Check if we have cached data covering the given bounds
  bool hasDataFor(LatLngBounds bounds) {
    return _fetchedAreas.any((area) => area.bounds.containsBounds(bounds));
  }

  /// Record that we successfully fetched data for this area
  void markAreaAsFetched(LatLngBounds bounds, List<OsmNode> nodes) {
    // Add the fetched area
    _fetchedAreas.add(CachedArea(bounds, DateTime.now()));

    // Update nodes in cache
    for (final node in nodes) {
      _nodes[node.id] = node;
    }

    debugPrint('[NodeSpatialCache] Cached ${nodes.length} nodes for area ${bounds.south.toStringAsFixed(3)},${bounds.west.toStringAsFixed(3)} to ${bounds.north.toStringAsFixed(3)},${bounds.east.toStringAsFixed(3)}');
    debugPrint('[NodeSpatialCache] Total areas cached: ${_fetchedAreas.length}, total nodes: ${_nodes.length}');
  }

  /// Get all cached nodes within the given bounds
  List<OsmNode> getNodesFor(LatLngBounds bounds) {
    return _nodes.values
        .where((node) => bounds.contains(node.coord))
        .toList();
  }

  /// Add or update individual nodes (for upload queue integration)
  void addOrUpdateNodes(List<OsmNode> nodes) {
    for (final node in nodes) {
      final existing = _nodes[node.id];
      if (existing != null) {
        // Preserve any tags starting with underscore when updating existing nodes
        final mergedTags = Map<String, String>.from(node.tags);
        for (final entry in existing.tags.entries) {
          if (entry.key.startsWith('_')) {
            mergedTags[entry.key] = entry.value;
          }
        }
        _nodes[node.id] = OsmNode(
          id: node.id,
          coord: node.coord,
          tags: mergedTags,
          isConstrained: node.isConstrained,
        );
      } else {
        _nodes[node.id] = node;
      }
    }
  }

  /// Remove a node by ID (for deletions)
  void removeNodeById(int nodeId) {
    if (_nodes.remove(nodeId) != null) {
      debugPrint('[NodeSpatialCache] Removed node $nodeId from cache');
    }
  }

  /// Get a specific node by ID (returns null if not found)
  OsmNode? getNodeById(int nodeId) {
    return _nodes[nodeId];
  }

  /// Remove the _pending_edit marker from a specific node
  void removePendingEditMarker(int nodeId) {
    final node = _nodes[nodeId];
    if (node != null && node.tags.containsKey('_pending_edit')) {
      final cleanTags = Map<String, String>.from(node.tags);
      cleanTags.remove('_pending_edit');

      _nodes[nodeId] = OsmNode(
        id: node.id,
        coord: node.coord,
        tags: cleanTags,
        isConstrained: node.isConstrained,
      );
    }
  }

  /// Remove the _pending_deletion marker from a specific node
  void removePendingDeletionMarker(int nodeId) {
    final node = _nodes[nodeId];
    if (node != null && node.tags.containsKey('_pending_deletion')) {
      final cleanTags = Map<String, String>.from(node.tags);
      cleanTags.remove('_pending_deletion');

      _nodes[nodeId] = OsmNode(
        id: node.id,
        coord: node.coord,
        tags: cleanTags,
        isConstrained: node.isConstrained,
      );
    }
  }

  /// Remove a specific temporary node by its ID
  void removeTempNodeById(int tempNodeId) {
    if (tempNodeId >= 0) {
      debugPrint('[NodeSpatialCache] Warning: Attempted to remove non-temp node ID $tempNodeId');
      return;
    }

    if (_nodes.remove(tempNodeId) != null) {
      debugPrint('[NodeSpatialCache] Removed temp node $tempNodeId from cache');
    }
  }

  /// Find nodes within distance of a coordinate (for proximity warnings)
  List<OsmNode> findNodesWithinDistance(LatLng coord, double distanceMeters, {int? excludeNodeId}) {
    final nearbyNodes = <OsmNode>[];

    for (final node in _nodes.values) {
      // Skip the excluded node
      if (excludeNodeId != null && node.id == excludeNodeId) {
        continue;
      }

      // Skip nodes marked for deletion
      if (node.tags.containsKey('_pending_deletion')) {
        continue;
      }

      final distanceInMeters = _distance.as(LengthUnit.Meter, coord, node.coord);
      if (distanceInMeters <= distanceMeters) {
        nearbyNodes.add(node);
      }
    }

    return nearbyNodes;
  }

  /// Clear all cached data
  void clear() {
    _fetchedAreas.clear();
    _nodes.clear();
    debugPrint('[NodeSpatialCache] Cache cleared');
  }

  /// Get cache statistics for debugging
  CacheStats get stats => CacheStats(
    areasCount: _fetchedAreas.length,
    nodesCount: _nodes.length,
  );
}

/// Represents an area that has been successfully fetched
class CachedArea {
  final LatLngBounds bounds;
  final DateTime fetchedAt;

  CachedArea(this.bounds, this.fetchedAt);
}

/// Cache statistics for debugging
class CacheStats {
  final int areasCount;
  final int nodesCount;

  CacheStats({required this.areasCount, required this.nodesCount});

  @override
  String toString() => 'CacheStats(areas: $areasCount, nodes: $nodesCount)';
}

/// Extension to check if one bounds completely contains another
extension LatLngBoundsExtension on LatLngBounds {
  bool containsBounds(LatLngBounds other) {
    return north >= other.north &&
        south <= other.south &&
        east >= other.east &&
        west <= other.west;
  }
}
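One property worth noting: `hasDataFor` tests containment against each fetched area individually, so a view straddling two adjacent fetched areas counts as a miss even when every node in it is already in `_nodes`. A sketch with illustrative coordinates:

```dart
// Two adjacent fetched areas and a view spanning their shared edge.
final a = LatLngBounds(LatLng(47.0, -123.0), LatLng(47.5, -122.0));
final b = LatLngBounds(LatLng(47.5, -123.0), LatLng(48.0, -122.0));
final view = LatLngBounds(LatLng(47.4, -122.8), LatLng(47.6, -122.2));
// a.containsBounds(view) == false and b.containsBounds(view) == false,
// so hasDataFor(view) is false and a re-fetch is triggered even though
// the union of a and b covers the view.
```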
187 lib/services/overpass_service.dart Normal file
@@ -0,0 +1,187 @@
import 'dart:convert';
import 'package:http/http.dart' as http;
import 'package:flutter/foundation.dart';
import 'package:latlong2/latlong.dart';
import 'package:flutter_map/flutter_map.dart';

import '../models/node_profile.dart';
import '../models/osm_node.dart';
import '../dev_config.dart';

/// Simple Overpass API client with proper HTTP retry logic.
/// Single responsibility: Make requests, handle network errors, return data.
class OverpassService {
  static const String _endpoint = 'https://overpass-api.de/api/interpreter';

  /// Fetch surveillance nodes from Overpass API with proper retry logic.
  /// Throws NetworkError for retryable failures, NodeLimitError for area splitting.
  Future<List<OsmNode>> fetchNodes({
    required LatLngBounds bounds,
    required List<NodeProfile> profiles,
    int maxRetries = 3,
  }) async {
    if (profiles.isEmpty) return [];

    final query = _buildQuery(bounds, profiles);

    for (int attempt = 0; attempt <= maxRetries; attempt++) {
      try {
        debugPrint('[OverpassService] Attempt ${attempt + 1}/${maxRetries + 1} for ${profiles.length} profiles');

        final response = await http.post(
          Uri.parse(_endpoint),
          body: {'data': query},
        ).timeout(kOverpassQueryTimeout);

        if (response.statusCode == 200) {
          return _parseResponse(response.body);
        }

        // Check for specific error types
        final errorBody = response.body;

        // Node limit error - caller should split area
        if (response.statusCode == 400 &&
            (errorBody.contains('too many nodes') && errorBody.contains('50000'))) {
          debugPrint('[OverpassService] Node limit exceeded, area should be split');
          throw NodeLimitError('Query exceeded 50k node limit');
        }

        // Timeout error - also try splitting (complex query)
        if (errorBody.contains('timeout') ||
            errorBody.contains('runtime limit exceeded') ||
            errorBody.contains('Query timed out')) {
          debugPrint('[OverpassService] Query timeout, area should be split');
          throw NodeLimitError('Query timed out - area too complex');
        }

        // Rate limit - throw immediately, don't retry
        if (response.statusCode == 429 ||
            errorBody.contains('rate limited') ||
            errorBody.contains('too many requests')) {
          debugPrint('[OverpassService] Rate limited by Overpass');
          throw RateLimitError('Rate limited by Overpass API');
        }

        // Other HTTP errors - retry with backoff
        if (attempt < maxRetries) {
          final delay = Duration(milliseconds: (200 * (1 << attempt)).clamp(200, 5000));
          debugPrint('[OverpassService] HTTP ${response.statusCode} error, retrying in ${delay.inMilliseconds}ms');
          await Future.delayed(delay);
          continue;
        }

        throw NetworkError('HTTP ${response.statusCode}: $errorBody');

      } catch (e) {
        // Handle specific error types without retry
        if (e is NodeLimitError || e is RateLimitError) {
          rethrow;
        }

        // Network/timeout errors - retry with backoff
        if (attempt < maxRetries) {
          final delay = Duration(milliseconds: (200 * (1 << attempt)).clamp(200, 5000));
          debugPrint('[OverpassService] Network error ($e), retrying in ${delay.inMilliseconds}ms');
          await Future.delayed(delay);
          continue;
        }

        throw NetworkError('Network error after $maxRetries retries: $e');
      }
    }

    throw NetworkError('Max retries exceeded');
  }
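The backoff expression yields a doubling delay capped at 5 s; a standalone check of the schedule (only the first few attempts are reachable with the default `maxRetries = 3`):

```dart
void main() {
  for (var attempt = 0; attempt < 6; attempt++) {
    final ms = (200 * (1 << attempt)).clamp(200, 5000);
    print('attempt $attempt -> ${ms}ms'); // 200, 400, 800, 1600, 3200, 5000
  }
}
```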

  /// Build Overpass QL query for given bounds and profiles
  String _buildQuery(LatLngBounds bounds, List<NodeProfile> profiles) {
    final nodeClauses = profiles.map((profile) {
      // Convert profile tags to Overpass filter format, excluding empty values
      final tagFilters = profile.tags.entries
          .where((entry) => entry.value.trim().isNotEmpty)
          .map((entry) => '["${entry.key}"="${entry.value}"]')
          .join();

      return 'node$tagFilters(${bounds.southWest.latitude},${bounds.southWest.longitude},${bounds.northEast.latitude},${bounds.northEast.longitude});';
    }).join('\n ');

    return '''
[out:json][timeout:${kOverpassQueryTimeout.inSeconds}];
(
  $nodeClauses
);
out body;
(
  way(bn);
  rel(bn);
);
out meta;
''';
  }

  /// Parse Overpass JSON response into OsmNode objects
  List<OsmNode> _parseResponse(String responseBody) {
    final data = jsonDecode(responseBody) as Map<String, dynamic>;
    final elements = data['elements'] as List<dynamic>;

    final nodeElements = <Map<String, dynamic>>[];
    final constrainedNodeIds = <int>{};

    // First pass: collect surveillance nodes and identify constrained nodes
    for (final element in elements.whereType<Map<String, dynamic>>()) {
      final type = element['type'] as String?;

      if (type == 'node') {
        nodeElements.add(element);
      } else if (type == 'way' || type == 'relation') {
        // Mark referenced nodes as constrained
        final refs = element['nodes'] as List<dynamic>? ??
            element['members']?.where((m) => m['type'] == 'node').map((m) => m['ref']) ?? [];

        for (final ref in refs) {
          final nodeId = ref is int ? ref : int.tryParse(ref.toString());
          if (nodeId != null) constrainedNodeIds.add(nodeId);
        }
      }
    }

    // Second pass: create OsmNode objects
    final nodes = nodeElements.map((element) {
      final nodeId = element['id'] as int;
      return OsmNode(
        id: nodeId,
        coord: LatLng(element['lat'], element['lon']),
        tags: Map<String, String>.from(element['tags'] ?? {}),
        isConstrained: constrainedNodeIds.contains(nodeId),
      );
    }).toList();

    debugPrint('[OverpassService] Parsed ${nodes.length} nodes, ${constrainedNodeIds.length} constrained');
    return nodes;
  }
}

/// Error thrown when query exceeds node limits or is too complex - area should be split
class NodeLimitError extends Error {
  final String message;
  NodeLimitError(this.message);
  @override
  String toString() => 'NodeLimitError: $message';
}

/// Error thrown when rate limited - should not retry immediately
class RateLimitError extends Error {
  final String message;
  RateLimitError(this.message);
  @override
  String toString() => 'RateLimitError: $message';
}

/// Error thrown for network/HTTP issues - retryable
class NetworkError extends Error {
  final String message;
  NetworkError(this.message);
  @override
  String toString() => 'NetworkError: $message';
}
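The three error classes give callers a simple contract. A sketch of the intended handling, mirroring what NodeDataManager does above (the handler bodies are placeholders):

```dart
Future<void> loadNodes(LatLngBounds view, List<NodeProfile> profiles) async {
  try {
    final nodes = await OverpassService().fetchNodes(bounds: view, profiles: profiles);
    debugPrint('Loaded ${nodes.length} nodes');
  } on NodeLimitError {
    // Too much data or too complex a query: split the area into quadrants and retry.
  } on RateLimitError {
    // Back off; do not hammer the API with an immediate retry.
  } on NetworkError catch (e) {
    // Retries are already exhausted inside fetchNodes; surface this to the UI.
    debugPrint('Overpass unavailable: $e');
  }
}
```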
|
||||
@@ -1,192 +0,0 @@
|
||||
import 'dart:async';
|
||||
import 'package:flutter/foundation.dart';
|
||||
import 'package:latlong2/latlong.dart';
|
||||
import 'package:flutter_map/flutter_map.dart';
|
||||
|
||||
import '../models/node_profile.dart';
|
||||
import '../models/osm_node.dart';
|
||||
import '../app_state.dart';
|
||||
import '../dev_config.dart';
|
||||
import 'map_data_submodules/nodes_from_overpass.dart';
|
||||
import 'node_cache.dart';
|
||||
import 'network_status.dart';
|
||||
import '../widgets/node_provider_with_cache.dart';
|
||||
|
||||
/// Manages pre-fetching larger areas to reduce Overpass API calls.
|
||||
/// Uses zoom level 10 areas and automatically splits if hitting node limits.
|
||||
class PrefetchAreaService {
|
||||
static final PrefetchAreaService _instance = PrefetchAreaService._();
|
||||
factory PrefetchAreaService() => _instance;
|
||||
PrefetchAreaService._();
|
||||
|
||||
// Current pre-fetched area and associated data
|
||||
LatLngBounds? _preFetchedArea;
|
||||
List<NodeProfile>? _preFetchedProfiles;
|
||||
UploadMode? _preFetchedUploadMode;
|
||||
DateTime? _lastFetchTime;
|
||||
bool _preFetchInProgress = false;
|
||||
|
||||
// Debounce timer to avoid rapid requests while user is panning
|
||||
Timer? _debounceTimer;
|
||||
|
||||
// Configuration from dev_config
|
||||
static const double _areaExpansionMultiplier = kPreFetchAreaExpansionMultiplier;
|
||||
static const int _preFetchZoomLevel = kPreFetchZoomLevel;
|
||||
|
||||
/// Check if the given bounds are fully within the current pre-fetched area.
|
||||
bool isWithinPreFetchedArea(LatLngBounds bounds, List<NodeProfile> profiles, UploadMode uploadMode) {
|
||||
if (_preFetchedArea == null || _preFetchedProfiles == null || _preFetchedUploadMode == null) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check if profiles and upload mode match
|
||||
if (_preFetchedUploadMode != uploadMode) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (!_profileListsEqual(_preFetchedProfiles!, profiles)) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Check if bounds are fully contained within pre-fetched area
|
||||
return bounds.north <= _preFetchedArea!.north &&
|
||||
bounds.south >= _preFetchedArea!.south &&
|
||||
bounds.east <= _preFetchedArea!.east &&
|
||||
bounds.west >= _preFetchedArea!.west;
|
||||
}
|
||||
|
||||
/// Check if cached data is stale (older than configured refresh interval).
|
||||
bool isDataStale() {
|
||||
if (_lastFetchTime == null) return true;
|
||||
return DateTime.now().difference(_lastFetchTime!).inSeconds > kDataRefreshIntervalSeconds;
|
||||
}
|
||||
|
||||
/// Request pre-fetch for the given view bounds if not already covered or if data is stale.
|
||||
/// Uses debouncing to avoid rapid requests while user is panning.
|
||||
void requestPreFetchIfNeeded({
|
||||
required LatLngBounds viewBounds,
|
||||
required List<NodeProfile> profiles,
|
||||
required UploadMode uploadMode,
|
||||
}) {
|
||||
// Skip if already in progress
|
||||
if (_preFetchInProgress) {
|
||||
debugPrint('[PrefetchAreaService] Pre-fetch already in progress, skipping');
|
||||
return;
|
||||
}
|
||||
|
||||
// Check both spatial and temporal conditions
|
||||
final isWithinArea = isWithinPreFetchedArea(viewBounds, profiles, uploadMode);
|
||||
final isStale = isDataStale();
|
||||
|
||||
if (isWithinArea && !isStale) {
|
||||
debugPrint('[PrefetchAreaService] Current view within fresh pre-fetched area, no fetch needed');
|
||||
return;
|
||||
}
|
||||
|
||||
if (isStale) {
|
||||
debugPrint('[PrefetchAreaService] Data is stale (>${kDataRefreshIntervalSeconds}s), refreshing');
|
||||
} else {
|
||||
debugPrint('[PrefetchAreaService] Current view outside pre-fetched area, fetching larger area');
|
||||
}
|
||||
|
||||
// Cancel any pending debounced request
|
||||
_debounceTimer?.cancel();
|
||||
|
||||
// Debounce to avoid rapid requests while user is still moving
|
||||
_debounceTimer = Timer(const Duration(milliseconds: 800), () {
|
||||
_startPreFetch(
|
||||
viewBounds: viewBounds,
|
||||
profiles: profiles,
|
||||
uploadMode: uploadMode,
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
/// Start the actual pre-fetch operation.
|
||||
Future<void> _startPreFetch({
|
||||
required LatLngBounds viewBounds,
|
||||
required List<NodeProfile> profiles,
|
||||
required UploadMode uploadMode,
|
||||
}) async {
|
||||
if (_preFetchInProgress) return;
|
||||
|
||||
_preFetchInProgress = true;
|
||||
|
||||
try {
|
||||
// Calculate expanded area for pre-fetching
|
||||
final preFetchArea = _expandBounds(viewBounds, _areaExpansionMultiplier);
|
||||
|
||||
debugPrint('[PrefetchAreaService] Starting pre-fetch for area: ${preFetchArea.south},${preFetchArea.west} to ${preFetchArea.north},${preFetchArea.east}');
|
||||
|
||||
// Fetch nodes for the expanded area (unlimited - let splitting handle 50k limit)
|
||||
final nodes = await fetchOverpassNodes(
|
||||
bounds: preFetchArea,
|
||||
profiles: profiles,
|
||||
uploadMode: uploadMode,
|
||||
maxResults: 0, // Unlimited - our splitting system handles the 50k limit gracefully
|
||||
);
|
||||
|
||||
debugPrint('[PrefetchAreaService] Pre-fetch completed: ${nodes.length} nodes retrieved');
|
||||
|
||||
// Update cache with new nodes (fresh data overwrites stale, but preserves underscore tags)
|
||||
if (nodes.isNotEmpty) {
|
||||
NodeCache.instance.addOrUpdate(nodes);
|
||||
}
|
||||
|
||||
// Store the pre-fetched area info and timestamp
|
||||
_preFetchedArea = preFetchArea;
|
||||
_preFetchedProfiles = List.from(profiles);
|
||||
_preFetchedUploadMode = uploadMode;
|
||||
_lastFetchTime = DateTime.now();
|
||||
|
||||
// The overpass module already reported success/failure during fetching
|
||||
// We just need to handle the successful result here
|
||||
|
||||
// Notify UI that cache has been updated with fresh data
|
||||
NodeProviderWithCache.instance.refreshDisplay();
|
||||
|
||||
} catch (e) {
|
||||
debugPrint('[PrefetchAreaService] Pre-fetch failed: $e');
|
||||
// The overpass module already reported the error status
|
||||
// Don't update pre-fetched area info on failure
|
||||
} finally {
|
||||
_preFetchInProgress = false;
|
||||
}
|
||||
}
|
||||
|
||||
/// Expand bounds by the given multiplier, maintaining center point.
|
||||
LatLngBounds _expandBounds(LatLngBounds bounds, double multiplier) {
|
||||
final centerLat = (bounds.north + bounds.south) / 2;
|
||||
final centerLng = (bounds.east + bounds.west) / 2;
|
||||
|
||||
final latSpan = (bounds.north - bounds.south) * multiplier / 2;
|
||||
final lngSpan = (bounds.east - bounds.west) * multiplier / 2;
|
||||
|
||||
return LatLngBounds(
|
||||
LatLng(centerLat - latSpan, centerLng - lngSpan), // Southwest
|
||||
LatLng(centerLat + latSpan, centerLng + lngSpan), // Northeast
|
||||
);
|
||||
}
|
||||

  /// Check if two profile lists are equal by comparing IDs.
  bool _profileListsEqual(List<NodeProfile> list1, List<NodeProfile> list2) {
    if (list1.length != list2.length) return false;
    final ids1 = list1.map((p) => p.id).toSet();
    final ids2 = list2.map((p) => p.id).toSet();
    return ids1.length == ids2.length && ids1.containsAll(ids2);
  }
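
  // For illustration (p1/p2/p3 are hypothetical NodeProfile instances with
  // distinct IDs): the comparison is order-insensitive but membership-sensitive.
  //   _profileListsEqual([p1, p2], [p2, p1]) -> true
  //   _profileListsEqual([p1, p2], [p1, p3]) -> false
  //   _profileListsEqual([p1, p1], [p1, p2]) -> false (set-size guard catches the duplicate)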

  /// Clear the pre-fetched area (e.g., when profiles change significantly).
  void clearPreFetchedArea() {
    _preFetchedArea = null;
    _preFetchedProfiles = null;
    _preFetchedUploadMode = null;
    _lastFetchTime = null;
    debugPrint('[PrefetchAreaService] Pre-fetched area cleared');
  }

  /// Dispose of resources.
  void dispose() {
    _debounceTimer?.cancel();
  }
}
@@ -7,7 +7,7 @@ import 'package:latlong2/latlong.dart';
import '../models/pending_upload.dart';
import '../models/osm_node.dart';
import '../models/node_profile.dart';
import '../services/node_cache.dart';
import '../services/map_data_provider.dart';
import '../services/uploader.dart';
import '../widgets/node_provider_with_cache.dart';
import '../dev_config.dart';
@@ -15,6 +15,8 @@ import 'settings_state.dart';
import 'session_state.dart';

class UploadQueueState extends ChangeNotifier {
  /// Helper to access the map data provider instance
  MapDataProvider get _nodeCache => MapDataProvider();
  final List<PendingUpload> _queue = [];
  Timer? _uploadTimer;
  int _activeUploadCount = 0;
@@ -48,7 +50,7 @@ class UploadQueueState extends ChangeNotifier {
      if (upload.isDeletion) {
        // For deletions: mark the original node as pending deletion if it exists in cache
        if (upload.originalNodeId != null) {
          final existingNode = NodeCache.instance.getNodeById(upload.originalNodeId!);
          final existingNode = _nodeCache.getNodeById(upload.originalNodeId!);
          if (existingNode != null) {
            final deletionTags = Map<String, String>.from(existingNode.tags);
            deletionTags['_pending_deletion'] = 'true';
@@ -78,7 +80,7 @@ class UploadQueueState extends ChangeNotifier {
      if (upload.isEdit) {
        // For edits: also mark original with _pending_edit if it exists
        if (upload.originalNodeId != null) {
          final existingOriginal = NodeCache.instance.getNodeById(upload.originalNodeId!);
          final existingOriginal = _nodeCache.getNodeById(upload.originalNodeId!);
          if (existingOriginal != null) {
            final originalTags = Map<String, String>.from(existingOriginal.tags);
            originalTags['_pending_edit'] = 'true';
@@ -109,7 +111,7 @@ class UploadQueueState extends ChangeNotifier {
    }

    if (nodesToAdd.isNotEmpty) {
      NodeCache.instance.addOrUpdate(nodesToAdd);
      _nodeCache.addOrUpdate(nodesToAdd);
      print('[UploadQueue] Repopulated cache with ${nodesToAdd.length} pending nodes from queue');

      // Save queue if we updated any temp IDs for backward compatibility
@@ -152,7 +154,7 @@ class UploadQueueState extends ChangeNotifier {
      tags: tags,
    );

    NodeCache.instance.addOrUpdate([tempNode]);
    _nodeCache.addOrUpdate([tempNode]);
    // Notify node provider to update the map
    NodeProviderWithCache.instance.notifyListeners();

@@ -211,7 +213,7 @@ class UploadQueueState extends ChangeNotifier {
        tags: extractedTags,
      );

      NodeCache.instance.addOrUpdate([extractedNode]);
      _nodeCache.addOrUpdate([extractedNode]);
    } else {
      // For modify: mark original with grey ring and create new temp node
      // 1. Mark the original node with _pending_edit (grey ring) at original location
@@ -240,7 +242,7 @@ class UploadQueueState extends ChangeNotifier {
        tags: editedTags,
      );

      NodeCache.instance.addOrUpdate([originalNode, editedNode]);
      _nodeCache.addOrUpdate([originalNode, editedNode]);
    }
    // Notify node provider to update the map
    NodeProviderWithCache.instance.notifyListeners();
@@ -272,7 +274,7 @@ class UploadQueueState extends ChangeNotifier {
      tags: deletionTags,
    );

    NodeCache.instance.addOrUpdate([nodeWithDeletionTag]);
    _nodeCache.addOrUpdate([nodeWithDeletionTag]);
    // Notify node provider to update the map
    NodeProviderWithCache.instance.notifyListeners();

@@ -693,11 +695,11 @@ class UploadQueueState extends ChangeNotifier {
    );

    // Add/update the cache with the real node
    NodeCache.instance.addOrUpdate([realNode]);
    _nodeCache.addOrUpdate([realNode]);

    // Clean up the specific temp node for this upload
    if (item.tempNodeId != null) {
      NodeCache.instance.removeTempNodeById(item.tempNodeId!);
      _nodeCache.removeTempNodeById(item.tempNodeId!);
    }

    // For modify operations, clean up the original node's _pending_edit marker
@@ -705,7 +707,7 @@ class UploadQueueState extends ChangeNotifier {
    if (item.isEdit && item.originalNodeId != null) {
      // Remove the _pending_edit marker from the original node in cache
      // The next Overpass fetch will provide the authoritative data anyway
      NodeCache.instance.removePendingEditMarker(item.originalNodeId!);
      _nodeCache.removePendingEditMarker(item.originalNodeId!);
    }

    // Notify node provider to update the map
@@ -716,7 +718,7 @@ class UploadQueueState extends ChangeNotifier {
  void _handleSuccessfulDeletion(PendingUpload item) {
    if (item.originalNodeId != null) {
      // Remove the node from cache entirely
      NodeCache.instance.removeNodeById(item.originalNodeId!);
      _nodeCache.removeNodeById(item.originalNodeId!);

      // Notify node provider to update the map
      NodeProviderWithCache.instance.notifyListeners();
@@ -760,25 +762,25 @@ class UploadQueueState extends ChangeNotifier {
    if (upload.isDeletion) {
      // For deletions: remove the _pending_deletion marker from the original node
      if (upload.originalNodeId != null) {
        NodeCache.instance.removePendingDeletionMarker(upload.originalNodeId!);
        _nodeCache.removePendingDeletionMarker(upload.originalNodeId!);
      }
    } else if (upload.isEdit) {
      // For edits: remove the specific temp node and the _pending_edit marker from original
      if (upload.tempNodeId != null) {
        NodeCache.instance.removeTempNodeById(upload.tempNodeId!);
        _nodeCache.removeTempNodeById(upload.tempNodeId!);
      }
      if (upload.originalNodeId != null) {
        NodeCache.instance.removePendingEditMarker(upload.originalNodeId!);
        _nodeCache.removePendingEditMarker(upload.originalNodeId!);
      }
    } else if (upload.operation == UploadOperation.extract) {
      // For extracts: remove the specific temp node (leave original unchanged)
      if (upload.tempNodeId != null) {
        NodeCache.instance.removeTempNodeById(upload.tempNodeId!);
        _nodeCache.removeTempNodeById(upload.tempNodeId!);
      }
    } else {
      // For creates: remove the specific temp node
      if (upload.tempNodeId != null) {
        NodeCache.instance.removeTempNodeById(upload.tempNodeId!);
        _nodeCache.removeTempNodeById(upload.tempNodeId!);
      }
    }
  }

@@ -7,7 +7,7 @@ import '../dev_config.dart';
import '../models/node_profile.dart';
import '../models/operator_profile.dart';
import '../services/localization_service.dart';
import '../services/node_cache.dart';
import '../services/map_data_provider.dart';
import '../services/changelog_service.dart';
import 'refine_tags_sheet.dart';
import 'proximity_warning_dialog.dart';
@@ -121,7 +121,7 @@ class _AddNodeSheetState extends State<AddNodeSheet> {
    }

    // Check for nearby nodes within the configured distance
    final nearbyNodes = NodeCache.instance.findNodesWithinDistance(
    final nearbyNodes = MapDataProvider().findNodesWithinDistance(
      widget.session.target!,
      kNodeProximityWarningDistance,
    );

@@ -7,7 +7,7 @@ import '../dev_config.dart';
import '../models/node_profile.dart';
import '../models/operator_profile.dart';
import '../services/localization_service.dart';
import '../services/node_cache.dart';
import '../services/map_data_provider.dart';
import '../services/changelog_service.dart';
import '../state/settings_state.dart';
import 'refine_tags_sheet.dart';
@@ -100,7 +100,7 @@ class _EditNodeSheetState extends State<EditNodeSheet> {

  void _checkProximityOnly(BuildContext context, AppState appState, LocalizationService locService) {
    // Check for nearby nodes within the configured distance, excluding the node being edited
    final nearbyNodes = NodeCache.instance.findNodesWithinDistance(
    final nearbyNodes = MapDataProvider().findNodesWithinDistance(
      widget.session.target,
      kNodeProximityWarningDistance,
      excludeNodeId: widget.session.originalNode.id,

@@ -5,7 +5,7 @@ import 'package:latlong2/latlong.dart';

import '../../models/node_profile.dart';
import '../../app_state.dart' show UploadMode;
import '../../services/prefetch_area_service.dart';

import '../node_provider_with_cache.dart';
import '../../dev_config.dart';

@@ -44,8 +44,6 @@ class NodeRefreshController {
    WidgetsBinding.instance.addPostFrameCallback((_) {
      // Clear node cache to ensure fresh data for new profile combination
      _nodeProvider.clearCache();
      // Clear pre-fetch area since profiles changed
      PrefetchAreaService().clearPreFetchedArea();
      // Force display refresh first (for immediate UI update)
      _nodeProvider.refreshDisplay();
      // Notify that profiles changed (triggers node refresh)

@@ -7,7 +7,7 @@ import 'package:provider/provider.dart';
import '../app_state.dart' show AppState, FollowMeMode, UploadMode;
import '../services/offline_area_service.dart';
import '../services/network_status.dart';
import '../services/prefetch_area_service.dart';

import '../models/osm_node.dart';
import '../models/node_profile.dart';
import '../models/suspected_location.dart';
@@ -214,7 +214,7 @@ class MapViewState extends State<MapView> {
    _nodeController.dispose();
    _tileManager.dispose();
    _gpsController.dispose();
    PrefetchAreaService().dispose();
    // PrefetchAreaService no longer used - replaced with NodeDataManager
    super.dispose();
  }

@@ -4,7 +4,8 @@ import 'package:latlong2/latlong.dart';
import 'package:flutter_map/flutter_map.dart' show LatLngBounds;

import '../services/map_data_provider.dart';
import '../services/node_cache.dart';
import '../services/node_data_manager.dart';
import '../services/node_spatial_cache.dart';
import '../services/network_status.dart';
import '../models/node_profile.dart';
import '../models/osm_node.dart';
@@ -17,51 +18,47 @@ class NodeProviderWithCache extends ChangeNotifier {
  factory NodeProviderWithCache() => instance;
  NodeProviderWithCache._internal();

  final NodeDataManager _nodeDataManager = NodeDataManager();
  final NodeSpatialCache _cache = NodeSpatialCache();
  Timer? _debounceTimer;

  /// Call this to get (quickly) all cached overlays for the given view.
  /// Filters by currently enabled profiles only. Limiting is handled by MapView.
  /// Get cached nodes for the given bounds, filtered by enabled profiles
  List<OsmNode> getCachedNodesForBounds(LatLngBounds bounds) {
    final allNodes = NodeCache.instance.queryByBounds(bounds);
    final allNodes = _cache.getNodesFor(bounds);
    final enabledProfiles = AppState.instance.enabledProfiles;

    // If no profiles are enabled, show no nodes
    if (enabledProfiles.isEmpty) return [];

    // Filter nodes to only show those matching enabled profiles
    // Note: This uses ALL enabled profiles for filtering, even though Overpass queries
    // may be deduplicated for efficiency (broader profiles capture nodes for specific ones)
    return allNodes.where((node) {
      return _matchesAnyProfile(node, enabledProfiles);
    }).toList();
  }
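
  // _matchesAnyProfile is defined outside this hunk. A plausible sketch of a
  // tag-based predicate, assuming NodeProfile exposes its required OSM tags as
  // a `tags` map (a hypothetical field; the real implementation may differ):
  //
  //   bool _matchesAnyProfile(OsmNode node, List<NodeProfile> profiles) {
  //     return profiles.any((profile) => profile.tags.entries
  //         .every((req) => node.tags[req.key] == req.value));
  //   }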

  /// Call this when the map view changes (bounds/profiles), triggers async fetch
  /// and notifies listeners/UI when new data is available.
  /// Fetch and update nodes for the given view, with debouncing for rapid map movement
  void fetchAndUpdate({
    required LatLngBounds bounds,
    required List<NodeProfile> profiles,
    UploadMode uploadMode = UploadMode.production,
  }) {
    // Fast: serve cached immediately
    // Serve cached immediately
    notifyListeners();

    // Debounce rapid panning/zooming
    _debounceTimer?.cancel();
    _debounceTimer = Timer(const Duration(milliseconds: 400), () async {
      try {
        // Use MapSource.auto to handle both offline and online modes appropriately
        final fresh = await MapDataProvider().getNodes(
        await _nodeDataManager.getNodesFor(
          bounds: bounds,
          profiles: profiles,
          uploadMode: uploadMode,
          source: MapSource.auto,
          isUserInitiated: true,
        );
        if (fresh.isNotEmpty) {
          NodeCache.instance.addOrUpdate(fresh);
          // Clear waiting status when node data arrives
          NetworkStatus.instance.clearWaiting();
          notifyListeners();
        }

        // Notify UI of new data
        notifyListeners();

      } catch (e) {
        debugPrint('[NodeProviderWithCache] Node fetch failed: $e');
        // Cache already holds whatever is available for the view
@@ -71,7 +68,8 @@ class NodeProviderWithCache extends ChangeNotifier {

  /// Clear the cache and repopulate with pending nodes from upload queue
  void clearCache() {
    NodeCache.instance.clear();
    _cache.clear();
    _nodeDataManager.clearCache();
    // Repopulate with pending nodes from upload queue if available
    _repopulatePendingNodesAfterClear();
    notifyListeners();
@@ -79,12 +77,7 @@ class NodeProviderWithCache extends ChangeNotifier {

  /// Repopulate pending nodes after cache clear
  void _repopulatePendingNodesAfterClear() {
    // We need access to the upload queue state, but we don't have direct access here
    // Instead, we'll trigger a callback that the app state can handle
    // For now, let's use a more direct approach through a global service access
    // This could be refactored to use proper dependency injection later
    Future.microtask(() {
      // This will be called from app state when cache clears happen
      _onCacheCleared?.call();
    });
  }
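
  // `_onCacheCleared` is declared outside this hunk. A plausible wiring, with
  // hypothetical names for everything not shown in the diff:
  //
  //   VoidCallback? _onCacheCleared;
  //
  //   /// AppState could register this during initialization, e.g.:
  //   ///   NodeProviderWithCache.instance.setOnCacheCleared(
  //   ///       () => uploadQueue.repopulatePendingNodes());
  //   void setOnCacheCleared(VoidCallback callback) {
  //     _onCacheCleared = callback;
  //   }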