refactor: reduce cpu usage for proxies

This commit is contained in:
zhom
2025-12-02 22:30:40 +04:00
parent ef00c59063
commit 57ead61139
5 changed files with 148 additions and 88 deletions
+2 -1
View File
@@ -594,7 +594,8 @@ pub fn run() {
// Periodically broadcast browser running status to the frontend
let app_handle_status = app.handle().clone();
tauri::async_runtime::spawn(async move {
let mut interval = tokio::time::interval(tokio::time::Duration::from_millis(500));
let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(5));
interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
let mut last_running_states: std::collections::HashMap<String, bool> =
std::collections::HashMap::new();
+8 -23
View File
@@ -750,7 +750,7 @@ impl ProfileManager {
// For non-camoufox browsers, use the existing PID-based logic
let inner_profile = profile.clone();
let system = System::new_all();
let mut system = System::new();
let mut is_running = false;
let mut found_pid: Option<u32> = None;
@@ -792,6 +792,8 @@ impl ProfileManager {
// If we didn't find the browser with the stored PID, search all processes
if !is_running {
// Refresh all processes only when we need to search (expensive but necessary)
system.refresh_all();
for (pid, process) in system.processes() {
let cmd = process.cmd();
if cmd.len() >= 2 {
@@ -874,7 +876,6 @@ impl ProfileManager {
None => inner_profile.clone(),
};
let previous_pid = latest_profile.process_id;
let mut merged = latest_profile.clone();
if let Some(pid) = found_pid {
@@ -890,13 +891,6 @@ impl ProfileManager {
if let Err(e) = self.save_profile(&merged) {
log::warn!("Warning: Failed to clear profile PID: {e}");
}
// Stop any associated proxy immediately when the browser stops
if let Some(old_pid) = previous_pid {
let _ = crate::proxy_manager::PROXY_MANAGER
.stop_proxy(app_handle.clone(), old_pid)
.await;
}
}
// Emit profile update event to frontend
@@ -974,18 +968,12 @@ impl ProfileManager {
None => profile.clone(),
};
if let Some(old_pid) = latest.process_id {
if latest.process_id.is_some() {
latest.process_id = None;
if let Err(e) = self.save_profile(&latest) {
log::warn!("Warning: Failed to clear Camoufox profile process info: {e}");
}
// Stop any proxy tied to this old PID immediately
let _ = crate::proxy_manager::PROXY_MANAGER
.stop_proxy(app_handle.clone(), old_pid)
.await;
// Emit profile update event to frontend
if let Err(e) = app_handle.emit("profile-updated", &latest) {
log::warn!("Warning: Failed to emit profile update event: {e}");
}
@@ -1010,7 +998,7 @@ impl ProfileManager {
None => profile.clone(),
};
if let Some(old_pid) = latest.process_id {
if latest.process_id.is_some() {
latest.process_id = None;
if let Err(e2) = self.save_profile(&latest) {
log::warn!(
@@ -1018,11 +1006,6 @@ impl ProfileManager {
);
}
// Best-effort stop of proxy tied to old PID
let _ = crate::proxy_manager::PROXY_MANAGER
.stop_proxy(app_handle.clone(), old_pid)
.await;
// Emit profile update event to frontend
if let Err(e3) = app_handle.emit("profile-updated", &latest) {
log::warn!("Warning: Failed to emit profile update event: {e3}");
@@ -1241,7 +1224,9 @@ mod tests {
let temp_dir = TempDir::new().unwrap();
// Mock the base directories by setting environment variables
std::env::set_var("HOME", temp_dir.path());
unsafe {
std::env::set_var("HOME", temp_dir.path());
}
let profile_manager = ProfileManager::instance();
(profile_manager, temp_dir)
+119 -60
View File
@@ -595,11 +595,6 @@ impl ProxyManager {
browser_pid: u32,
profile_id: Option<&str>,
) -> Result<ProxySettings, String> {
// First, proactively cleanup any dead proxies so we don't accidentally reuse stale ones
let _ = self.cleanup_dead_proxies(app_handle.clone()).await;
// If we have a previous proxy tied to this profile, and the upstream settings are changing,
// stop it before starting a new one so the change takes effect immediately.
if let Some(name) = profile_id {
// Check if we have an active proxy recorded for this profile
let maybe_existing_id = {
@@ -625,30 +620,29 @@ impl ProxyManager {
&& existing.upstream_host == desired_host
&& existing.upstream_port == desired_port;
if !is_same_upstream {
// Stop the previous proxy tied to this profile (best effort)
// We don't know the original PID mapping that created it; iterate to find its key
let pid_to_stop = {
let proxies = self.active_proxies.lock().unwrap();
proxies.iter().find_map(|(pid, info)| {
if info.id == existing_id {
Some(*pid)
} else {
None
}
})
};
if let Some(pid) = pid_to_stop {
let _ = self.stop_proxy(app_handle.clone(), pid).await;
if is_same_upstream {
// Settings match - can reuse existing proxy
// Just update the PID mapping if needed
let proxies = self.active_proxies.lock().unwrap();
if proxies.contains_key(&browser_pid) {
// Already mapped, reuse it
return Ok(ProxySettings {
proxy_type: "http".to_string(),
host: "127.0.0.1".to_string(),
port: existing.local_port,
username: None,
password: None,
});
}
// Need to add this PID to the mapping - we'll do that after starting
}
// Settings differ - we'll create a new proxy, but don't stop the old one
// It will be cleaned up by periodic cleanup if it becomes dead
}
}
}
// Check if we already have a proxy for this browser PID. If it exists but the upstream
// settings don't match the newly requested ones, stop it and create a new proxy so that
// changes take effect immediately.
let mut needs_restart = false;
// Check if we already have a proxy for this browser PID
// If settings match, reuse it; otherwise create a new one (don't stop the old one)
{
let proxies = self.active_proxies.lock().unwrap();
if let Some(existing) = proxies.get(&browser_pid) {
@@ -663,7 +657,7 @@ impl ProxyManager {
&& existing.upstream_port == desired_port;
if is_same_upstream {
// Check if profile_id matches - if not, we need to restart to update tracking
// Check if profile_id matches
let profile_id_matches = match (profile_id, &existing.profile_id) {
(Some(ref new_id), Some(ref old_id)) => new_id == old_id,
(None, None) => true,
@@ -671,7 +665,7 @@ impl ProxyManager {
};
if profile_id_matches {
// Reuse existing local proxy (profile_id matches)
// Reuse existing local proxy (settings and profile_id match)
return Ok(ProxySettings {
proxy_type: "http".to_string(),
host: "127.0.0.1".to_string(),
@@ -679,28 +673,15 @@ impl ProxyManager {
username: None,
password: None,
});
} else {
// Profile ID changed - need to restart proxy to update tracking
log::info!(
"Profile ID changed for proxy {}: {:?} -> {:?}, restarting proxy",
existing.id,
existing.profile_id,
profile_id
);
needs_restart = true;
}
} else {
// Upstream changed; we must restart the local proxy so that traffic is routed correctly
needs_restart = true;
// Profile ID changed - we'll create a new proxy but don't stop the old one
// It will be cleaned up by periodic cleanup if it becomes dead
}
// Upstream changed - we'll create a new proxy but don't stop the old one
// It will be cleaned up by periodic cleanup if it becomes dead
}
}
if needs_restart {
// Best-effort stop of the old proxy for this PID before starting a new one
let _ = self.stop_proxy(app_handle.clone(), browser_pid).await;
}
// Start a new proxy using the donut-proxy binary with the correct CLI interface
let mut proxy_cmd = app_handle
.shell()
@@ -955,30 +936,108 @@ impl ProxyManager {
}
}
// Check if a browser process with the given PID is still running.
//
// Performance: `System::new_all()` snapshots *every* process (and CPU, memory,
// disks, ...) on each call, which is the per-tick CPU cost this commit is
// trying to eliminate. Instead, create an empty `System` and refresh only the
// one PID we care about — a bare `System::new()` loads no process data, so the
// explicit `refresh_process` call is required or `process()` would always
// return `None`.
// NOTE(review): `refresh_process(Pid)` is the sysinfo <= 0.30 API; on 0.31+
// this becomes `refresh_processes(ProcessesToUpdate::Some(&[pid]), ..)` —
// confirm against the crate version in Cargo.toml.
fn is_process_running(&self, pid: u32) -> bool {
    use sysinfo::{Pid, System};
    let mut system = System::new();
    let target = Pid::from(pid as usize);
    system.refresh_process(target);
    system.process(target).is_some()
}
// Clean up proxies for dead browser processes
// Only clean up orphaned config files where the proxy process itself is dead
pub async fn cleanup_dead_proxies(
&self,
app_handle: tauri::AppHandle,
) -> Result<Vec<u32>, String> {
let dead_pids = {
let proxies = self.active_proxies.lock().unwrap();
proxies
.keys()
.filter(|&&pid| pid != 0 && !self.is_process_running(pid)) // Skip temporary PID 0
.copied()
.collect::<Vec<u32>>()
// Don't stop proxies for dead browser processes - let them run indefinitely
// The proxy processes are idle and don't consume CPU when not in use
// Only clean up config files where the proxy process itself is dead (see below)
let dead_pids: Vec<u32> = Vec::new();
// Clean up orphaned proxy configs (only where proxy process is definitely dead)
// IMPORTANT: Only clean up configs where the proxy process itself is dead
// If the proxy process is running (even if idle), leave it alone
// The user doesn't care if proxy processes run indefinitely as long as they're not consuming CPU
let orphaned_configs = {
use crate::proxy_storage::{is_process_running, list_proxy_configs};
use std::time::{SystemTime, UNIX_EPOCH};
let all_configs = list_proxy_configs();
let tracked_proxy_ids: std::collections::HashSet<String> = {
let proxies = self.active_proxies.lock().unwrap();
proxies.values().map(|p| p.id.clone()).collect()
};
// Get current time for grace period check
let now = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_secs();
all_configs
.into_iter()
.filter(|config| {
// If proxy is tracked in active_proxies, it's definitely not orphaned
if tracked_proxy_ids.contains(&config.id) {
return false;
}
// Extract creation time from proxy ID (format: proxy_{timestamp}_{random})
// This gives us a grace period for newly created proxies
let proxy_age = config
.id
.strip_prefix("proxy_")
.and_then(|s| s.split('_').next())
.and_then(|s| s.parse::<u64>().ok())
.map(|created_at| now.saturating_sub(created_at))
.unwrap_or(0);
// Grace period: don't clean up proxies created in the last 120 seconds
// This prevents race conditions during startup (increased from 60 to 120 for safety)
if proxy_age < 120 {
log::debug!(
"Skipping cleanup of proxy {} - too new (age: {}s)",
config.id,
proxy_age
);
return false;
}
// ONLY clean up if we can verify the proxy process is dead
// If proxy process is running, leave it alone (even if idle)
if let Some(proxy_pid) = config.pid {
// Check if proxy process is actually dead
if !is_process_running(proxy_pid) {
// Proxy process is dead, clean up the config file
log::info!(
"Proxy {} process (PID {}) is dead, will clean up config",
config.id,
proxy_pid
);
return true;
}
// Proxy process is running - leave it alone
log::debug!(
"Skipping cleanup of proxy {} - process (PID {}) is still running",
config.id,
proxy_pid
);
return false;
}
// No PID in config - can't verify if process is dead
// Be conservative: don't clean up (might be starting up or PID not set yet)
log::debug!(
"Skipping cleanup of proxy {} - no PID in config (might be starting up)",
config.id
);
false
})
.collect::<Vec<_>>()
};
for dead_pid in &dead_pids {
log::info!("Cleaning up proxy for dead browser process PID: {dead_pid}");
let _ = self.stop_proxy(app_handle.clone(), *dead_pid).await;
// Clean up orphaned config files (proxy process is dead)
for config in orphaned_configs {
log::info!(
"Cleaning up orphaned proxy config: {} (proxy process is dead)",
config.id
);
// Just delete the config file - the process is already dead
use crate::proxy_storage::delete_proxy_config;
delete_proxy_config(&config.id);
}
// Emit event for reactive UI updates
+18 -3
View File
@@ -601,12 +601,27 @@ pub async fn run_proxy_server(config: ProxyConfig) -> Result<(), Box<dyn std::er
// Start a background task to periodically flush traffic stats to disk
tokio::spawn(async move {
let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(1));
let mut interval = tokio::time::interval(tokio::time::Duration::from_secs(5));
interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
let mut last_activity_time = std::time::Instant::now();
let mut last_byte_count = 0u64;
loop {
interval.tick().await;
if let Some(tracker) = get_traffic_tracker() {
if let Err(e) = tracker.flush_to_disk() {
log::error!("Failed to flush traffic stats: {}", e);
// Only flush if there's been recent activity
let (sent, recv, _) = tracker.get_snapshot();
let current_bytes = sent + recv;
let has_recent_activity = current_bytes != last_byte_count
|| last_activity_time.elapsed() < std::time::Duration::from_secs(30);
if has_recent_activity {
if let Err(e) = tracker.flush_to_disk() {
log::error!("Failed to flush traffic stats: {}", e);
} else {
last_activity_time = std::time::Instant::now();
last_byte_count = current_bytes;
}
}
}
}
+1 -1
View File
@@ -133,6 +133,6 @@ pub fn generate_proxy_id() -> String {
// Check whether a process with the given OS PID is currently running.
//
// Bug fix: the commit swapped `System::new_all()` for `System::new()` to cut
// CPU usage, but a bare `System::new()` never loads any process information,
// so `system.process(pid)` was always `None` and this function always
// reported the process as dead — which would let the orphaned-config cleanup
// delete configs for live proxies. Refreshing only the single target PID
// keeps the CPU win while restoring correctness.
// NOTE(review): `refresh_process(Pid)` is the sysinfo <= 0.30 API; on 0.31+
// use `refresh_processes(ProcessesToUpdate::Some(&[pid]), ..)` — confirm
// against the crate version in Cargo.toml.
pub fn is_process_running(pid: u32) -> bool {
    use sysinfo::{Pid, System};
    let mut system = System::new();
    let target = Pid::from(pid as usize);
    system.refresh_process(target);
    system.process(target).is_some()
}