diff --git a/next-env.d.ts b/next-env.d.ts
index b87975d..9edff1c 100644
--- a/next-env.d.ts
+++ b/next-env.d.ts
@@ -1,6 +1,6 @@
/// <reference types="next" />
/// <reference types="next/image-types/global" />
-import "./dist/dev/types/routes.d.ts";
+import "./.next/types/routes.d.ts";
// NOTE: This file should not be edited
// see https://nextjs.org/docs/app/api-reference/config/typescript for more information.
diff --git a/src-tauri/src/downloader.rs b/src-tauri/src/downloader.rs
index 21d61c4..09dbd1a 100644
--- a/src-tauri/src/downloader.rs
+++ b/src-tauri/src/downloader.rs
@@ -308,40 +308,12 @@ impl Downloader {
.resolve_download_url(browser_type.clone(), version, download_info)
.await?;
- // Check existing file size — if it matches the expected size, skip download
+ // Determine if we have a partial file to resume
let mut existing_size: u64 = 0;
if let Ok(meta) = std::fs::metadata(&file_path) {
existing_size = meta.len();
}
- // Do a HEAD request to get the expected file size for skip/resume decisions
- let head_response = self
- .client
- .head(&download_url)
- .header(
- "User-Agent",
- "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/136.0.0.0 Safari/537.36",
- )
- .send()
- .await
- .ok();
-
- let expected_size = head_response.as_ref().and_then(|r| r.content_length());
-
- // If existing file matches expected size, skip download entirely
- if existing_size > 0 {
- if let Some(expected) = expected_size {
- if existing_size == expected {
- log::info!(
- "Archive {} already exists with correct size ({} bytes), skipping download",
- file_path.display(),
- existing_size
- );
- return Ok(file_path);
- }
- }
- }
-
// Build request, add Range only if we have bytes. If the server responds with 416 (Range Not
// Satisfiable), delete the partial file and retry once without the Range header.
let response = {
@@ -415,6 +387,20 @@ impl Downloader {
existing_size = 0;
}
+ // If the existing file already matches the total size, skip the download
+ if existing_size > 0 {
+ if let Some(total) = total_size {
+ if existing_size >= total {
+ log::info!(
+ "Archive {} already complete ({} bytes), skipping download",
+ file_path.display(),
+ existing_size
+ );
+ return Ok(file_path);
+ }
+ }
+ }
+
let mut downloaded = existing_size;
let start_time = std::time::Instant::now();
let mut last_update = start_time;
diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs
index 1dcf230..168d0a4 100644
--- a/src-tauri/src/lib.rs
+++ b/src-tauri/src/lib.rs
@@ -297,7 +297,7 @@ async fn fetch_dynamic_proxy(
.fetch_dynamic_proxy(&url, &format)
.await?;
- // Validate the proxy actually works by routing through a temporary local proxy
+ // Validate the proxy actually works by connecting through it
crate::proxy_manager::PROXY_MANAGER
.check_proxy_validity("_dynamic_test", &settings)
.await
diff --git a/src-tauri/src/proxy_manager.rs b/src-tauri/src/proxy_manager.rs
index e84c66d..0103bba 100644
--- a/src-tauri/src/proxy_manager.rs
+++ b/src-tauri/src/proxy_manager.rs
@@ -907,6 +907,63 @@ impl ProxyManager {
.map(|p| p.proxy_settings.clone())
}
+ fn classify_proxy_error(raw_error: &str, settings: &ProxySettings) -> String {
+ let err = raw_error.to_lowercase();
+ let proxy_addr = format!("{}:{}", settings.host, settings.port);
+
+ if err.contains("connection refused") {
+ return format!(
+ "Connection refused by {proxy_addr}. The proxy server is not accepting connections."
+ );
+ }
+ if err.contains("connection reset") {
+ return format!(
+ "Connection reset by {proxy_addr}. The proxy server closed the connection unexpectedly."
+ );
+ }
+ if err.contains("timed out") || err.contains("deadline has elapsed") {
+ return format!("Connection to {proxy_addr} timed out. The proxy server is not responding.");
+ }
+ if err.contains("no such host") || err.contains("dns") || err.contains("resolve") {
+ return format!(
+ "Could not resolve proxy host '{}'. Check that the hostname is correct.",
+ settings.host
+ );
+ }
+ if err.contains("authentication") || err.contains("407") || err.contains("proxy auth") {
+ return format!(
+ "Proxy authentication failed for {proxy_addr}. Check your username and password."
+ );
+ }
+ if err.contains("403") || err.contains("forbidden") {
+ return format!("Access denied by {proxy_addr} (403 Forbidden).");
+ }
+ if err.contains("402") {
+ return format!(
+ "Payment required by {proxy_addr} (402). Your proxy subscription may have expired."
+ );
+ }
+ if err.contains("502") || err.contains("bad gateway") {
+ return format!(
+ "Bad gateway from {proxy_addr} (502). The upstream proxy server may be down."
+ );
+ }
+ if err.contains("503") || err.contains("service unavailable") {
+ return format!("Proxy {proxy_addr} is temporarily unavailable (503).");
+ }
+ if err.contains("socks") && err.contains("unreachable") {
+ return format!("SOCKS proxy {proxy_addr} could not reach the target. The proxy server may not have internet access.");
+ }
+ if err.contains("invalid proxy") || err.contains("unsupported proxy") {
+ return format!(
+ "Invalid proxy configuration for {proxy_addr}. Check the proxy type and address."
+ );
+ }
+
+ // Generic fallback — still show the proxy address for context
+ format!("Proxy check failed for {proxy_addr}. Could not connect through the proxy.")
+ }
+
// Build proxy URL string from ProxySettings
fn build_proxy_url(proxy_settings: &ProxySettings) -> String {
let mut url = format!("{}://", proxy_settings.proxy_type);
@@ -928,9 +985,8 @@ impl ProxyManager {
url
}
- // Check if a proxy is valid by routing through a temporary local donut-proxy.
- // This tests the exact same code path the browser uses, ensuring that if the
- // check passes, the browser connection will work too.
+ // Check if a proxy is valid by routing through a temporary in-process donut-proxy.
+ // This tests the same code path the browser uses (local proxy -> upstream).
pub async fn check_proxy_validity(
&self,
proxy_id: &str,
@@ -938,19 +994,41 @@ impl ProxyManager {
) -> Result {
let upstream_url = Self::build_proxy_url(proxy_settings);
- // Start a temporary local proxy that tunnels through the upstream
- let proxy_config = crate::proxy_runner::start_proxy_process(Some(upstream_url), None)
+ // Bind a temporary local proxy server in-process (no child process needed)
+ let listener = tokio::net::TcpListener::bind("127.0.0.1:0")
.await
- .map_err(|e| format!("Failed to start test proxy: {e}"))?;
+ .map_err(|e| format!("Failed to bind test proxy: {e}"))?;
+ let local_port = listener
+ .local_addr()
+ .map_err(|e| format!("Failed to get local address: {e}"))?
+ .port();
- let local_url = format!("http://127.0.0.1:{}", proxy_config.local_port.unwrap_or(0));
- let proxy_id_clone = proxy_config.id.clone();
+ let upstream_for_task = upstream_url.clone();
+ let proxy_task = tokio::spawn(async move {
+ use crate::proxy_server::BypassMatcher;
+ let bypass_matcher = BypassMatcher::new(&[]);
+ let upstream = Some(upstream_for_task);
+ // Accept up to 10 connections (enough for IP check which tries multiple endpoints)
+ for _ in 0..10 {
+ let accept =
+ tokio::time::timeout(std::time::Duration::from_secs(15), listener.accept()).await;
+ match accept {
+ Ok(Ok((stream, _))) => {
+ let upstream = upstream.clone();
+ let matcher = bypass_matcher.clone();
+ tokio::spawn(async move {
+ crate::proxy_server::handle_proxy_connection(stream, upstream, matcher).await;
+ });
+ }
+ _ => break,
+ }
+ }
+ });
- // Fetch public IP through the local proxy (same path the browser uses)
+ let local_url = format!("http://127.0.0.1:{local_port}");
let ip_result = ip_utils::fetch_public_ip(Some(&local_url)).await;
- // Stop the temporary proxy regardless of result
- let _ = crate::proxy_runner::stop_proxy_process(&proxy_id_clone).await;
+ proxy_task.abort();
let ip = match ip_result {
Ok(ip) => ip,
@@ -964,7 +1042,10 @@ impl ProxyManager {
is_valid: false,
};
let _ = self.save_proxy_check_cache(proxy_id, &failed_result);
- return Err(format!("Failed to fetch public IP: {e}"));
+
+ let err_str = e.to_string();
+ let user_message = Self::classify_proxy_error(&err_str, proxy_settings);
+ return Err(user_message);
}
};
diff --git a/src-tauri/src/proxy_server.rs b/src-tauri/src/proxy_server.rs
index 5b0eed7..6852ea4 100644
--- a/src-tauri/src/proxy_server.rs
+++ b/src-tauri/src/proxy_server.rs
@@ -883,6 +883,87 @@ fn build_reqwest_client_with_proxy(
Ok(client_builder.proxy(proxy).build()?)
}
+/// Handle a single proxy connection (used by both the proxy worker and in-process proxy checks).
+pub async fn handle_proxy_connection(
+ mut stream: tokio::net::TcpStream,
+ upstream_url: Option,
+ bypass_matcher: BypassMatcher,
+) {
+ let _ = stream.set_nodelay(true);
+
+ if stream.readable().await.is_err() {
+ return;
+ }
+
+ let mut peek_buffer = [0u8; 16];
+ match stream.read(&mut peek_buffer).await {
+ Ok(0) => {}
+ Ok(n) => {
+ let request_start_upper = String::from_utf8_lossy(&peek_buffer[..n.min(7)]).to_uppercase();
+ let is_connect = request_start_upper.starts_with("CONNECT");
+
+ if is_connect {
+ let mut full_request = Vec::with_capacity(4096);
+ full_request.extend_from_slice(&peek_buffer[..n]);
+
+ let mut remaining = [0u8; 4096];
+ let mut total_read = n;
+ let max_reads = 100;
+ let mut reads = 0;
+
+ loop {
+ if reads >= max_reads {
+ break;
+ }
+ match stream.read(&mut remaining).await {
+ Ok(0) => {
+ if full_request.ends_with(b"\r\n\r\n")
+ || full_request.ends_with(b"\n\n")
+ || total_read > 0
+ {
+ break;
+ }
+ return;
+ }
+ Ok(m) => {
+ reads += 1;
+ total_read += m;
+ full_request.extend_from_slice(&remaining[..m]);
+ if full_request.ends_with(b"\r\n\r\n") || full_request.ends_with(b"\n\n") {
+ break;
+ }
+ }
+ Err(_) => {
+ if total_read > 0 {
+ break;
+ }
+ return;
+ }
+ }
+ }
+
+ let _ =
+ handle_connect_from_buffer(stream, full_request, upstream_url, bypass_matcher).await;
+ return;
+ }
+
+ // Non-CONNECT: prepend consumed bytes and pass to hyper
+ let prepended_bytes = peek_buffer[..n].to_vec();
+ let prepended_reader = PrependReader {
+ prepended: prepended_bytes,
+ prepended_pos: 0,
+ inner: stream,
+ };
+ let io = TokioIo::new(prepended_reader);
+ let service =
+ service_fn(move |req| handle_request(req, upstream_url.clone(), bypass_matcher.clone()));
+
+ let _ = http1::Builder::new().serve_connection(io, service).await;
+ }
+ Err(_) => {}
+ }
+}
+
pub async fn run_proxy_server(config: ProxyConfig) -> Result<(), Box<dyn std::error::Error>> {
log::error!(
"Proxy worker starting, looking for config id: {}",
@@ -1052,145 +1133,11 @@ pub async fn run_proxy_server(config: ProxyConfig) -> Result<(), Box {
- // Enable TCP_NODELAY to ensure small packets are sent immediately
- // This is critical for CONNECT responses to be sent before tunneling begins
- let _ = stream.set_nodelay(true);
- log::error!("DEBUG: Accepted connection from {:?}", peer_addr);
-
+ Ok((stream, _peer_addr)) => {
let upstream = upstream_url.clone();
let matcher = bypass_matcher.clone();
-
tokio::task::spawn(async move {
- // Wait for the stream to have readable data before attempting to read.
- // This prevents read() from returning 0 on a fresh connection before
- // the client's data arrives.
- if stream.readable().await.is_err() {
- return;
- }
-
- let mut peek_buffer = [0u8; 16];
- match stream.read(&mut peek_buffer).await {
- Ok(0) => {}
- Ok(n) => {
- // Check if this looks like a CONNECT request
- // Be more lenient - check if the first bytes match "CONNECT" (case-insensitive)
- let request_start_upper =
- String::from_utf8_lossy(&peek_buffer[..n.min(7)]).to_uppercase();
- let is_connect = request_start_upper.starts_with("CONNECT");
-
- log::error!(
- "DEBUG: Read {} bytes, starts with: {:?}, is_connect: {}",
- n,
- String::from_utf8_lossy(&peek_buffer[..n.min(20)]),
- is_connect
- );
-
- if is_connect {
- // Handle CONNECT request manually for tunneling
- let mut full_request = Vec::with_capacity(4096);
- full_request.extend_from_slice(&peek_buffer[..n]);
-
- // Read the rest of the CONNECT request until we have the full headers
- // CONNECT requests end with \r\n\r\n (or \n\n)
- let mut remaining = [0u8; 4096];
- let mut total_read = n;
- let max_reads = 100; // Prevent infinite loop
- let mut reads = 0;
-
- loop {
- if reads >= max_reads {
- log::error!("DEBUG: Max reads reached, breaking");
- break;
- }
-
- match stream.read(&mut remaining).await {
- Ok(0) => {
- // Connection closed, but we might have a complete request
- if full_request.ends_with(b"\r\n\r\n") || full_request.ends_with(b"\n\n") {
- break;
- }
- // If we have some data, try to process it anyway
- if total_read > 0 {
- break;
- }
- return; // No data at all
- }
- Ok(m) => {
- reads += 1;
- total_read += m;
- full_request.extend_from_slice(&remaining[..m]);
-
- // Check if we have complete headers
- if full_request.ends_with(b"\r\n\r\n") || full_request.ends_with(b"\n\n") {
- break;
- }
-
- // Also check if we have enough to parse (at least "CONNECT host:port HTTP/1.x")
- if total_read >= 20 {
- // Check if we have a newline that might indicate end of request line
- if let Some(pos) = full_request.iter().position(|&b| b == b'\n') {
- if pos < full_request.len() - 1 {
- // We have at least the request line, check if we have headers
- let request_str = String::from_utf8_lossy(&full_request);
- if request_str.contains("\r\n\r\n") || request_str.contains("\n\n") {
- break;
- }
- }
- }
- }
- }
- Err(e) => {
- log::error!("DEBUG: Error reading CONNECT request: {:?}", e);
- // If we have some data, try to process it
- if total_read > 0 {
- break;
- }
- return;
- }
- }
- }
-
- // Handle CONNECT manually
- log::error!(
- "DEBUG: Handling CONNECT manually for: {}",
- String::from_utf8_lossy(&full_request[..full_request.len().min(200)])
- );
- if let Err(e) =
- handle_connect_from_buffer(stream, full_request, upstream, matcher).await
- {
- log::error!("Error handling CONNECT request: {:?}", e);
- } else {
- log::error!("DEBUG: CONNECT handled successfully");
- }
- return;
- }
-
- // Not CONNECT (or partial read) - reconstruct stream with consumed bytes prepended
- // This is critical: we MUST prepend any bytes we consumed, even if < 7 bytes
- log::error!(
- "DEBUG: Non-CONNECT request, first {} bytes: {:?}",
- n,
- String::from_utf8_lossy(&peek_buffer[..n.min(50)])
- );
- let prepended_bytes = peek_buffer[..n].to_vec();
- let prepended_reader = PrependReader {
- prepended: prepended_bytes,
- prepended_pos: 0,
- inner: stream,
- };
- let io = TokioIo::new(prepended_reader);
- let service =
- service_fn(move |req| handle_request(req, upstream.clone(), matcher.clone()));
-
- if let Err(err) = http1::Builder::new().serve_connection(io, service).await {
- log::error!("Error serving connection: {:?}", err);
- }
- }
- Err(e) => {
- log::error!("Error reading from connection: {:?}", e);
- }
- }
+ handle_proxy_connection(stream, upstream, matcher).await;
});
}
Err(e) => {