Compare commits

...

19 Commits

Author SHA1 Message Date
DaZuo0122
5a23175a83 Update: Build instructions for just 2026-02-02 13:00:19 +08:00
DaZuo0122
57492ab654 Add: Justfile to replace cmake and make 2026-02-02 12:32:56 +08:00
DaZuo0122
7054ff77a7 Fix: http/3 alpn bugs 2026-01-18 23:05:41 +08:00
DaZuo0122
9bcb7549f3 Bump version to 0.4.0 2026-01-17 22:09:23 +08:00
DaZuo0122
1da9b915d8 Update documents 2026-01-17 20:13:37 +08:00
DaZuo0122
94762d139a Add: flag to make watch keep running 2026-01-17 20:07:13 +08:00
DaZuo0122
f349d4b4fa Add: description in help message 2026-01-17 19:49:53 +08:00
DaZuo0122
7f6ee839b2 Add: Leak-D for dns leak detection 2026-01-17 19:42:54 +08:00
DaZuo0122
a82a7fe2ad Add: include interface pickup failure in log 2026-01-17 19:10:52 +08:00
DaZuo0122
d5b92ede7b Fix: main thread timeout early than work thread 2026-01-17 19:07:10 +08:00
DaZuo0122
144e801e13 Add: verbose for dns leak iface picking process 2026-01-17 18:53:07 +08:00
DaZuo0122
cfa96bde08 Add: dns leak detection 2026-01-17 18:45:24 +08:00
DaZuo0122
ccd4a31d21 Add: H3 support - incomplete 2026-01-17 13:47:37 +08:00
DaZuo0122
840ceec38f Add: default logging filter to suppress geoip logging 2026-01-17 12:54:24 +08:00
DaZuo0122
c538e31174 Add: per-hop and rdns for probe trace 2026-01-17 12:51:41 +08:00
DaZuo0122
7e87edb411 Add: verbose flag to show logs in detail 2026-01-17 00:15:46 +08:00
DaZuo0122
7746511fc4 Add: socks5 support. It may have problems with DoT, will see. 2026-01-16 23:59:02 +08:00
DaZuo0122
edd1779920 Merge branch 'master' of https://gitea.markyan04.cn/manbo/WTFnet 2026-01-16 23:20:41 +08:00
DaZuo0122
cb022127c0 Add multiple features 2026-01-16 23:16:58 +08:00
49 changed files with 9426 additions and 265 deletions

1
.gitignore vendored
View File

@@ -1,2 +1,3 @@
/target /target
/data /data
/dist

View File

@@ -1,41 +0,0 @@
# Thin CMake wrapper that builds and packages the Rust `wtfn` binary via cargo.
cmake_minimum_required(VERSION 3.20)
project(wtfnet LANGUAGES NONE)

set(CARGO_CMD cargo)
set(CARGO_TARGET_DIR "${CMAKE_BINARY_DIR}/cargo-target")
set(BIN_NAME "wtfn${CMAKE_EXECUTABLE_SUFFIX}")
set(BIN_PATH "${CARGO_TARGET_DIR}/release/${BIN_NAME}")

# Derive the package version from the CLI crate manifest; fall back to 0.1.0.
file(READ "${PROJECT_SOURCE_DIR}/crates/wtfnet-cli/Cargo.toml" CLI_TOML)
string(REGEX MATCH "version = \"([0-9]+\\.[0-9]+\\.[0-9]+)\"" CLI_VERSION_MATCH "${CLI_TOML}")
if(CMAKE_MATCH_1)
  set(PACKAGE_VERSION "${CMAKE_MATCH_1}")
else()
  set(PACKAGE_VERSION "0.1.0")
endif()

# Always invoke cargo: it does its own dependency tracking. The previous
# add_custom_command(OUTPUT ...) form had no DEPENDS, so once BIN_PATH
# existed CMake never re-ran cargo even when Rust sources changed.
add_custom_target(wtfnet_build ALL
  COMMAND "${CMAKE_COMMAND}" -E env CARGO_TARGET_DIR="${CARGO_TARGET_DIR}"
          "${CARGO_CMD}" build --release --workspace --bin wtfn
  BYPRODUCTS "${BIN_PATH}"
  WORKING_DIRECTORY "${PROJECT_SOURCE_DIR}"
  COMMENT "Building wtfn with cargo"
  VERBATIM
)

install(PROGRAMS "${BIN_PATH}" DESTINATION bin)
install(DIRECTORY "${PROJECT_SOURCE_DIR}/data" DESTINATION share/wtfnet)
# NOTE: `install` is not a real target, so `add_dependencies(install ...)`
# (as previously written) is a configure-time error. `make install` already
# builds the ALL target — which includes wtfnet_build — first.

set(CPACK_PACKAGE_NAME "wtfnet")
set(CPACK_PACKAGE_VERSION "${PACKAGE_VERSION}")
set(CPACK_PACKAGE_FILE_NAME "wtfnet-${PACKAGE_VERSION}-${CMAKE_SYSTEM_NAME}-${CMAKE_SYSTEM_PROCESSOR}")
if(WIN32)
  set(CPACK_GENERATOR "ZIP")
else()
  set(CPACK_GENERATOR "TGZ")
endif()
include(CPack)

897
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -10,4 +10,9 @@ members = [
"crates/wtfnet-geoip", "crates/wtfnet-geoip",
"crates/wtfnet-probe", "crates/wtfnet-probe",
"crates/wtfnet-dns", "crates/wtfnet-dns",
"crates/wtfnet-dnsleak",
"crates/wtfnet-http",
"crates/wtfnet-tls",
"crates/wtfnet-discover",
"crates/wtfnet-diag",
] ]

View File

@@ -1,18 +0,0 @@
# Convenience wrapper around the CMake build (see CMakeLists.txt).
# Override the build tree location with: make BUILD_DIR=out <target>
BUILD_DIR ?= build

.PHONY: build configure package install clean

# Generate the CMake build tree.
configure:
	cmake -S . -B $(BUILD_DIR)

# Build the wtfn binary (CMake drives cargo).
build: configure
	cmake --build $(BUILD_DIR)

# Create the distributable archive via the CPack `package` target.
package: build
	cmake --build $(BUILD_DIR) --target package

# Install the binary and data files.
install: build
	cmake --build $(BUILD_DIR) --target install

# Remove the entire build tree (portable via `cmake -E`).
clean:
	cmake -E rm -rf $(BUILD_DIR)

109
README.md
View File

@@ -6,9 +6,11 @@ WTFnet is a pure CLI toolbox for diagnosing network problems on Linux and Window
- System snapshot: interfaces, IPs, routes, DNS config. - System snapshot: interfaces, IPs, routes, DNS config.
- Ports, neighbors, and trusted root certificates. - Ports, neighbors, and trusted root certificates.
- Probing: ping, tcping, traceroute (best-effort). - Probing: ping, tcping, traceroute (best-effort).
- DNS: query/detect/watch with GeoIP, DoT/DoH, and SOCKS5 for DoH. - DNS: query/detect/watch with GeoIP, DoT/DoH, and SOCKS5 support.
- DNS leak detection with policy profiles and privacy modes (best-effort).
- GeoIP offline lookup via GeoLite2 Country/ASN. - GeoIP offline lookup via GeoLite2 Country/ASN.
- Subnet calculator: subnet/contains/overlap/summarize. - Subnet calculator: subnet/contains/overlap/summarize.
- Discover: mDNS/SSDP plus LLMNR/NBNS.
## Quickstart ## Quickstart
```bash ```bash
@@ -34,14 +36,38 @@ wtfn neigh list --ipv6
wtfn geoip lookup 8.8.8.8 wtfn geoip lookup 8.8.8.8
wtfn probe ping example.com --count 4 wtfn probe ping example.com --count 4
wtfn probe tcping example.com:443 --count 4 wtfn probe tcping example.com:443 --count 4
wtfn probe tcping example.com:443 --socks5 socks5://127.0.0.1:10808
wtfn probe trace example.com:443 --max-hops 20 wtfn probe trace example.com:443 --max-hops 20
# DNS # DNS
wtfn dns query example.com A wtfn dns query example.com A
wtfn dns query example.com AAAA --server 1.1.1.1 wtfn dns query example.com AAAA --server 1.1.1.1
wtfn dns query example.com A --transport doh --server 1.1.1.1 --tls-name cloudflare-dns.com wtfn dns query example.com A --transport doh --server 1.1.1.1 --tls-name cloudflare-dns.com
wtfn dns query example.com A --transport dot --server 1.1.1.1 --tls-name cloudflare-dns.com --socks5 socks5://127.0.0.1:10808
wtfn dns detect example.com --transport doh --servers 1.1.1.1 --tls-name cloudflare-dns.com wtfn dns detect example.com --transport doh --servers 1.1.1.1 --tls-name cloudflare-dns.com
wtfn dns watch --duration 10s --filter example.com wtfn dns watch --duration 10s --filter example.com
wtfn dns watch --follow
wtfn dns leak status
wtfn dns leak watch --duration 10s --profile proxy-stub
wtfn dns leak watch --follow
wtfn dns leak report report.json
# TLS
wtfn tls handshake example.com:443
wtfn tls handshake example.com:443 --socks5 socks5://127.0.0.1:10808
wtfn tls cert example.com:443
wtfn tls verify example.com:443
wtfn tls alpn example.com:443 --alpn h2,http/1.1
# Discover
wtfn discover mdns --duration 3s
wtfn discover ssdp --duration 3s
wtfn discover llmnr --duration 3s
wtfn discover nbns --duration 3s
# Diag
wtfn diag --out report.json --json
wtfn diag --bundle report.zip
# Calc # Calc
wtfn calc contains 192.168.0.0/16 192.168.1.0/24 wtfn calc contains 192.168.0.0/16 192.168.1.0/24
@@ -49,26 +75,8 @@ wtfn calc overlap 10.0.0.0/24 10.0.1.0/24
wtfn calc summarize 10.0.0.0/24 10.0.1.0/24 wtfn calc summarize 10.0.0.0/24 10.0.1.0/24
``` ```
## Supported flags ## Command reference
Global flags: See `docs/COMMANDS.md` for the full list of commands and flags (with descriptions).
- `--json` / `--pretty`
- `--no-color` / `--quiet`
- `-v` / `-vv`
- `--log-level <error|warn|info|debug|trace>`
- `--log-format <text|json>`
- `--log-file <path>`
Command flags (implemented):
- `sys ip`: `--all`, `--iface <name>`
- `sys route`: `--ipv4`, `--ipv6`, `--to <ip>`
- `ports listen`: `--tcp`, `--udp`, `--port <n>`
- `neigh list`: `--ipv4`, `--ipv6`, `--iface <name>`
- `probe ping`: `--count <n>`, `--timeout-ms <n>`, `--interval-ms <n>`, `--no-geoip`
- `probe tcping`: `--count <n>`, `--timeout-ms <n>`, `--no-geoip`
- `probe trace`: `--max-hops <n>`, `--timeout-ms <n>`, `--udp`, `--port <n>`, `--no-geoip`
- `dns query`: `--server <ip[:port]>`, `--transport <udp|tcp|dot|doh>`, `--tls-name <name>`, `--socks5 <url>`, `--timeout-ms <n>`
- `dns detect`: `--servers <csv>`, `--transport <udp|tcp|dot|doh>`, `--tls-name <name>`, `--socks5 <url>`, `--repeat <n>`, `--timeout-ms <n>`
- `dns watch`: `--duration <Ns|Nms>`, `--iface <name>`, `--filter <pattern>`
## GeoIP data files ## GeoIP data files
GeoLite2 mmdb files should live in `data/`. GeoLite2 mmdb files should live in `data/`.
@@ -77,16 +85,37 @@ Lookup order:
2) `data/` next to the CLI binary 2) `data/` next to the CLI binary
3) `data/` in the current working directory 3) `data/` in the current working directory
## Build and package ## Build
### Only build binary
```bash ```bash
cmake -S . -B build cargo build --release
cmake --build build
cmake --build build --target package
``` ```
Install: ### Build and package
1. Prepare GeoLite2 databases (required `GeoLite2-ASN.mmdb` and `GeoLite2-Country.mmdb` ):
```bash ```bash
cmake --build build --target install # Place your mmdb files under data/
mkdir data
```
> **Note**: This step requires `python3` and `just`.
2. Use `just` to run build and package command (Note: you don't need bash environment on windows):
```bash
# You will find package under dist/, zip file on windows, tar.gz file on linux
just release
```
## HTTP/3 (experimental)
HTTP/3 support is feature-gated and best-effort. Enable it only when you want to test QUIC
connectivity.
To enable locally for testing:
```bash
cargo run -p wtfnet-cli --features wtfnet-http/http3 -- http head https://cloudflare-quic.com --http3
``` ```
## Roadmap ## Roadmap
@@ -107,23 +136,39 @@ cmake --build build --target install
- diag: bundle export (zip) - diag: bundle export (zip)
### v0.3 (future upgrades) ### v0.3 (future upgrades)
- richer trace output (reverse lookup, per-hop loss) - richer trace output (reverse lookup, per-hop loss, per-hop stats)
- TLS extras: OCSP stapling indicator, more chain parsing - HTTP timing accuracy (connect/tls)
- TLS extras: OCSP stapling indicator, richer cert parsing
- ports conns improvements (top talkers / summary) - ports conns improvements (top talkers / summary)
- better baseline/diff for system roots - better baseline/diff for system roots
- smarter "diagnose <domain>" workflow mode - optional LLMNR/NBNS discovery
- optional HTTP/3 (feature-gated; experimental, incomplete)
### v0.4 (current requirements)
- dns leak detection (passive watch + report)
- process attribution (best-effort)
- policy profiles + privacy modes
## Current stage ## Current stage
Implemented: Implemented:
- Core CLI with JSON output and logging. - Core CLI with JSON output and logging.
- sys, ports, neigh, cert roots. - sys, ports, neigh, cert roots.
- geoip, probe, dns query/detect/watch. - geoip, probe, dns query/detect/watch.
- DoT/DoH + SOCKS5 proxy for DoH. - http head/get with timing and GeoIP.
- tls handshake/verify/cert/alpn.
- DoT/DoH + SOCKS5 proxy support.
- discover mdns/ssdp/llmnr/nbns.
- dns leak detection (status/watch/report).
- diag report + bundle.
- calc subcrate with subnet/contains/overlap/summarize. - calc subcrate with subnet/contains/overlap/summarize.
- CMake/Makefile build + package + install targets. - CMake/Makefile build + package + install targets.
- Basic unit tests for calc and TLS parsing.
In progress: In progress:
- http, tls, discover, diag. - dns leak: DoH heuristic classification (optional).
- dns leak: Leak-D mismatch correlation (optional).
See `docs/implementation_status.md` for a design-vs-implementation view.
## License ## License
MIT (see `LICENSE`). MIT (see `LICENSE`).

View File

@@ -200,3 +200,32 @@ fn overlap_v6(a: Ipv6Net, b: Ipv6Net) -> bool {
let b_end = u128::from(b.broadcast()); let b_end = u128::from(b.broadcast());
a_start <= b_end && b_start <= a_end a_start <= b_end && b_start <= a_end
} }
#[cfg(test)]
mod tests {
    use super::*;

    // subnet_info accepts "<ip> <dotted mask>" and derives CIDR, network,
    // broadcast, and usable host count for the /24.
    #[test]
    fn subnet_v4_from_mask() {
        let info = subnet_info("192.168.1.10 255.255.255.0").expect("subnet");
        assert_eq!(info.cidr, "192.168.1.10/24");
        assert_eq!(info.network, "192.168.1.0");
        assert_eq!(info.broadcast.as_deref(), Some("192.168.1.255"));
        assert_eq!(info.usable_addresses, "254");
    }

    // contains: supernet/subnet relation; overlap: networks sharing addresses.
    #[test]
    fn contains_and_overlap() {
        assert!(contains("192.168.0.0/16", "192.168.1.0/24").unwrap());
        assert!(overlap("10.0.0.0/24", "10.0.0.128/25").unwrap());
        assert!(!overlap("10.0.0.0/24", "10.0.1.0/24").unwrap());
    }

    // Two adjacent /24s summarize into a single covering /23.
    #[test]
    fn summarize_ipv4() {
        let inputs = vec!["10.0.0.0/24".to_string(), "10.0.1.0/24".to_string()];
        let result = summarize(&inputs).expect("summarize");
        assert_eq!(result.len(), 1);
        assert_eq!(result[0].to_string(), "10.0.0.0/23");
    }
}

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "wtfnet-cli" name = "wtfnet-cli"
version = "0.1.0" version = "0.4.0"
edition = "2024" edition = "2024"
[[bin]] [[bin]]
@@ -11,13 +11,19 @@ path = "src/main.rs"
clap = { version = "4", features = ["derive"] } clap = { version = "4", features = ["derive"] }
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
serde_json = "1" serde_json = "1"
tokio = { version = "1", features = ["macros", "rt-multi-thread"] } time = { version = "0.3", features = ["formatting", "parsing"] }
tokio = { version = "1", features = ["macros", "rt-multi-thread", "signal"] }
wtfnet-core = { path = "../wtfnet-core" } wtfnet-core = { path = "../wtfnet-core" }
wtfnet-calc = { path = "../wtfnet-calc" } wtfnet-calc = { path = "../wtfnet-calc" }
wtfnet-geoip = { path = "../wtfnet-geoip" } wtfnet-geoip = { path = "../wtfnet-geoip" }
wtfnet-platform = { path = "../wtfnet-platform" } wtfnet-platform = { path = "../wtfnet-platform" }
wtfnet-probe = { path = "../wtfnet-probe" } wtfnet-probe = { path = "../wtfnet-probe" }
wtfnet-dns = { path = "../wtfnet-dns", features = ["pcap"] } wtfnet-dns = { path = "../wtfnet-dns", features = ["pcap"] }
wtfnet-dnsleak = { path = "../wtfnet-dnsleak", features = ["pcap"] }
wtfnet-http = { path = "../wtfnet-http" }
wtfnet-tls = { path = "../wtfnet-tls" }
wtfnet-discover = { path = "../wtfnet-discover" }
wtfnet-diag = { path = "../wtfnet-diag" }
[target.'cfg(windows)'.dependencies] [target.'cfg(windows)'.dependencies]
wtfnet-platform-windows = { path = "../wtfnet-platform-windows" } wtfnet-platform-windows = { path = "../wtfnet-platform-windows" }

File diff suppressed because it is too large Load Diff

View File

@@ -3,6 +3,7 @@ use std::path::{Path, PathBuf};
use time::format_description::well_known::Rfc3339; use time::format_description::well_known::Rfc3339;
use time::OffsetDateTime; use time::OffsetDateTime;
use tracing_subscriber::prelude::*; use tracing_subscriber::prelude::*;
use tracing_subscriber::EnvFilter;
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CommandEnvelope<T> { pub struct CommandEnvelope<T> {
@@ -227,9 +228,10 @@ fn init_logging_text(
log_file: Option<&PathBuf>, log_file: Option<&PathBuf>,
) -> Result<LoggingHandle, Box<dyn std::error::Error>> { ) -> Result<LoggingHandle, Box<dyn std::error::Error>> {
let (writer, guard) = logging_writer(log_file)?; let (writer, guard) = logging_writer(log_file)?;
let filter = build_log_filter(level_filter);
let layer = tracing_subscriber::fmt::layer() let layer = tracing_subscriber::fmt::layer()
.with_writer(writer) .with_writer(writer)
.with_filter(level_filter); .with_filter(filter);
tracing_subscriber::registry().with(layer).init(); tracing_subscriber::registry().with(layer).init();
Ok(LoggingHandle { _guard: guard }) Ok(LoggingHandle { _guard: guard })
} }
@@ -239,14 +241,27 @@ fn init_logging_json(
log_file: Option<&PathBuf>, log_file: Option<&PathBuf>,
) -> Result<LoggingHandle, Box<dyn std::error::Error>> { ) -> Result<LoggingHandle, Box<dyn std::error::Error>> {
let (writer, guard) = logging_writer(log_file)?; let (writer, guard) = logging_writer(log_file)?;
let filter = build_log_filter(level_filter);
let layer = tracing_subscriber::fmt::layer() let layer = tracing_subscriber::fmt::layer()
.with_writer(writer) .with_writer(writer)
.json() .json()
.with_filter(level_filter); .with_filter(filter);
tracing_subscriber::registry().with(layer).init(); tracing_subscriber::registry().with(layer).init();
Ok(LoggingHandle { _guard: guard }) Ok(LoggingHandle { _guard: guard })
} }
/// Build the tracing filter, honouring explicit overrides in priority order:
/// `NETTOOL_LOG_FILTER`, then the conventional `RUST_LOG`, and otherwise a
/// default filter at `level_filter` that mutes maxminddb decoder noise.
fn build_log_filter(level_filter: tracing_subscriber::filter::LevelFilter) -> EnvFilter {
    EnvFilter::try_from_env("NETTOOL_LOG_FILTER")
        .or_else(|_| EnvFilter::try_from_default_env())
        .unwrap_or_else(|_| {
            EnvFilter::default()
                .add_directive(level_filter.into())
                .add_directive("maxminddb::decoder=off".parse().unwrap())
        })
}
fn logging_writer( fn logging_writer(
log_file: Option<&PathBuf>, log_file: Option<&PathBuf>,
) -> Result< ) -> Result<

View File

@@ -0,0 +1,12 @@
[package]
name = "wtfnet-diag"
version = "0.1.0"
edition = "2024"
[dependencies]
serde = { version = "1", features = ["derive"] }
serde_json = "1"
thiserror = "2"
wtfnet-platform = { path = "../wtfnet-platform" }
wtfnet-dns = { path = "../wtfnet-dns" }
zip = "0.6"

View File

@@ -0,0 +1,142 @@
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::fs::File;
use std::io::Write;
use std::path::Path;
use thiserror::Error;
use wtfnet_dns::{DnsDetectResult, DnsTransport};
use wtfnet_platform::{DnsConfigSnapshot, ListenSocket, NetInterface, NeighborEntry, RouteEntry};
use wtfnet_platform::{Platform, PlatformError};
use zip::write::FileOptions;
/// Errors raised while producing a diagnostic report or writing a bundle.
/// Payloads are pre-rendered strings so upstream error types stay private.
#[derive(Debug, Error)]
pub enum DiagError {
    #[error("platform error: {0}")]
    Platform(String),
    #[error("dns error: {0}")]
    Dns(String),
    #[error("io error: {0}")]
    Io(String),
    #[error("zip error: {0}")]
    Zip(String),
}
/// Tunables for diagnostic report generation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DiagOptions {
    /// Domain to resolve during the optional DNS-detect step; `None` skips it.
    pub dns_detect_domain: Option<String>,
    /// Per-query timeout for the DNS-detect step, in milliseconds.
    pub dns_detect_timeout_ms: u64,
    /// Number of DNS-detect repetitions.
    pub dns_detect_repeat: u32,
}

/// Aggregated snapshot; every section is `Option` because collection is
/// best-effort — a failed section lands in `warnings` instead of aborting.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DiagReport {
    pub interfaces: Option<Vec<NetInterface>>,
    pub routes: Option<Vec<RouteEntry>>,
    pub dns_config: Option<DnsConfigSnapshot>,
    pub ports_listen: Option<Vec<ListenSocket>>,
    pub neighbors: Option<Vec<NeighborEntry>>,
    pub dns_detect: Option<DnsDetectResult>,
    /// Human-readable notes for sections that could not be collected.
    pub warnings: Vec<String>,
}
pub async fn run(platform: &Platform, options: DiagOptions) -> Result<DiagReport, DiagError> {
let mut warnings = Vec::new();
let interfaces = match platform.sys.interfaces().await {
Ok(value) => Some(value),
Err(err) => {
warnings.push(format_platform_error("interfaces", err));
None
}
};
let routes = match platform.sys.routes().await {
Ok(value) => Some(value),
Err(err) => {
warnings.push(format_platform_error("routes", err));
None
}
};
let dns_config = match platform.sys.dns_config().await {
Ok(value) => Some(value),
Err(err) => {
warnings.push(format_platform_error("dns_config", err));
None
}
};
let ports_listen = match platform.ports.listening().await {
Ok(value) => Some(value),
Err(err) => {
warnings.push(format_platform_error("ports_listen", err));
None
}
};
let neighbors = match platform.neigh.neighbors().await {
Ok(value) => Some(value),
Err(err) => {
warnings.push(format_platform_error("neighbors", err));
None
}
};
let dns_detect = if let Some(domain) = options.dns_detect_domain.as_ref() {
match wtfnet_dns::detect(
domain,
&wtfnet_dns::default_detect_servers(DnsTransport::Udp),
DnsTransport::Udp,
None,
options.dns_detect_repeat,
options.dns_detect_timeout_ms,
)
.await
{
Ok(value) => Some(value),
Err(err) => {
warnings.push(format!("dns_detect: {err}"));
None
}
}
} else {
None
};
Ok(DiagReport {
interfaces,
routes,
dns_config,
ports_listen,
neighbors,
dns_detect,
warnings,
})
}
pub fn write_bundle(
path: &Path,
meta_json: &Value,
report_json: &Value,
) -> Result<(), DiagError> {
let file = File::create(path).map_err(|err| DiagError::Io(err.to_string()))?;
let mut zip = zip::ZipWriter::new(file);
let options = FileOptions::default().compression_method(zip::CompressionMethod::Deflated);
zip.start_file("meta.json", options)
.map_err(|err| DiagError::Zip(err.to_string()))?;
let meta_bytes = serde_json::to_vec_pretty(meta_json)
.map_err(|err| DiagError::Io(err.to_string()))?;
zip.write_all(&meta_bytes)
.map_err(|err| DiagError::Io(err.to_string()))?;
zip.start_file("report.json", options)
.map_err(|err| DiagError::Zip(err.to_string()))?;
let report_bytes = serde_json::to_vec_pretty(report_json)
.map_err(|err| DiagError::Io(err.to_string()))?;
zip.write_all(&report_bytes)
.map_err(|err| DiagError::Io(err.to_string()))?;
zip.finish()
.map_err(|err| DiagError::Zip(err.to_string()))?;
Ok(())
}
/// Render a failed platform section as "section: message (Code)" for the
/// warnings list.
fn format_platform_error(section: &str, err: PlatformError) -> String {
    let message = &err.message;
    let code = &err.code;
    format!("{section}: {message} ({code:?})")
}

View File

@@ -0,0 +1,11 @@
[package]
name = "wtfnet-discover"
version = "0.1.0"
edition = "2024"
[dependencies]
hickory-proto = "0.24"
mdns-sd = "0.8"
serde = { version = "1", features = ["derive"] }
thiserror = "2"
tokio = { version = "1", features = ["rt"] }

View File

@@ -0,0 +1,530 @@
use hickory_proto::op::{Message, MessageType, Query};
use hickory_proto::rr::{Name, RData, RecordType};
use mdns_sd::{ServiceDaemon, ServiceEvent, ServiceInfo};
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, BTreeSet};
use std::net::{IpAddr, SocketAddr, UdpSocket};
use std::time::{Duration, Instant};
use thiserror::Error;
/// Errors surfaced by the discovery probes in this crate.
#[derive(Debug, Error)]
pub enum DiscoverError {
    /// mDNS daemon/browse failure (from `mdns-sd`).
    #[error("mdns error: {0}")]
    Mdns(String),
    /// Socket or encoding failure for SSDP/LLMNR/NBNS probes.
    #[error("io error: {0}")]
    Io(String),
}
/// Parameters for an mDNS browse.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MdnsOptions {
    /// Total listen budget in milliseconds.
    pub duration_ms: u64,
    /// Restrict the browse to one service type (e.g. "_http._tcp.local.");
    /// when `None`, available types are enumerated first via DNS-SD.
    pub service_type: Option<String>,
}

/// Parameters for SSDP discovery.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SsdpOptions {
    /// Listen budget in milliseconds after the M-SEARCH is sent.
    pub duration_ms: u64,
}

/// Parameters for an LLMNR lookup.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LlmnrOptions {
    /// Listen budget in milliseconds.
    pub duration_ms: u64,
    /// Name to resolve; empty/absent falls back to "wpad".
    pub name: Option<String>,
}

/// Parameters for an NBNS node-status sweep.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NbnsOptions {
    /// Listen budget in milliseconds.
    pub duration_ms: u64,
}
/// One resolved mDNS service instance.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MdnsService {
    pub service_type: String,
    /// Full instance name as advertised by the responder.
    pub fullname: String,
    pub hostname: Option<String>,
    pub addresses: Vec<String>,
    pub port: Option<u16>,
    /// TXT record key/value pairs.
    pub properties: BTreeMap<String, String>,
}

/// Result of an mDNS browse run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MdnsReport {
    pub duration_ms: u64,
    /// The requested service type, or `None` when all types were browsed.
    pub service_type: Option<String>,
    pub services: Vec<MdnsService>,
}

/// One SSDP responder, built from the headers of its M-SEARCH reply.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SsdpService {
    /// Responder socket address as a string.
    pub from: String,
    pub st: Option<String>,
    pub usn: Option<String>,
    pub location: Option<String>,
    pub server: Option<String>,
}

/// Result of an SSDP discovery run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SsdpReport {
    pub duration_ms: u64,
    pub services: Vec<SsdpService>,
}

/// One A/AAAA answer received over LLMNR.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LlmnrAnswer {
    /// Responder IP as a string.
    pub from: String,
    pub name: String,
    pub record_type: String,
    pub data: String,
    pub ttl: u32,
}

/// Result of an LLMNR lookup run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LlmnrReport {
    pub duration_ms: u64,
    /// The name that was queried.
    pub name: String,
    pub answers: Vec<LlmnrAnswer>,
}

/// NetBIOS names advertised by one responding node.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NbnsNodeStatus {
    /// Responder IP as a string.
    pub from: String,
    /// Names formatted as "NAME<suffix-hex>".
    pub names: Vec<String>,
}

/// Result of an NBNS node-status sweep.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NbnsReport {
    pub duration_ms: u64,
    pub nodes: Vec<NbnsNodeStatus>,
}
/// Browse mDNS services; the blocking work runs on a dedicated thread.
pub async fn mdns_discover(options: MdnsOptions) -> Result<MdnsReport, DiscoverError> {
    tokio::task::spawn_blocking(move || mdns_discover_blocking(options))
        .await
        .map_err(|err| DiscoverError::Mdns(err.to_string()))?
}

/// Discover SSDP/UPnP devices; the blocking work runs on a dedicated thread.
pub async fn ssdp_discover(options: SsdpOptions) -> Result<SsdpReport, DiscoverError> {
    tokio::task::spawn_blocking(move || ssdp_discover_blocking(options))
        .await
        .map_err(|err| DiscoverError::Io(err.to_string()))?
}

/// Resolve a name via LLMNR; the blocking work runs on a dedicated thread.
pub async fn llmnr_discover(options: LlmnrOptions) -> Result<LlmnrReport, DiscoverError> {
    tokio::task::spawn_blocking(move || llmnr_discover_blocking(options))
        .await
        .map_err(|err| DiscoverError::Io(err.to_string()))?
}

/// Query NBNS node status on the broadcast domain; blocking work is spawned.
pub async fn nbns_discover(options: NbnsOptions) -> Result<NbnsReport, DiscoverError> {
    tokio::task::spawn_blocking(move || nbns_discover_blocking(options))
        .await
        .map_err(|err| DiscoverError::Io(err.to_string()))?
}
/// Blocking mDNS browse.
///
/// When no service type is requested, the first half of the time budget is
/// spent enumerating available types through the DNS-SD meta-service; the
/// remaining budget browses each discovered type until the overall deadline.
fn mdns_discover_blocking(options: MdnsOptions) -> Result<MdnsReport, DiscoverError> {
    let daemon = ServiceDaemon::new().map_err(|err| DiscoverError::Mdns(err.to_string()))?;
    let mut service_types = BTreeSet::new();
    if let Some(service_type) = options.service_type.as_ref() {
        service_types.insert(service_type.clone());
    } else {
        // Enumerate service types via the DNS-SD meta-query.
        let receiver = daemon
            .browse("_services._dns-sd._udp.local.")
            .map_err(|err| DiscoverError::Mdns(err.to_string()))?;
        let deadline = Instant::now() + Duration::from_millis(options.duration_ms / 2);
        while Instant::now() < deadline {
            match receiver.recv_timeout(Duration::from_millis(200)) {
                Ok(ServiceEvent::ServiceFound(service_type, _)) => {
                    service_types.insert(service_type);
                }
                Ok(_) => {}
                // Recv timeout: keep polling until the enumeration deadline.
                Err(_) => {}
            }
        }
    }
    let mut services = Vec::new();
    let deadline = Instant::now() + Duration::from_millis(options.duration_ms);
    for service_type in service_types.iter() {
        let receiver = daemon
            .browse(service_type)
            .map_err(|err| DiscoverError::Mdns(err.to_string()))?;
        while Instant::now() < deadline {
            match receiver.recv_timeout(Duration::from_millis(200)) {
                Ok(ServiceEvent::ServiceResolved(info)) => {
                    services.push(format_service_info(service_type, &info));
                }
                Ok(_) => {}
                // A recv timeout moves on to the next service type.
                Err(_) => break,
            }
        }
    }
    Ok(MdnsReport {
        duration_ms: options.duration_ms,
        service_type: options.service_type,
        services,
    })
}
/// Convert a resolved `ServiceInfo` into the serializable report entry.
fn format_service_info(service_type: &str, info: &ServiceInfo) -> MdnsService {
    let addresses: Vec<String> = info
        .get_addresses()
        .iter()
        .map(|addr| addr.to_string())
        .collect();
    let properties: BTreeMap<String, String> = info
        .get_properties()
        .iter()
        .map(|prop| (prop.key().to_string(), prop.val_str().to_string()))
        .collect();
    MdnsService {
        service_type: service_type.to_string(),
        fullname: info.get_fullname().to_string(),
        hostname: Some(info.get_hostname().to_string()),
        addresses,
        port: Some(info.get_port()),
        properties,
    }
}
/// Blocking SSDP discovery: multicast one M-SEARCH for `ssdp:all` to
/// 239.255.255.250:1900 and collect unicast replies until the deadline.
fn ssdp_discover_blocking(options: SsdpOptions) -> Result<SsdpReport, DiscoverError> {
    let socket = UdpSocket::bind("0.0.0.0:0").map_err(|err| DiscoverError::Io(err.to_string()))?;
    // Short read timeout so the outer deadline is re-checked ~5x/second.
    socket
        .set_read_timeout(Some(Duration::from_millis(200)))
        .map_err(|err| DiscoverError::Io(err.to_string()))?;
    let request = [
        "M-SEARCH * HTTP/1.1",
        "HOST: 239.255.255.250:1900",
        "MAN: \"ssdp:discover\"",
        "MX: 1",
        "ST: ssdp:all",
        "",
        "",
    ]
    .join("\r\n");
    let target = "239.255.255.250:1900";
    // Best-effort send; an unreachable network simply yields an empty report.
    let _ = socket.send_to(request.as_bytes(), target);
    let mut services = Vec::new();
    let deadline = Instant::now() + Duration::from_millis(options.duration_ms);
    let mut buf = [0u8; 2048];
    while Instant::now() < deadline {
        match socket.recv_from(&mut buf) {
            Ok((len, from)) => {
                // Non-UTF8 payloads and non-SSDP responses are ignored.
                if let Ok(payload) = std::str::from_utf8(&buf[..len]) {
                    if let Some(entry) = parse_ssdp_response(payload, from) {
                        services.push(entry);
                    }
                }
            }
            Err(_) => continue,
        }
    }
    Ok(SsdpReport {
        duration_ms: options.duration_ms,
        services,
    })
}
/// Blocking LLMNR lookup: multicast an A+AAAA query for `options.name`
/// (default "wpad") to 224.0.0.252:5355 and dedupe answers until the
/// deadline.
fn llmnr_discover_blocking(options: LlmnrOptions) -> Result<LlmnrReport, DiscoverError> {
    let socket = UdpSocket::bind("0.0.0.0:0").map_err(|err| DiscoverError::Io(err.to_string()))?;
    // Short read timeout so the outer deadline is re-checked ~5x/second.
    socket
        .set_read_timeout(Some(Duration::from_millis(200)))
        .map_err(|err| DiscoverError::Io(err.to_string()))?;
    let name = options
        .name
        .clone()
        .filter(|value| !value.trim().is_empty())
        .unwrap_or_else(|| "wpad".to_string());
    let query = build_llmnr_query(&name)
        .map_err(|err| DiscoverError::Io(format!("llmnr build query: {err}")))?;
    let target = "224.0.0.252:5355";
    // Best-effort send; failures simply yield an empty answer list.
    let _ = socket.send_to(&query, target);
    let mut answers = Vec::new();
    let mut seen = BTreeSet::new();
    let deadline = Instant::now() + Duration::from_millis(options.duration_ms);
    let mut buf = [0u8; 2048];
    while Instant::now() < deadline {
        match socket.recv_from(&mut buf) {
            Ok((len, from)) => {
                if let Some(entries) = parse_llmnr_response(&buf[..len], from.ip()) {
                    for entry in entries {
                        // Dedupe on (source, name, record type, data).
                        let key = format!(
                            "{}|{}|{}|{}",
                            entry.from, entry.name, entry.record_type, entry.data
                        );
                        if seen.insert(key) {
                            answers.push(entry);
                        }
                    }
                }
            }
            Err(_) => continue,
        }
    }
    Ok(LlmnrReport {
        duration_ms: options.duration_ms,
        name,
        answers,
    })
}
/// Blocking NBNS sweep: broadcast a node-status (NBSTAT) query for the
/// wildcard name "*" to port 137 and collect deduplicated name lists
/// until the deadline.
fn nbns_discover_blocking(options: NbnsOptions) -> Result<NbnsReport, DiscoverError> {
    let socket = UdpSocket::bind("0.0.0.0:0").map_err(|err| DiscoverError::Io(err.to_string()))?;
    socket
        .set_broadcast(true)
        .map_err(|err| DiscoverError::Io(err.to_string()))?;
    // Short read timeout so the outer deadline is re-checked ~5x/second.
    socket
        .set_read_timeout(Some(Duration::from_millis(200)))
        .map_err(|err| DiscoverError::Io(err.to_string()))?;
    let query = build_nbns_node_status_query();
    // Best-effort broadcast; failures simply yield an empty report.
    let _ = socket.send_to(&query, "255.255.255.255:137");
    let mut nodes = Vec::new();
    let mut seen = BTreeSet::new();
    let deadline = Instant::now() + Duration::from_millis(options.duration_ms);
    let mut buf = [0u8; 2048];
    while Instant::now() < deadline {
        match socket.recv_from(&mut buf) {
            Ok((len, from)) => {
                if let Some(names) = parse_nbns_node_status(&buf[..len]) {
                    // Dedupe on (source ip, full name list).
                    let key = format!("{}|{}", from.ip(), names.join(","));
                    if seen.insert(key) {
                        nodes.push(NbnsNodeStatus {
                            from: from.ip().to_string(),
                            names,
                        });
                    }
                }
            }
            Err(_) => continue,
        }
    }
    Ok(NbnsReport {
        duration_ms: options.duration_ms,
        nodes,
    })
}
/// Parse an SSDP M-SEARCH reply into a service record; returns `None` when
/// none of the interesting headers (ST/USN/LOCATION/SERVER) are present.
fn parse_ssdp_response(payload: &str, from: SocketAddr) -> Option<SsdpService> {
    let mut service = SsdpService {
        from: from.to_string(),
        st: None,
        usn: None,
        location: None,
        server: None,
    };
    for line in payload.lines() {
        // Header lines are "Key: value"; anything else is skipped.
        let Some((key, value)) = line.trim().split_once(':') else {
            continue;
        };
        let value = value.trim().to_string();
        match key.trim().to_ascii_lowercase().as_str() {
            "st" => service.st = Some(value),
            "usn" => service.usn = Some(value),
            "location" => service.location = Some(value),
            "server" => service.server = Some(value),
            _ => {}
        }
    }
    let empty = service.st.is_none()
        && service.usn.is_none()
        && service.location.is_none()
        && service.server.is_none();
    if empty {
        None
    } else {
        Some(service)
    }
}
/// Encode an LLMNR query asking for both A and AAAA records of `name`.
/// Returns the wire bytes, or an error string for names that fail
/// ASCII parsing.
fn build_llmnr_query(name: &str) -> Result<Vec<u8>, String> {
    let name = Name::from_ascii(name).map_err(|err| format!("invalid name: {err}"))?;
    let mut message = Message::new();
    message
        .set_id(0)
        .set_message_type(MessageType::Query)
        // LLMNR responders do not perform recursion.
        .set_recursion_desired(false)
        .add_query(Query::query(name.clone(), RecordType::A))
        .add_query(Query::query(name, RecordType::AAAA));
    message.to_vec().map_err(|err| err.to_string())
}
/// Decode an LLMNR response datagram, keeping only A/AAAA answers.
/// Returns `None` for undecodable packets, non-responses, or when no
/// usable answers remain.
fn parse_llmnr_response(payload: &[u8], from: IpAddr) -> Option<Vec<LlmnrAnswer>> {
    let message = Message::from_vec(payload).ok()?;
    if message.message_type() != MessageType::Response {
        return None;
    }
    let mut answers = Vec::new();
    for record in message.answers() {
        let record_type = record.record_type();
        // Other record types (CNAME, PTR, ...) are deliberately skipped.
        let data = match record.data() {
            Some(RData::A(addr)) => addr.to_string(),
            Some(RData::AAAA(addr)) => addr.to_string(),
            _ => continue,
        };
        answers.push(LlmnrAnswer {
            from: from.to_string(),
            name: record.name().to_string(),
            record_type: record_type.to_string(),
            data,
            ttl: record.ttl(),
        });
    }
    if answers.is_empty() {
        None
    } else {
        Some(answers)
    }
}
/// Build an NBNS node-status (NBSTAT) query for the wildcard name "*".
fn build_nbns_node_status_query() -> Vec<u8> {
    let mut packet = Vec::with_capacity(50);
    // Header: transaction id, flags=0, qdcount=1, an/ns/ar counts = 0.
    packet.extend_from_slice(&nbns_query_id().to_be_bytes());
    for word in [0u16, 1, 0, 0, 0] {
        packet.extend_from_slice(&word.to_be_bytes());
    }
    // Question: encoded "*" name, type NBSTAT (0x0021), class IN (0x0001).
    packet.extend_from_slice(&nbns_encode_name("*", 0x00));
    packet.extend_from_slice(&0x0021u16.to_be_bytes());
    packet.extend_from_slice(&0x0001u16.to_be_bytes());
    packet
}
/// Derive a pseudo-random 16-bit transaction id from the current clock's
/// sub-second nanoseconds (falls back to 0 if the clock is pre-epoch).
fn nbns_query_id() -> u16 {
    let now = std::time::SystemTime::now()
        .duration_since(std::time::UNIX_EPOCH)
        .unwrap_or_default();
    (now.subsec_nanos() & 0xffff) as u16
}
/// First-level NetBIOS name encoding (RFC 1001 §14.1): the name is
/// uppercased and space-padded to 15 bytes, the 16th byte is the type
/// suffix, and each byte is split into two nibbles mapped onto 'A'..'P'.
/// The result is length-prefixed (always 32) and NUL-terminated (34 bytes).
fn nbns_encode_name(name: &str, suffix: u8) -> Vec<u8> {
    let mut raw = [b' '; 16];
    for (slot, byte) in raw.iter_mut().zip(name.bytes().take(15)) {
        *slot = byte.to_ascii_uppercase();
    }
    raw[15] = suffix;
    let mut out = Vec::with_capacity(34);
    out.push(32); // label length: 16 raw bytes -> 32 encoded chars
    out.extend(
        raw.iter()
            .flat_map(|byte| [(byte >> 4 & 0x0f) + b'A', (byte & 0x0f) + b'A']),
    );
    out.push(0); // root label terminator
    out
}
/// Parse an NBNS node-status response and extract the advertised NetBIOS
/// names. Returns `None` for queries, truncated packets, or responses
/// without any NBSTAT names.
fn parse_nbns_node_status(payload: &[u8]) -> Option<Vec<String>> {
    if payload.len() < 12 {
        return None;
    }
    // Bit 15 of the flags word distinguishes responses from queries.
    let flags = u16::from_be_bytes([payload[2], payload[3]]);
    if flags & 0x8000 == 0 {
        return None;
    }
    let qdcount = u16::from_be_bytes([payload[4], payload[5]]) as usize;
    let ancount = u16::from_be_bytes([payload[6], payload[7]]) as usize;
    // First byte after the 12-byte DNS-style header.
    let mut offset = 12;
    // Skip any echoed question entries: name + 4 bytes (type, class).
    for _ in 0..qdcount {
        offset = skip_dns_name(payload, offset)?;
        if offset + 4 > payload.len() {
            return None;
        }
        offset += 4;
    }
    let mut names = Vec::new();
    // Each answer: name, type(2), class(2), ttl(4), rdlength(2), rdata.
    for _ in 0..ancount {
        offset = skip_dns_name(payload, offset)?;
        if offset + 10 > payload.len() {
            return None;
        }
        let rr_type = u16::from_be_bytes([payload[offset], payload[offset + 1]]);
        let _rr_class = u16::from_be_bytes([payload[offset + 2], payload[offset + 3]]);
        let _ttl = u32::from_be_bytes([
            payload[offset + 4],
            payload[offset + 5],
            payload[offset + 6],
            payload[offset + 7],
        ]);
        let rdlength = u16::from_be_bytes([payload[offset + 8], payload[offset + 9]]) as usize;
        offset += 10;
        if offset + rdlength > payload.len() {
            return None;
        }
        // Only NBSTAT (0x0021) answers carry a node name list.
        if rr_type == 0x0021 && rdlength > 0 {
            if let Some(list) = parse_nbns_name_list(&payload[offset..offset + rdlength]) {
                names.extend(list);
            }
        }
        offset += rdlength;
    }
    if names.is_empty() {
        None
    } else {
        Some(names)
    }
}
fn parse_nbns_name_list(payload: &[u8]) -> Option<Vec<String>> {
    // NBSTAT RDATA layout: a 1-byte name count, then 18-byte entries
    // (15 name characters + 1 suffix byte + 2 flag bytes each).
    let count = usize::from(*payload.first()?);
    let mut cursor = 1;
    let mut names = Vec::with_capacity(count);
    for _ in 0..count {
        // Bail out if the advertised count overruns the buffer.
        let entry = payload.get(cursor..cursor + 18)?;
        let name = String::from_utf8_lossy(&entry[..15]).trim_end().to_string();
        let suffix = entry[15];
        names.push(format!("{name}<{suffix:02x}>"));
        cursor += 18;
    }
    Some(names)
}
fn skip_dns_name(payload: &[u8], mut offset: usize) -> Option<usize> {
    // Advance past a DNS-encoded name starting at `offset`, returning the
    // index of the first byte after it. A compression pointer (top two bits
    // set) always occupies exactly two bytes and ends the name.
    payload.get(offset)?;
    loop {
        let length = *payload.get(offset)?;
        match length {
            0 => return Some(offset + 1), // root label terminates the name
            byte if byte & 0xc0 == 0xc0 => {
                // Pointer needs a second byte to be present.
                return if offset + 1 < payload.len() {
                    Some(offset + 2)
                } else {
                    None
                };
            }
            byte => {
                // Ordinary label: skip the length byte plus the label body.
                offset += 1 + byte as usize;
                if offset >= payload.len() {
                    return None;
                }
            }
        }
    }
}

View File

@@ -7,10 +7,16 @@ edition = "2024"
hickory-resolver = { version = "0.24", features = ["dns-over-tls", "dns-over-https", "dns-over-https-rustls", "dns-over-rustls", "native-certs"] } hickory-resolver = { version = "0.24", features = ["dns-over-tls", "dns-over-https", "dns-over-https-rustls", "dns-over-rustls", "native-certs"] }
hickory-proto = "0.24" hickory-proto = "0.24"
reqwest = { version = "0.11", features = ["rustls-tls", "socks"] } reqwest = { version = "0.11", features = ["rustls-tls", "socks"] }
rustls = "0.21"
rustls-native-certs = "0.6"
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
thiserror = "2" thiserror = "2"
tokio = { version = "1", features = ["time"] } tokio = { version = "1", features = ["io-util", "time"] }
tokio-rustls = "0.24"
tokio-socks = "0.5"
url = "2"
pnet = { version = "0.34", optional = true } pnet = { version = "0.34", optional = true }
tracing = "0.1"
[features] [features]
pcap = ["dep:pnet"] pcap = ["dep:pnet"]

View File

@@ -8,12 +8,19 @@ use hickory_resolver::system_conf::read_system_conf;
use hickory_proto::op::{Message, MessageType, Query}; use hickory_proto::op::{Message, MessageType, Query};
use hickory_proto::rr::Name; use hickory_proto::rr::Name;
use reqwest::Proxy; use reqwest::Proxy;
use rustls::{Certificate, ClientConfig, RootCertStore, ServerName};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::collections::BTreeSet; use std::collections::BTreeSet;
use std::net::{IpAddr, SocketAddr}; use std::net::{IpAddr, SocketAddr};
use std::str::FromStr; use std::str::FromStr;
use std::sync::Arc;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use thiserror::Error; use thiserror::Error;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio_rustls::TlsConnector;
use tokio_socks::tcp::Socks5Stream;
use tracing::debug;
use url::Url;
#[cfg(feature = "pcap")] #[cfg(feature = "pcap")]
use pnet::datalink::{self, Channel, Config as DatalinkConfig, NetworkInterface}; use pnet::datalink::{self, Channel, Config as DatalinkConfig, NetworkInterface};
@@ -163,12 +170,26 @@ pub async fn query(
timeout_ms: u64, timeout_ms: u64,
) -> Result<DnsQueryReport, DnsError> { ) -> Result<DnsQueryReport, DnsError> {
let record_type = parse_record_type(record_type)?; let record_type = parse_record_type(record_type)?;
debug!(
domain,
record_type = %record_type,
transport = %transport,
server = ?server.as_ref().map(|value| value.addr),
proxy = ?proxy.as_deref(),
timeout_ms,
"dns query start"
);
if let Some(proxy) = proxy { if let Some(proxy) = proxy {
if transport != DnsTransport::Doh {
return Err(DnsError::ProxyUnsupported(transport.to_string()));
}
let server = server.ok_or_else(|| DnsError::MissingServer(transport.to_string()))?; let server = server.ok_or_else(|| DnsError::MissingServer(transport.to_string()))?;
return doh_query_via_proxy(domain, record_type, server, timeout_ms, proxy).await; return match transport {
DnsTransport::Doh => {
doh_query_via_proxy(domain, record_type, server, timeout_ms, proxy).await
}
DnsTransport::Dot => {
dot_query_via_proxy(domain, record_type, server, timeout_ms, proxy).await
}
_ => Err(DnsError::ProxyUnsupported(transport.to_string())),
};
} }
let resolver = build_resolver(server.clone(), transport, timeout_ms)?; let resolver = build_resolver(server.clone(), transport, timeout_ms)?;
let start = Instant::now(); let start = Instant::now();
@@ -234,6 +255,15 @@ pub async fn detect(
repeat: u32, repeat: u32,
timeout_ms: u64, timeout_ms: u64,
) -> Result<DnsDetectResult, DnsError> { ) -> Result<DnsDetectResult, DnsError> {
debug!(
domain,
transport = %transport,
servers = servers.len(),
proxy = ?proxy.as_deref(),
repeat,
timeout_ms,
"dns detect start"
);
let mut results = Vec::new(); let mut results = Vec::new();
for server in servers { for server in servers {
for _ in 0..repeat.max(1) { for _ in 0..repeat.max(1) {
@@ -300,6 +330,12 @@ pub async fn watch(_options: DnsWatchOptions) -> Result<DnsWatchReport, DnsError
#[cfg(feature = "pcap")] #[cfg(feature = "pcap")]
pub async fn watch(options: DnsWatchOptions) -> Result<DnsWatchReport, DnsError> { pub async fn watch(options: DnsWatchOptions) -> Result<DnsWatchReport, DnsError> {
debug!(
iface = ?options.iface,
duration_ms = options.duration_ms,
filter = ?options.filter,
"dns watch start"
);
let iface = match select_interface(options.iface.as_deref()) { let iface = match select_interface(options.iface.as_deref()) {
Some(value) => value, Some(value) => value,
None => { None => {
@@ -330,6 +366,15 @@ pub async fn watch(options: DnsWatchOptions) -> Result<DnsWatchReport, DnsError>
match rx.next() { match rx.next() {
Ok(frame) => { Ok(frame) => {
if let Some(event) = parse_dns_frame(frame, start, &filter) { if let Some(event) = parse_dns_frame(frame, start, &filter) {
debug!(
src = %event.src,
dst = %event.dst,
query_name = %event.query_name,
query_type = %event.query_type,
rcode = %event.rcode,
is_response = event.is_response,
"dns watch event"
);
events.push(event); events.push(event);
} }
} }
@@ -424,6 +469,13 @@ async fn doh_query_via_proxy(
timeout_ms: u64, timeout_ms: u64,
proxy: String, proxy: String,
) -> Result<DnsQueryReport, DnsError> { ) -> Result<DnsQueryReport, DnsError> {
debug!(
domain,
record_type = %record_type,
server = %server.addr,
proxy = %proxy,
"dns doh via proxy"
);
let tls_name = server let tls_name = server
.name .name
.clone() .clone()
@@ -512,6 +564,158 @@ async fn doh_query_via_proxy(
}) })
} }
/// Resolve `domain` over DNS-over-TLS (RFC 7858), tunnelling the TCP
/// connection through a SOCKS5 proxy.
///
/// `server.name` must carry the TLS hostname to verify against; `proxy` is a
/// `socks5://` or `socks5h://` URL. Proxy connect, TLS handshake, and the
/// query round-trip are each bounded by `timeout_ms`.
async fn dot_query_via_proxy(
    domain: &str,
    record_type: RecordType,
    server: DnsServerTarget,
    timeout_ms: u64,
    proxy: String,
) -> Result<DnsQueryReport, DnsError> {
    debug!(
        domain,
        record_type = %record_type,
        server = %server.addr,
        proxy = %proxy,
        "dns dot via proxy"
    );
    // DoT requires a hostname for certificate verification.
    let tls_name = server
        .name
        .clone()
        .ok_or_else(|| DnsError::MissingTlsName("dot".to_string()))?;
    let name = Name::from_ascii(domain)
        .map_err(|err| DnsError::Resolver(format!("invalid domain: {err}")))?;
    // Build a single-question DNS query message.
    let mut message = Message::new();
    message
        .set_id(0)
        .set_message_type(MessageType::Query)
        .set_recursion_desired(true)
        .add_query(Query::query(name, record_type));
    let body = message
        .to_vec()
        .map_err(|err| DnsError::Resolver(err.to_string()))?;
    // DNS-over-TCP frames the message with a u16 length prefix, so the
    // message itself must fit in 16 bits.
    if body.len() > u16::MAX as usize {
        return Err(DnsError::Resolver("dns message too large".to_string()));
    }
    let connector = build_tls_connector()?;
    let proxy_config = parse_socks5_proxy(&proxy)?;
    // With socks5h the proxy resolves the hostname; otherwise connect to the
    // server's literal IP.
    let target = if proxy_config.remote_dns {
        (tls_name.clone(), server.addr.port())
    } else {
        (server.addr.ip().to_string(), server.addr.port())
    };
    let timeout = Duration::from_millis(timeout_ms);
    // Open the TCP stream through the SOCKS5 proxy.
    let tcp = tokio::time::timeout(
        timeout,
        Socks5Stream::connect(proxy_config.addr.as_str(), target),
    )
    .await
    .map_err(|_| DnsError::Resolver("timeout".to_string()))?
    .map_err(|err| DnsError::Proxy(err.to_string()))?
    .into_inner();
    let server_name = ServerName::try_from(tls_name.as_str())
        .map_err(|_| DnsError::MissingTlsName(tls_name.clone()))?;
    // TLS handshake over the proxied stream.
    let mut stream = tokio::time::timeout(timeout, connector.connect(server_name, tcp))
        .await
        .map_err(|_| DnsError::Resolver("timeout".to_string()))?
        .map_err(|err| DnsError::Resolver(err.to_string()))?;
    let start = Instant::now();
    // Write the length-prefixed query, then read the length-prefixed answer.
    let response_bytes = tokio::time::timeout(timeout, async {
        let length = (body.len() as u16).to_be_bytes();
        stream.write_all(&length).await?;
        stream.write_all(&body).await?;
        stream.flush().await?;
        let mut len_buf = [0u8; 2];
        stream.read_exact(&mut len_buf).await?;
        let response_len = u16::from_be_bytes(len_buf) as usize;
        let mut response = vec![0u8; response_len];
        stream.read_exact(&mut response).await?;
        Ok::<Vec<u8>, std::io::Error>(response)
    })
    .await
    .map_err(|_| DnsError::Resolver("timeout".to_string()))?
    .map_err(|err| DnsError::Resolver(err.to_string()))?;
    let response =
        Message::from_vec(&response_bytes).map_err(|err| DnsError::Resolver(err.to_string()))?;
    // Duration covers the query round-trip only, not connect/handshake.
    let duration_ms = start.elapsed().as_millis();
    let mut answers = Vec::new();
    for record in response.answers() {
        let ttl = record.ttl();
        let name = record.name().to_string();
        let record_type = record.record_type().to_string();
        if let Some(data) = record.data() {
            // Records whose RDATA has no printable form are dropped.
            if let Some(data) = format_rdata(data) {
                answers.push(DnsAnswer {
                    name,
                    record_type,
                    ttl,
                    data,
                });
            }
        }
    }
    Ok(DnsQueryReport {
        domain: domain.to_string(),
        record_type: record_type.to_string(),
        transport: DnsTransport::Dot.to_string(),
        server: Some(server.addr.to_string()),
        server_name: Some(tls_name),
        proxy: Some(proxy),
        rcode: response.response_code().to_string(),
        answers,
        duration_ms,
    })
}
/// Build a TLS connector backed by the operating system's trust store.
///
/// Native stores routinely contain the odd malformed or legacy certificate;
/// previously a single unparsable entry aborted connector construction and
/// broke DoT entirely. We now skip unparsable entries (via
/// `add_parsable_certificates`) and only fail when nothing usable loads.
fn build_tls_connector() -> Result<TlsConnector, DnsError> {
    let native = rustls_native_certs::load_native_certs()
        .map_err(|err| DnsError::Io(err.to_string()))?;
    // rustls_native_certs 0.6 wraps DER bytes in a `Certificate(Vec<u8>)`.
    let der_certs: Vec<Vec<u8>> = native.into_iter().map(|cert| cert.0).collect();
    let mut roots = RootCertStore::empty();
    // Returns (added, ignored); malformed entries are counted, not fatal.
    let (added, _ignored) = roots.add_parsable_certificates(&der_certs);
    if added == 0 {
        return Err(DnsError::Resolver(
            "no usable root certificates in the native store".to_string(),
        ));
    }
    let config = ClientConfig::builder()
        .with_safe_defaults()
        .with_root_certificates(roots)
        .with_no_client_auth();
    Ok(TlsConnector::from(Arc::new(config)))
}
/// A parsed SOCKS5 proxy endpoint, produced by `parse_socks5_proxy`.
struct Socks5Proxy {
    // "host:port" string passed to `Socks5Stream::connect`.
    addr: String,
    // True for the `socks5h` scheme: the proxy resolves hostnames
    // instead of the local machine.
    remote_dns: bool,
}
/// Parse a `socks5://host[:port]` or `socks5h://host[:port]` proxy URL.
///
/// `socks5h` delegates hostname resolution to the proxy. Credentials are
/// rejected because they are not forwarded to the SOCKS handshake. When the
/// URL omits a port we now fall back to 1080, the conventional SOCKS port
/// (the `url` crate has no known default for socks schemes, so port-less
/// URLs previously failed with a generic proxy error).
fn parse_socks5_proxy(value: &str) -> Result<Socks5Proxy, DnsError> {
    let url = Url::parse(value).map_err(|_| DnsError::Proxy(value.to_string()))?;
    let scheme = url.scheme();
    let remote_dns = match scheme {
        "socks5" => false,
        "socks5h" => true,
        _ => return Err(DnsError::ProxyUnsupported(scheme.to_string())),
    };
    // Fail loudly rather than silently dropping credentials.
    if !url.username().is_empty() || url.password().is_some() {
        return Err(DnsError::Proxy("proxy auth not supported".to_string()));
    }
    let host = url
        .host_str()
        .ok_or_else(|| DnsError::Proxy(value.to_string()))?;
    // 1080 is the standard SOCKS port (RFC 1928).
    let port = url.port_or_known_default().unwrap_or(1080);
    Ok(Socks5Proxy {
        addr: format!("{host}:{port}"),
        remote_dns,
    })
}
#[cfg(feature = "pcap")] #[cfg(feature = "pcap")]
fn select_interface(name: Option<&str>) -> Option<NetworkInterface> { fn select_interface(name: Option<&str>) -> Option<NetworkInterface> {
let interfaces = datalink::interfaces(); let interfaces = datalink::interfaces();

View File

@@ -0,0 +1,17 @@
# Manifest for the wtfnet-dnsleak crate: DNS leak detection (sensor, rules, report).
[package]
name = "wtfnet-dnsleak"
version = "0.1.0"
edition = "2024"
[dependencies]
# DNS wire-format parsing for captured payloads.
hickory-proto = "0.24"
# CIDR types used by the leak policy's destination allow-lists.
ipnet = { version = "2", features = ["serde"] }
serde = { version = "1", features = ["derive"] }
thiserror = "2"
tokio = { version = "1", features = ["rt"] }
tracing = "0.1"
# Shared flow-owner / flow-tuple abstractions.
wtfnet-platform = { path = "../wtfnet-platform" }
# Optional packet capture backend; only compiled with the `pcap` feature.
pnet = { version = "0.34", optional = true }
[features]
pcap = ["dep:pnet"]

View File

@@ -0,0 +1,60 @@
use crate::report::LeakTransport;
use hickory_proto::op::{Message, MessageType};
use hickory_proto::rr::RData;
use serde::{Deserialize, Serialize};
use std::net::IpAddr;
use wtfnet_platform::FlowProtocol;
/// A captured flow event after transport classification, ready for
/// route/owner enrichment.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClassifiedEvent {
    pub timestamp_ms: u128,
    pub proto: FlowProtocol,
    pub src_ip: IpAddr,
    pub src_port: u16,
    pub dst_ip: IpAddr,
    pub dst_port: u16,
    // Capture interface, when known.
    pub iface_name: Option<String>,
    pub transport: LeakTransport,
    // DNS fields are None for non-DNS traffic.
    pub qname: Option<String>,
    pub qtype: Option<String>,
    pub rcode: Option<String>,
    pub is_response: bool,
    // A/AAAA addresses extracted from a response's answer section.
    pub answer_ips: Vec<IpAddr>,
}
/// Fields extracted from a single DNS message by `parse_dns_message`.
pub struct ParsedDns {
    pub qname: String,
    pub qtype: String,
    pub rcode: String,
    pub is_response: bool,
    pub answer_ips: Vec<IpAddr>,
}
pub fn parse_dns_message(payload: &[u8]) -> Option<ParsedDns> {
let message = Message::from_vec(payload).ok()?;
let is_response = message.message_type() == MessageType::Response;
let query = message.queries().first()?;
let qname = query.name().to_utf8();
let qtype = query.query_type().to_string();
let rcode = message.response_code().to_string();
let mut answer_ips = Vec::new();
if is_response {
for record in message.answers() {
if let Some(data) = record.data() {
match data {
RData::A(addr) => answer_ips.push(IpAddr::V4(addr.0)),
RData::AAAA(addr) => answer_ips.push(IpAddr::V6(addr.0)),
_ => {}
}
}
}
}
Some(ParsedDns {
qname,
qtype,
rcode,
is_response,
answer_ips,
})
}

View File

@@ -0,0 +1,222 @@
mod classify;
mod policy;
mod privacy;
mod report;
mod route;
mod rules;
mod sensor;
use crate::classify::ClassifiedEvent;
use crate::sensor::{capture_events, SensorEvent, TcpEvent};
use std::time::Instant;
use thiserror::Error;
use tracing::debug;
use wtfnet_platform::{FlowOwnerProvider, FlowTuple};
pub use crate::policy::{LeakPolicy, LeakPolicyProfile, PolicySummary};
pub use crate::privacy::{apply_privacy, PrivacyMode};
pub use crate::report::{LeakEvent, LeakReport, LeakSummary, LeakTransport, RouteClass, Severity};
pub use crate::sensor::{iface_diagnostics, IfaceDiag};
/// Errors surfaced by the DNS leak watcher.
#[derive(Debug, Error)]
pub enum DnsLeakError {
    /// Feature unavailable in this build (e.g. compiled without `pcap`).
    #[error("not supported: {0}")]
    NotSupported(String),
    /// Capture or task I/O failure.
    #[error("io error: {0}")]
    Io(String),
    /// Invalid or inconsistent leak policy configuration.
    #[error("policy error: {0}")]
    Policy(String),
}
/// Configuration for a single leak-watch capture session.
#[derive(Debug, Clone)]
pub struct LeakWatchOptions {
    // How long to capture, in milliseconds.
    pub duration_ms: u64,
    // Capture interface name; None lets the sensor pick one.
    pub iface: Option<String>,
    pub policy: LeakPolicy,
    pub privacy: PrivacyMode,
    // When false, the report carries only the summary, not per-event rows.
    pub include_events: bool,
}
/// Run a leak-watch session: capture traffic, enrich and classify each event
/// against the policy, and produce a `LeakReport`.
///
/// DNS responses feed an IP→qname cache used to detect route mismatches on
/// later TCP connects; DNS queries are evaluated against the policy rules.
/// `flow_owner`, when provided, attributes events to owning processes.
pub async fn watch(
    options: LeakWatchOptions,
    flow_owner: Option<&dyn FlowOwnerProvider>,
) -> Result<LeakReport, DnsLeakError> {
    debug!(
        duration_ms = options.duration_ms,
        iface = ?options.iface,
        include_events = options.include_events,
        "dns leak watch start"
    );
    let start = Instant::now();
    // Blocks (asynchronously) until the capture window ends.
    let events = capture_events(&options).await?;
    let mut leak_events = Vec::new();
    // Maps answered IPs to the qname/route of the response that produced them.
    let mut dns_cache: std::collections::HashMap<std::net::IpAddr, DnsCacheEntry> =
        std::collections::HashMap::new();
    for event in events {
        match event {
            SensorEvent::Dns(event) => {
                let enriched = enrich_event(event, flow_owner).await;
                // Responses only feed the mismatch cache; they are not
                // themselves evaluated as leaks.
                if enriched.is_response {
                    update_dns_cache(&mut dns_cache, &enriched);
                    continue;
                }
                if let Some(decision) = rules::evaluate(&enriched, &options.policy) {
                    let mut leak_event = report::LeakEvent::from_decision(enriched, decision);
                    privacy::apply_privacy(&mut leak_event, options.privacy);
                    leak_events.push(leak_event);
                }
            }
            SensorEvent::Tcp(event) => {
                // TCP connects are checked against the DNS cache for
                // route-class mismatches (Leak-D).
                if let Some(leak_event) =
                    evaluate_mismatch(event, flow_owner, &mut dns_cache, options.privacy).await
                {
                    leak_events.push(leak_event);
                }
            }
        }
    }
    let summary = LeakSummary::from_events(&leak_events);
    let report = LeakReport {
        duration_ms: start.elapsed().as_millis() as u64,
        policy: options.policy.summary(),
        summary,
        events: if options.include_events {
            leak_events
        } else {
            Vec::new()
        },
    };
    Ok(report)
}
/// Enrich a classified event with its route class and, when a provider is
/// available, the owning process of the flow. Owner lookup failures are
/// recorded on the event rather than propagated.
async fn enrich_event(
    event: ClassifiedEvent,
    flow_owner: Option<&dyn FlowOwnerProvider>,
) -> report::EnrichedEvent {
    let mut enriched = route::enrich_route(event);
    if let Some(provider) = flow_owner {
        let flow = FlowTuple {
            proto: enriched.proto,
            src_ip: enriched.src_ip,
            src_port: enriched.src_port,
            dst_ip: enriched.dst_ip,
            dst_port: enriched.dst_port,
        };
        match provider.owner_of(flow).await {
            Ok(result) => {
                enriched.owner = result.owner;
                enriched.owner_confidence = result.confidence;
                enriched.owner_failure = result.failure_reason;
            }
            Err(err) => {
                // Keep the event; just note why attribution failed.
                enriched.owner_failure = Some(err.message);
            }
        }
    }
    enriched
}
/// Cache entry mapping a resolved IP back to the DNS response that produced
/// it, used to detect route mismatches on subsequent TCP connects.
struct DnsCacheEntry {
    // Query name whose answer contained this IP.
    qname: String,
    // Route class of the DNS response itself.
    route_class: RouteClass,
    // Event timestamp used for TTL-based pruning.
    timestamp_ms: u128,
}
// Entries older than one minute are evicted before each lookup/insert.
const DNS_CACHE_TTL_MS: u128 = 60_000;
// Record every answered IP from a DNS response so later TCP connects to
// those IPs can be correlated with the route that resolved them.
fn update_dns_cache(cache: &mut std::collections::HashMap<std::net::IpAddr, DnsCacheEntry>, event: &report::EnrichedEvent) {
    let qname = match event.qname.as_ref() {
        Some(value) => value,
        None => return,
    };
    let now = event.timestamp_ms;
    // Drop expired entries before adding fresh ones.
    prune_dns_cache(cache, now);
    for ip in &event.answer_ips {
        debug!(
            "dns leak cache insert ip={} qname={} route={:?}",
            ip, qname, event.route_class
        );
        let entry = DnsCacheEntry {
            qname: qname.clone(),
            route_class: event.route_class,
            timestamp_ms: now,
        };
        cache.insert(*ip, entry);
    }
}
// Evict cache entries older than the TTL so stale DNS answers cannot
// implicate unrelated TCP connections.
fn prune_dns_cache(
    cache: &mut std::collections::HashMap<std::net::IpAddr, DnsCacheEntry>,
    now_ms: u128,
) {
    cache.retain(|_, entry| {
        let age_ms = now_ms.saturating_sub(entry.timestamp_ms);
        age_ms <= DNS_CACHE_TTL_MS
    });
}
/// Leak-D check: a TCP SYN to an IP whose DNS answer travelled a different
/// route class (e.g. DNS over the tunnel but the connection over a physical
/// NIC) indicates a resolution/transport mismatch.
///
/// Returns a privacy-scrubbed `LeakEvent`, or `None` when the destination is
/// unknown to the cache or the route classes agree.
async fn evaluate_mismatch(
    event: TcpEvent,
    flow_owner: Option<&dyn FlowOwnerProvider>,
    cache: &mut std::collections::HashMap<std::net::IpAddr, DnsCacheEntry>,
    privacy: PrivacyMode,
) -> Option<LeakEvent> {
    prune_dns_cache(cache, event.timestamp_ms);
    debug!(
        "dns leak tcp syn dst_ip={} dst_port={} cache_size={}",
        event.dst_ip,
        event.dst_port,
        cache.len()
    );
    // No cached DNS answer for this destination → nothing to compare.
    let entry = cache.get(&event.dst_ip)?;
    let tcp_route = route::route_class_for(event.src_ip, event.dst_ip, event.iface_name.as_deref());
    // NOTE(review): this log fires on the *matching* (skip) path even though
    // its message says "mismatch skip" — confirm the wording is intended.
    if tcp_route == entry.route_class {
        debug!(
            "dns leak mismatch skip dst_ip={} tcp_route={:?} dns_route={:?}",
            event.dst_ip, tcp_route, entry.route_class
        );
        return None;
    }
    // Synthesize an enriched event for the TCP flow, borrowing the qname
    // from the cached DNS answer.
    let mut enriched = report::EnrichedEvent {
        timestamp_ms: event.timestamp_ms,
        proto: wtfnet_platform::FlowProtocol::Tcp,
        src_ip: event.src_ip,
        src_port: event.src_port,
        dst_ip: event.dst_ip,
        dst_port: event.dst_port,
        iface_name: event.iface_name.clone(),
        transport: LeakTransport::Unknown,
        qname: Some(entry.qname.clone()),
        qtype: None,
        rcode: None,
        is_response: false,
        answer_ips: Vec::new(),
        route_class: tcp_route,
        owner: None,
        owner_confidence: wtfnet_platform::FlowOwnerConfidence::None,
        owner_failure: None,
    };
    // Best-effort process attribution; failures leave owner fields empty.
    if let Some(provider) = flow_owner {
        let flow = FlowTuple {
            proto: wtfnet_platform::FlowProtocol::Tcp,
            src_ip: event.src_ip,
            src_port: event.src_port,
            dst_ip: event.dst_ip,
            dst_port: event.dst_port,
        };
        if let Ok(result) = provider.owner_of(flow).await {
            enriched.owner = result.owner;
            enriched.owner_confidence = result.confidence;
            enriched.owner_failure = result.failure_reason;
        }
    }
    let decision = rules::LeakDecision {
        leak_type: report::LeakType::D,
        severity: Severity::P2,
        policy_rule_id: "LEAK_D_MISMATCH".to_string(),
    };
    let mut leak_event = report::LeakEvent::from_decision(enriched, decision);
    privacy::apply_privacy(&mut leak_event, privacy);
    Some(leak_event)
}

View File

@@ -0,0 +1,113 @@
use ipnet::IpNet;
use serde::{Deserialize, Serialize};
/// Preset policy profile controlling which interfaces are trusted by default.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum LeakPolicyProfile {
    FullTunnel,
    ProxyStub,
    Split,
}
/// Allow-lists that decide whether an observed DNS event counts as a leak.
/// Empty lists mean "nothing allowed" (see `rules::is_allowed`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakPolicy {
    pub profile: LeakPolicyProfile,
    pub allowed_ifaces: Vec<String>,
    pub tunnel_ifaces: Vec<String>,
    pub loopback_ifaces: Vec<String>,
    // Destination networks (CIDR) where DNS traffic is acceptable.
    pub allowed_destinations: Vec<IpNet>,
    pub allowed_ports: Vec<u16>,
    pub allowed_processes: Vec<String>,
    // Domains that must go through the proxy (Leak-B rule).
    pub proxy_required_domains: Vec<String>,
    pub allowlist_domains: Vec<String>,
}
/// Redacted view of the policy embedded in reports (destinations stringified).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PolicySummary {
    pub profile: LeakPolicyProfile,
    pub allowed_ifaces: Vec<String>,
    pub tunnel_ifaces: Vec<String>,
    pub allowed_destinations: Vec<String>,
    pub allowed_ports: Vec<u16>,
    pub allowed_processes: Vec<String>,
}
impl LeakPolicy {
    /// Build a policy from a profile and the machine's interface names:
    /// loopback and tunnel-looking interfaces become the allowed set.
    ///
    /// NOTE(review): all three profiles currently produce the same allowed
    /// interface list — presumably Split is meant to diverge later; confirm.
    pub fn from_profile(profile: LeakPolicyProfile, ifaces: &[String]) -> Self {
        let loopback_ifaces = detect_loopback_ifaces(ifaces);
        let tunnel_ifaces = detect_tunnel_ifaces(ifaces);
        let allowed_ifaces = match profile {
            LeakPolicyProfile::FullTunnel | LeakPolicyProfile::ProxyStub => {
                merge_lists(&loopback_ifaces, &tunnel_ifaces)
            }
            LeakPolicyProfile::Split => merge_lists(&loopback_ifaces, &tunnel_ifaces),
        };
        // Remaining allow-lists start empty and are filled in by callers.
        LeakPolicy {
            profile,
            allowed_ifaces,
            tunnel_ifaces,
            loopback_ifaces,
            allowed_destinations: Vec::new(),
            allowed_ports: Vec::new(),
            allowed_processes: Vec::new(),
            proxy_required_domains: Vec::new(),
            allowlist_domains: Vec::new(),
        }
    }
    /// Report-friendly snapshot of the policy (CIDR nets stringified).
    pub fn summary(&self) -> PolicySummary {
        PolicySummary {
            profile: self.profile,
            allowed_ifaces: self.allowed_ifaces.clone(),
            tunnel_ifaces: self.tunnel_ifaces.clone(),
            allowed_destinations: self
                .allowed_destinations
                .iter()
                .map(|net| net.to_string())
                .collect(),
            allowed_ports: self.allowed_ports.clone(),
            allowed_processes: self.allowed_processes.clone(),
        }
    }
}
// Keep the interfaces whose (case-insensitive) name identifies a loopback
// device, preserving their original order and spelling.
fn detect_loopback_ifaces(ifaces: &[String]) -> Vec<String> {
    let looks_loopback = |name: &str| {
        let lower = name.to_ascii_lowercase();
        matches!(lower.as_str(), "lo" | "lo0")
            || lower.contains("loopback")
            || lower.contains("localhost")
    };
    ifaces
        .iter()
        .filter(|name| looks_loopback(name))
        .cloned()
        .collect()
}
// Heuristic: keep interfaces whose lower-cased name contains a common
// VPN/tunnel adapter fragment.
fn detect_tunnel_ifaces(ifaces: &[String]) -> Vec<String> {
    const HINTS: [&str; 6] = ["tun", "tap", "wg", "wireguard", "vpn", "ppp"];
    ifaces
        .iter()
        .filter(|name| {
            let lower = name.to_ascii_lowercase();
            HINTS.iter().any(|hint| lower.contains(hint))
        })
        .cloned()
        .collect()
}
// Concatenate `a` then `b`, keeping only the first occurrence of each value
// (order-preserving dedup; lists are tiny so linear scans are fine).
fn merge_lists(a: &[String], b: &[String]) -> Vec<String> {
    let mut merged: Vec<String> = Vec::new();
    for candidate in a.iter().chain(b) {
        if !merged.contains(candidate) {
            merged.push(candidate.clone());
        }
    }
    merged
}

View File

@@ -0,0 +1,35 @@
use crate::report::LeakEvent;
use serde::{Deserialize, Serialize};
/// How much query detail the final report may contain.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum PrivacyMode {
    /// Keep everything as captured.
    Full,
    /// Collapse query names to their last two labels.
    Redacted,
    /// Strip qname/qtype/rcode entirely.
    Minimal,
}
/// Scrub query details from a leak event in place, per the privacy mode.
pub fn apply_privacy(event: &mut LeakEvent, mode: PrivacyMode) {
    match mode {
        PrivacyMode::Full => {
            // nothing to scrub
        }
        PrivacyMode::Redacted => {
            // keep only the registrable tail of the query name
            event.qname = event.qname.take().map(|value| redact_domain(&value));
        }
        PrivacyMode::Minimal => {
            event.qname = None;
            event.qtype = None;
            event.rcode = None;
        }
    }
}
// Reduce "a.b.example.com" to "example.com"; values with fewer than two
// non-empty labels come back unchanged.
fn redact_domain(value: &str) -> String {
    let labels: Vec<&str> = value.split('.').filter(|label| !label.is_empty()).collect();
    match labels.as_slice() {
        [.., second_level, top_level] => format!("{second_level}.{top_level}"),
        _ => value.to_string(),
    }
}

View File

@@ -0,0 +1,194 @@
use crate::policy::PolicySummary;
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, HashMap};
use std::net::IpAddr;
use wtfnet_platform::{FlowOwner, FlowOwnerConfidence, FlowProtocol};
/// Transport over which a DNS exchange was observed.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum LeakTransport {
    Udp53,
    Tcp53,
    Dot,
    Doh,
    /// Not a DNS flow (e.g. the TCP side of a mismatch check).
    Unknown,
}
/// Leak category assigned by the rules engine
/// (A: plaintext, B: proxy-required, C: encrypted-but-disallowed, D: route mismatch).
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[serde(rename_all = "lowercase")]
pub enum LeakType {
    A,
    B,
    C,
    D,
}
/// Coarse classification of the path a flow took.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum RouteClass {
    Loopback,
    Tunnel,
    Physical,
    Unknown,
}
/// Severity ordering for report triage (P0 most severe).
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Severity {
    P0,
    P1,
    P2,
    P3,
}
/// A classified event after route and process-owner enrichment; input to the
/// rules engine.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnrichedEvent {
    pub timestamp_ms: u128,
    pub proto: FlowProtocol,
    pub src_ip: IpAddr,
    pub src_port: u16,
    pub dst_ip: IpAddr,
    pub dst_port: u16,
    pub iface_name: Option<String>,
    pub transport: LeakTransport,
    // DNS fields; None for non-DNS flows.
    pub qname: Option<String>,
    pub qtype: Option<String>,
    pub rcode: Option<String>,
    pub is_response: bool,
    pub answer_ips: Vec<IpAddr>,
    pub route_class: RouteClass,
    // Process attribution, when a FlowOwnerProvider resolved it.
    pub owner: Option<FlowOwner>,
    pub owner_confidence: FlowOwnerConfidence,
    pub owner_failure: Option<String>,
}
/// Final, report-facing record of one detected leak.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakEvent {
    pub timestamp_ms: u128,
    pub transport: LeakTransport,
    // Subject to privacy scrubbing (see `apply_privacy`).
    pub qname: Option<String>,
    pub qtype: Option<String>,
    pub rcode: Option<String>,
    pub iface_name: Option<String>,
    pub route_class: RouteClass,
    pub dst_ip: String,
    pub dst_port: u16,
    // Flattened process attribution.
    pub pid: Option<u32>,
    pub ppid: Option<u32>,
    pub process_name: Option<String>,
    pub process_path: Option<String>,
    pub attribution_confidence: FlowOwnerConfidence,
    pub attribution_failure: Option<String>,
    // Verdict from the rules engine.
    pub leak_type: LeakType,
    pub severity: Severity,
    pub policy_rule_id: String,
}
/// Count of events for one leak type.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakTypeCount {
    pub leak_type: LeakType,
    pub count: usize,
}
/// Generic (key, count) pair used for top-N listings.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SummaryItem {
    pub key: String,
    pub count: usize,
}
/// Aggregated view over all leak events of one session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakSummary {
    pub total: usize,
    pub by_type: Vec<LeakTypeCount>,
    pub top_processes: Vec<SummaryItem>,
    // Keys are "ip:port" strings.
    pub top_destinations: Vec<SummaryItem>,
}
/// Complete output of a leak-watch session.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakReport {
    pub duration_ms: u64,
    pub policy: PolicySummary,
    pub summary: LeakSummary,
    // Empty unless `include_events` was requested.
    pub events: Vec<LeakEvent>,
}
impl LeakEvent {
    /// Flatten an enriched capture event plus a rule decision into the
    /// report-facing leak record.
    pub fn from_decision(event: EnrichedEvent, decision: crate::rules::LeakDecision) -> Self {
        // Pull process attribution out of the optional owner in one step.
        let (pid, ppid, process_name, process_path) = match event.owner.as_ref() {
            Some(owner) => (
                owner.pid,
                owner.ppid,
                owner.process_name.clone(),
                owner.process_path.clone(),
            ),
            None => (None, None, None, None),
        };
        LeakEvent {
            timestamp_ms: event.timestamp_ms,
            transport: event.transport,
            qname: event.qname,
            qtype: event.qtype,
            rcode: event.rcode,
            iface_name: event.iface_name,
            route_class: event.route_class,
            dst_ip: event.dst_ip.to_string(),
            dst_port: event.dst_port,
            pid,
            ppid,
            process_name,
            process_path,
            attribution_confidence: event.owner_confidence,
            attribution_failure: event.owner_failure,
            leak_type: decision.leak_type,
            severity: decision.severity,
            policy_rule_id: decision.policy_rule_id,
        }
    }
}
impl LeakSummary {
    /// Aggregate leak events into per-type counts plus top-5 process and
    /// destination listings.
    pub fn from_events(events: &[LeakEvent]) -> Self {
        let total = events.len();
        let mut by_type_map: HashMap<LeakType, usize> = HashMap::new();
        // BTreeMaps give deterministic iteration order for the top-N lists.
        let mut process_map: BTreeMap<String, usize> = BTreeMap::new();
        let mut dest_map: BTreeMap<String, usize> = BTreeMap::new();
        for event in events {
            *by_type_map.entry(event.leak_type).or_insert(0) += 1;
            // Events without attribution simply don't count toward processes.
            if let Some(name) = event.process_name.as_ref() {
                *process_map.entry(name.clone()).or_insert(0) += 1;
            }
            let dst_key = format!("{}:{}", event.dst_ip, event.dst_port);
            *dest_map.entry(dst_key).or_insert(0) += 1;
        }
        let mut by_type = by_type_map
            .into_iter()
            .map(|(leak_type, count)| LeakTypeCount { leak_type, count })
            .collect::<Vec<_>>();
        // Stable A..D ordering in the report.
        by_type.sort_by(|a, b| a.leak_type.cmp(&b.leak_type));
        let top_processes = top_items(process_map, 5);
        let top_destinations = top_items(dest_map, 5);
        LeakSummary {
            total,
            by_type,
            top_processes,
            top_destinations,
        }
    }
}
// Turn a counter map into at most `limit` items, sorted by descending count
// with ties broken by key (ascending) for deterministic output.
fn top_items(map: BTreeMap<String, usize>, limit: usize) -> Vec<SummaryItem> {
    let mut items: Vec<SummaryItem> = map
        .into_iter()
        .map(|(key, count)| SummaryItem { key, count })
        .collect();
    items.sort_by(|lhs, rhs| {
        rhs.count
            .cmp(&lhs.count)
            .then_with(|| lhs.key.cmp(&rhs.key))
    });
    items.truncate(limit);
    items
}

View File

@@ -0,0 +1,53 @@
use crate::classify::ClassifiedEvent;
use crate::report::{EnrichedEvent, RouteClass};
use wtfnet_platform::FlowOwnerConfidence;
/// Promote a classified event to an enriched one by computing its route
/// class; owner fields start empty and are filled in by the caller.
pub fn enrich_route(event: ClassifiedEvent) -> EnrichedEvent {
    let route_class = route_class_for(event.src_ip, event.dst_ip, event.iface_name.as_deref());
    EnrichedEvent {
        timestamp_ms: event.timestamp_ms,
        proto: event.proto,
        src_ip: event.src_ip,
        src_port: event.src_port,
        dst_ip: event.dst_ip,
        dst_port: event.dst_port,
        iface_name: event.iface_name,
        transport: event.transport,
        qname: event.qname,
        qtype: event.qtype,
        rcode: event.rcode,
        is_response: event.is_response,
        answer_ips: event.answer_ips,
        route_class,
        // Attribution is resolved later by enrich_event.
        owner: None,
        owner_confidence: FlowOwnerConfidence::None,
        owner_failure: None,
    }
}
pub fn route_class_for(
src_ip: std::net::IpAddr,
dst_ip: std::net::IpAddr,
iface_name: Option<&str>,
) -> RouteClass {
if src_ip.is_loopback() || dst_ip.is_loopback() {
RouteClass::Loopback
} else if iface_name.map(is_tunnel_iface).unwrap_or(false) {
RouteClass::Tunnel
} else if iface_name.is_some() {
RouteClass::Physical
} else {
RouteClass::Unknown
}
}
// Heuristic match on common VPN/tunnel adapter name fragments
// (case-insensitive).
fn is_tunnel_iface(name: &str) -> bool {
    const HINTS: [&str; 6] = ["tun", "tap", "wg", "wireguard", "vpn", "ppp"];
    let lower = name.to_ascii_lowercase();
    HINTS.iter().any(|hint| lower.contains(hint))
}

View File

@@ -0,0 +1,116 @@
use crate::policy::LeakPolicy;
use crate::report::{EnrichedEvent, LeakTransport, LeakType, Severity};
/// Verdict produced by the rules engine for one event.
#[derive(Debug, Clone)]
pub struct LeakDecision {
    pub leak_type: LeakType,
    pub severity: Severity,
    // Stable identifier of the rule that fired (e.g. "LEAK_A_PLAINTEXT").
    pub policy_rule_id: String,
}
/// Apply the leak rules to one enriched DNS event.
///
/// Plaintext DNS (UDP/TCP 53): Leak-B when the domain should have gone via
/// the proxy, else Leak-A when the flow isn't allowed. Encrypted DNS
/// (DoT/DoH): Leak-C when not allowed. Returns `None` for allowed traffic
/// and for non-DNS transports.
pub fn evaluate(event: &EnrichedEvent, policy: &LeakPolicy) -> Option<LeakDecision> {
    match event.transport {
        LeakTransport::Udp53 | LeakTransport::Tcp53 => {
            // Proxy-bypass takes precedence over the generic plaintext rule.
            if is_proxy_required(event, policy) && !is_allowed(event, policy) {
                return Some(LeakDecision {
                    leak_type: LeakType::B,
                    severity: Severity::P1,
                    policy_rule_id: "LEAK_B_PROXY_REQUIRED".to_string(),
                });
            }
            if !is_allowed(event, policy) {
                return Some(LeakDecision {
                    leak_type: LeakType::A,
                    severity: Severity::P0,
                    policy_rule_id: "LEAK_A_PLAINTEXT".to_string(),
                });
            }
        }
        LeakTransport::Dot | LeakTransport::Doh => {
            if !is_allowed(event, policy) {
                return Some(LeakDecision {
                    leak_type: LeakType::C,
                    severity: Severity::P1,
                    policy_rule_id: "LEAK_C_ENCRYPTED".to_string(),
                });
            }
        }
        LeakTransport::Unknown => {}
    }
    None
}
// An event is allowed when it matches any of the policy's allow-lists.
// A completely empty policy allows nothing, so an unconfigured profile
// surfaces every event instead of silently passing all traffic.
fn is_allowed(event: &EnrichedEvent, policy: &LeakPolicy) -> bool {
    let policy_is_empty = policy.allowed_ifaces.is_empty()
        && policy.allowed_destinations.is_empty()
        && policy.allowed_ports.is_empty()
        && policy.allowed_processes.is_empty();
    if policy_is_empty {
        return false;
    }
    // Interface allow-list (case-insensitive).
    let iface_allowed = event.iface_name.as_ref().is_some_and(|iface| {
        policy
            .allowed_ifaces
            .iter()
            .any(|allowed| allowed.eq_ignore_ascii_case(iface))
    });
    if iface_allowed {
        return true;
    }
    // Destination port allow-list.
    if policy.allowed_ports.contains(&event.dst_port) {
        return true;
    }
    // Destination network (CIDR) allow-list.
    if policy
        .allowed_destinations
        .iter()
        .any(|net| net.contains(&event.dst_ip))
    {
        return true;
    }
    // Process-name allow-list (case-insensitive); unattributed events fail.
    event
        .owner
        .as_ref()
        .and_then(|owner| owner.process_name.as_ref())
        .is_some_and(|name| {
            policy
                .allowed_processes
                .iter()
                .any(|value| value.eq_ignore_ascii_case(name))
        })
}
// A query "requires the proxy" when its name falls under
// `proxy_required_domains`, or — if an allowlist is configured — when it
// fails to match any `allowlist_domains` suffix.
fn is_proxy_required(event: &EnrichedEvent, policy: &LeakPolicy) -> bool {
    let qname = match event.qname.as_ref() {
        Some(value) => value.to_ascii_lowercase(),
        None => return false,
    };
    // Exact match or dot-separated suffix match, case-insensitively.
    let matches_domain = |domain: &String| {
        let domain = domain.to_ascii_lowercase();
        qname == domain || qname.ends_with(&format!(".{domain}"))
    };
    if policy
        .proxy_required_domains
        .iter()
        .any(|domain| matches_domain(domain))
    {
        return true;
    }
    if policy.allowlist_domains.is_empty() {
        false
    } else {
        !policy
            .allowlist_domains
            .iter()
            .any(|domain| matches_domain(domain))
    }
}

View File

@@ -0,0 +1,444 @@
use crate::classify::{parse_dns_message, ClassifiedEvent};
use crate::report::LeakTransport;
use crate::DnsLeakError;
use std::collections::HashSet;
use std::net::IpAddr;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use tracing::debug;
use wtfnet_platform::FlowProtocol;
use crate::LeakWatchOptions;
#[cfg(feature = "pcap")]
use pnet::datalink::{self, Channel, Config as DatalinkConfig};
#[cfg(feature = "pcap")]
use std::sync::mpsc;
// Per-interface budget for opening a capture channel during selection.
#[cfg(feature = "pcap")]
const OPEN_IFACE_TIMEOUT_MS: u64 = 700;
// Poll interval for the blocking frame receiver.
#[cfg(feature = "pcap")]
const FRAME_RECV_TIMEOUT_MS: u64 = 200;
/// Stub used when the crate is built without the `pcap` feature:
/// capture is unavailable, so every call fails with `NotSupported`.
#[cfg(not(feature = "pcap"))]
pub async fn capture_events(_options: &LeakWatchOptions) -> Result<Vec<SensorEvent>, DnsLeakError> {
    Err(DnsLeakError::NotSupported(
        "dns leak watch requires pcap feature".to_string(),
    ))
}
/// Run the blocking packet capture on a worker thread and collect its events.
///
/// The outer timeout budgets the capture window plus per-interface open
/// attempts plus a 2s grace, so the async caller never outlives a wedged
/// capture thread.
#[cfg(feature = "pcap")]
pub async fn capture_events(options: &LeakWatchOptions) -> Result<Vec<SensorEvent>, DnsLeakError> {
    let options = options.clone();
    let iface_list = datalink::interfaces();
    let candidates = format_iface_list(&iface_list);
    // Worst case: every interface takes the full open timeout.
    let select_budget_ms = (iface_list.len().max(1) as u64).saturating_mul(OPEN_IFACE_TIMEOUT_MS);
    let timeout_ms = options
        .duration_ms
        .saturating_add(select_budget_ms)
        .saturating_add(2000);
    let handle = tokio::task::spawn_blocking(move || capture_events_blocking(options));
    match tokio::time::timeout(Duration::from_millis(timeout_ms), handle).await {
        // Join error (panic/cancel) becomes an Io error; otherwise propagate
        // the capture result as-is.
        Ok(joined) => joined.map_err(|err| DnsLeakError::Io(err.to_string()))?,
        Err(_) => {
            // Include the candidate list so logs show what was attempted.
            return Err(DnsLeakError::Io(
                format!(
                    "capture timed out waiting for interface; candidates: {candidates}"
                ),
            ))
        }
    }
}
/// A non-DNS TCP connection attempt (outbound SYN) observed on the wire.
#[derive(Debug, Clone)]
pub struct TcpEvent {
    /// Capture time, milliseconds since the Unix epoch.
    pub timestamp_ms: u128,
    pub src_ip: IpAddr,
    pub src_port: u16,
    pub dst_ip: IpAddr,
    pub dst_port: u16,
    /// Interface the frame was captured on, when known.
    pub iface_name: Option<String>,
}
/// Event emitted by the capture loop: either a classified DNS exchange or a
/// plain TCP connection attempt for the policy layer to inspect.
#[derive(Debug, Clone)]
pub enum SensorEvent {
    Dns(ClassifiedEvent),
    Tcp(TcpEvent),
}
/// Result of probing one network interface for capture support.
#[derive(Debug, Clone)]
pub struct IfaceDiag {
    pub name: String,
    /// Whether a capture channel could be opened on this interface.
    pub open_ok: bool,
    /// Error text when opening failed; "-" on success.
    pub error: String,
}
#[cfg(not(feature = "pcap"))]
/// Stub used when built without the pcap feature: diagnostics need a capture
/// backend, so this always reports the feature as unsupported.
pub fn iface_diagnostics() -> Result<Vec<IfaceDiag>, DnsLeakError> {
    let reason = String::from("dns leak watch requires pcap feature");
    Err(DnsLeakError::NotSupported(reason))
}
#[cfg(feature = "pcap")]
/// Probe every interface once and report whether a capture channel opens,
/// along with the open error when it does not.
pub fn iface_diagnostics() -> Result<Vec<IfaceDiag>, DnsLeakError> {
    let mut config = DatalinkConfig::default();
    config.read_timeout = Some(Duration::from_millis(500));
    let diagnostics = datalink::interfaces()
        .into_iter()
        .map(|iface| match open_channel_with_timeout(iface.clone(), &config) {
            Ok((_iface, _rx)) => IfaceDiag {
                name: iface.name,
                open_ok: true,
                error: "-".to_string(),
            },
            Err(err) => IfaceDiag {
                name: iface.name,
                open_ok: false,
                error: err,
            },
        })
        .collect();
    Ok(diagnostics)
}
#[cfg(feature = "pcap")]
/// Synchronous capture loop: open an interface, read raw frames on a helper
/// thread, and collect de-duplicated DNS/TCP events until the deadline.
fn capture_events_blocking(options: LeakWatchOptions) -> Result<Vec<SensorEvent>, DnsLeakError> {
    use pnet::packet::ethernet::{EtherTypes, EthernetPacket};
    use pnet::packet::Packet;
    let mut config = DatalinkConfig::default();
    config.read_timeout = Some(Duration::from_millis(500));
    let (iface, mut rx) = select_interface(options.iface.as_deref(), &config)?;
    // Addresses bound to the chosen interface; used to keep only local traffic.
    let local_ips = iface.ips.iter().map(|ip| ip.ip()).collect::<Vec<_>>();
    let iface_name = iface.name.clone();
    // Reader thread forwards frames over a channel; it exits once the
    // receiving end is dropped (send fails).
    let (frame_tx, frame_rx) = mpsc::channel();
    std::thread::spawn(move || loop {
        match rx.next() {
            Ok(frame) => {
                if frame_tx.send(frame.to_vec()).is_err() {
                    break;
                }
            }
            // Read timeouts/transient errors: keep polling.
            Err(_) => continue,
        }
    });
    let deadline = Instant::now() + Duration::from_millis(options.duration_ms);
    let mut events = Vec::new();
    // De-duplicates events by transport + 4-tuple for the whole session.
    let mut seen = HashSet::new();
    while Instant::now() < deadline {
        // Short recv timeout keeps the deadline check responsive.
        let frame = match frame_rx.recv_timeout(Duration::from_millis(FRAME_RECV_TIMEOUT_MS)) {
            Ok(frame) => frame,
            Err(_) => continue,
        };
        let ethernet = match EthernetPacket::new(&frame) {
            Some(packet) => packet,
            None => continue,
        };
        let event = match ethernet.get_ethertype() {
            EtherTypes::Ipv4 => parse_ipv4(
                ethernet.payload(),
                &local_ips,
                &iface_name,
            ),
            EtherTypes::Ipv6 => parse_ipv6(
                ethernet.payload(),
                &local_ips,
                &iface_name,
            ),
            _ => None,
        };
        if let Some(event) = event {
            let key = match &event {
                SensorEvent::Dns(value) => format!(
                    "dns:{:?}|{}|{}|{}|{}",
                    value.transport, value.src_ip, value.src_port, value.dst_ip, value.dst_port
                ),
                SensorEvent::Tcp(value) => format!(
                    "tcp:{}|{}|{}|{}",
                    value.src_ip, value.src_port, value.dst_ip, value.dst_port
                ),
            };
            // Only the first occurrence of each flow is logged and recorded.
            if seen.insert(key) {
                match &event {
                    SensorEvent::Dns(value) => {
                        debug!(
                            transport = ?value.transport,
                            src_ip = %value.src_ip,
                            src_port = value.src_port,
                            dst_ip = %value.dst_ip,
                            dst_port = value.dst_port,
                            "dns leak event"
                        );
                    }
                    SensorEvent::Tcp(value) => {
                        debug!(
                            src_ip = %value.src_ip,
                            src_port = value.src_port,
                            dst_ip = %value.dst_ip,
                            dst_port = value.dst_port,
                            "dns leak tcp event"
                        );
                    }
                }
                events.push(event);
            }
        }
    }
    Ok(events)
}
#[cfg(feature = "pcap")]
/// Decode an IPv4 packet and hand its payload to the UDP/TCP parsers when
/// the traffic involves one of this interface's own addresses.
fn parse_ipv4(
    payload: &[u8],
    local_ips: &[IpAddr],
    iface_name: &str,
) -> Option<SensorEvent> {
    use pnet::packet::ip::IpNextHeaderProtocols;
    use pnet::packet::ipv4::Ipv4Packet;
    use pnet::packet::Packet;
    let packet = Ipv4Packet::new(payload)?;
    let source = IpAddr::V4(packet.get_source());
    let destination = IpAddr::V4(packet.get_destination());
    // Drop traffic that neither originates from nor targets this host.
    let touches_local = local_ips.contains(&source) || local_ips.contains(&destination);
    if !touches_local {
        return None;
    }
    match packet.get_next_level_protocol() {
        IpNextHeaderProtocols::Udp => parse_udp(source, destination, packet.payload(), iface_name),
        IpNextHeaderProtocols::Tcp => parse_tcp(source, destination, packet.payload(), iface_name),
        _ => None,
    }
}
#[cfg(feature = "pcap")]
/// Decode an IPv6 packet and hand its payload to the UDP/TCP parsers when
/// the traffic involves one of this interface's own addresses.
fn parse_ipv6(
    payload: &[u8],
    local_ips: &[IpAddr],
    iface_name: &str,
) -> Option<SensorEvent> {
    use pnet::packet::ip::IpNextHeaderProtocols;
    use pnet::packet::ipv6::Ipv6Packet;
    use pnet::packet::Packet;
    let packet = Ipv6Packet::new(payload)?;
    let source = IpAddr::V6(packet.get_source());
    let destination = IpAddr::V6(packet.get_destination());
    // Drop traffic that neither originates from nor targets this host.
    let touches_local = local_ips.contains(&source) || local_ips.contains(&destination);
    if !touches_local {
        return None;
    }
    match packet.get_next_header() {
        IpNextHeaderProtocols::Udp => parse_udp(source, destination, packet.payload(), iface_name),
        IpNextHeaderProtocols::Tcp => parse_tcp(source, destination, packet.payload(), iface_name),
        _ => None,
    }
}
#[cfg(feature = "pcap")]
/// Classify a UDP datagram as a plaintext DNS event when either endpoint
/// is on port 53 and the payload parses as a DNS message.
fn parse_udp(
    src_ip: IpAddr,
    dst_ip: IpAddr,
    payload: &[u8],
    iface_name: &str,
) -> Option<SensorEvent> {
    use pnet::packet::udp::UdpPacket;
    use pnet::packet::Packet;
    let datagram = UdpPacket::new(payload)?;
    let (sport, dport) = (datagram.get_source(), datagram.get_destination());
    if sport != 53 && dport != 53 {
        return None;
    }
    // Non-DNS payloads on port 53 are ignored.
    let message = parse_dns_message(datagram.payload())?;
    Some(SensorEvent::Dns(ClassifiedEvent {
        timestamp_ms: now_ms(),
        proto: FlowProtocol::Udp,
        src_ip,
        src_port: sport,
        dst_ip,
        dst_port: dport,
        iface_name: Some(iface_name.to_string()),
        transport: LeakTransport::Udp53,
        qname: Some(message.qname),
        qtype: Some(message.qtype),
        rcode: Some(message.rcode),
        is_response: message.is_response,
        answer_ips: message.answer_ips,
    }))
}
#[cfg(feature = "pcap")]
/// Classify a TCP segment: destination port 53 becomes TCP DNS and 853
/// becomes DoT; any other port is reported as a plain TCP event only when
/// it is an initial outbound SYN, and everything else is dropped.
fn parse_tcp(
    src_ip: IpAddr,
    dst_ip: IpAddr,
    payload: &[u8],
    iface_name: &str,
) -> Option<SensorEvent> {
    use pnet::packet::tcp::TcpPacket;
    let tcp = TcpPacket::new(payload)?;
    let dst_port = tcp.get_destination();
    let src_port = tcp.get_source();
    let transport = match dst_port {
        53 => LeakTransport::Tcp53,
        853 => LeakTransport::Dot,
        _ => {
            // 0x02 = SYN, 0x10 = ACK: keep only SYN-without-ACK, i.e. new
            // outbound connection attempts, as generic TCP events.
            let flags = tcp.get_flags();
            let syn = flags & 0x02 != 0;
            let ack = flags & 0x10 != 0;
            if syn && !ack {
                return Some(SensorEvent::Tcp(TcpEvent {
                    timestamp_ms: now_ms(),
                    src_ip,
                    src_port,
                    dst_ip,
                    dst_port,
                    iface_name: Some(iface_name.to_string()),
                }));
            }
            return None;
        }
    };
    // DNS-over-TCP / DoT events carry no parsed question; only the tuple
    // and transport are recorded.
    Some(SensorEvent::Dns(ClassifiedEvent {
        timestamp_ms: now_ms(),
        proto: FlowProtocol::Tcp,
        src_ip,
        src_port,
        dst_ip,
        dst_port,
        iface_name: Some(iface_name.to_string()),
        transport,
        qname: None,
        qtype: None,
        rcode: None,
        is_response: false,
        answer_ips: Vec::new(),
    }))
}
#[cfg(feature = "pcap")]
/// Choose the capture interface. An explicitly requested name must exist and
/// open (no fallback); otherwise candidates are tried in preference order
/// until one opens. Errors include the candidate list for diagnostics.
fn select_interface(
    name: Option<&str>,
    config: &DatalinkConfig,
) -> Result<(datalink::NetworkInterface, Box<dyn datalink::DataLinkReceiver>), DnsLeakError> {
    let interfaces = datalink::interfaces();
    if let Some(name) = name {
        debug!("dns leak iface pick: requested={name}");
        let iface = interfaces
            .iter()
            .find(|iface| iface.name == name)
            .cloned()
            .ok_or_else(|| {
                DnsLeakError::Io(format!(
                    "interface '{name}' not found; candidates: {}",
                    format_iface_list(&interfaces)
                ))
            })?;
        // A named interface is not subject to automatic fallback.
        return open_channel_with_timeout(iface, config).map_err(|err| {
            DnsLeakError::Io(format!(
                "failed to open capture on interface ({err}); candidates: {}",
                format_iface_list(&interfaces)
            ))
        });
    }
    // Auto-pick: probe candidates in preference order; first success wins.
    let ordered = order_interfaces(&interfaces);
    for iface in ordered.iter() {
        debug!("dns leak iface pick: try={}", iface.name);
        match open_channel_with_timeout(iface.clone(), config) {
            Ok(channel) => return Ok(channel),
            Err(err) => {
                debug!(
                    "dns leak iface pick: failed iface={} err={}",
                    iface.name, err
                );
            }
        }
    }
    Err(DnsLeakError::Io(format!(
        "no suitable interface found; candidates: {}",
        format_iface_list(&interfaces)
    )))
}
#[cfg(feature = "pcap")]
/// Open a datalink channel on a helper thread so a hanging driver call cannot
/// block the caller for more than OPEN_IFACE_TIMEOUT_MS.
///
/// NOTE(review): on timeout the helper thread keeps running until the
/// underlying `datalink::channel` call returns; its result is then discarded
/// (the channel send fails silently).
fn open_channel_with_timeout(
    iface: datalink::NetworkInterface,
    config: &DatalinkConfig,
) -> Result<(datalink::NetworkInterface, Box<dyn datalink::DataLinkReceiver>), String> {
    let (tx, rx) = mpsc::channel();
    let config = config.clone();
    std::thread::spawn(move || {
        let result = match datalink::channel(&iface, config) {
            // Only Ethernet channels are supported; the send half is unused.
            Ok(Channel::Ethernet(_, rx)) => Ok(rx),
            Ok(_) => Err("unsupported channel".to_string()),
            Err(err) => Err(err.to_string()),
        };
        let _ = tx.send((iface, result));
    });
    let timeout = Duration::from_millis(OPEN_IFACE_TIMEOUT_MS);
    match rx.recv_timeout(timeout) {
        Ok((iface, Ok(rx))) => Ok((iface, rx)),
        Ok((_iface, Err(err))) => Err(err),
        Err(_) => Err("timeout opening capture".to_string()),
    }
}
#[cfg(feature = "pcap")]
/// Heuristic: does the interface name look like a common wired/wireless
/// adapter? Matching is case-insensitive substring search.
fn is_named_fallback(name: &str) -> bool {
    let lowered = name.to_ascii_lowercase();
    ["wlan", "wifi", "wi-fi", "ethernet", "eth", "lan"]
        .iter()
        .any(|needle| lowered.contains(needle))
}
#[cfg(feature = "pcap")]
/// Order interfaces for auto-selection: likely-real adapters (wifi/ethernet
/// style names, or any with addresses) come first, loopback is skipped, and
/// the untouched input list is returned only if nothing else remains.
fn order_interfaces(
    interfaces: &[datalink::NetworkInterface],
) -> Vec<datalink::NetworkInterface> {
    let mut likely = Vec::new();
    let mut leftover = Vec::new();
    for iface in interfaces {
        if iface.is_loopback() {
            continue;
        }
        let bucket = if is_named_fallback(&iface.name) || !iface.ips.is_empty() {
            &mut likely
        } else {
            &mut leftover
        };
        bucket.push(iface.clone());
    }
    likely.extend(leftover);
    if likely.is_empty() {
        interfaces.to_vec()
    } else {
        likely
    }
}
#[cfg(feature = "pcap")]
/// Render interface names as a comma-separated list for error messages;
/// "-" when there are none.
fn format_iface_list(interfaces: &[datalink::NetworkInterface]) -> String {
    if interfaces.is_empty() {
        return "-".to_string();
    }
    let names: Vec<&str> = interfaces.iter().map(|iface| iface.name.as_str()).collect();
    names.join(", ")
}
#[cfg(feature = "pcap")]
/// Milliseconds since the Unix epoch; 0 if the clock reads before the epoch.
fn now_ms() -> u128 {
    let since_epoch = SystemTime::now().duration_since(UNIX_EPOCH);
    since_epoch.unwrap_or_default().as_millis()
}

View File

@@ -0,0 +1,25 @@
# HTTP probing crate: reqwest-based HTTP/1.x+2 client with optional HTTP/3.
[package]
name = "wtfnet-http"
version = "0.1.0"
edition = "2024"
[dependencies]
# rustls-tls keeps the TLS stack consistent across platforms (no OpenSSL).
reqwest = { version = "0.11", features = ["rustls-tls"] }
# rustls 0.21 stack backs the manual connect/TLS timing path.
rustls = "0.21"
rustls-native-certs = "0.6"
serde = { version = "1", features = ["derive"] }
thiserror = "2"
tokio = { version = "1", features = ["net", "time"] }
tokio-rustls = "0.24"
# SOCKS5 proxy support for the timing measurements.
tokio-socks = "0.5"
url = "2"
tracing = "0.1"
# HTTP/3 stack; quinn 0.11 re-exports its own rustls, separate from the
# rustls 0.21 pin above.
h3 = { version = "0.0.8", optional = true }
h3-quinn = { version = "0.0.10", optional = true }
quinn = { version = "0.11", optional = true }
http = "1"
# Root certificates for the QUIC/HTTP3 client.
webpki-roots = "1"
bytes = "1"
[features]
# Enables the HTTP/3 request path (h3 + h3-quinn + quinn).
http3 = ["dep:h3", "dep:h3-quinn", "dep:quinn"]

View File

@@ -0,0 +1,639 @@
use reqwest::{Client, Method, Proxy, StatusCode};
use rustls::{Certificate, ClientConfig, RootCertStore, ServerName};
use serde::{Deserialize, Serialize};
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::net::lookup_host;
use thiserror::Error;
use tokio::time::timeout;
use tokio_rustls::TlsConnector;
use tokio_socks::tcp::Socks5Stream;
use tracing::debug;
use url::Url;
#[cfg(feature = "http3")]
use bytes::Buf;
#[cfg(feature = "http3")]
use http::Request;
#[cfg(feature = "http3")]
use quinn::ClientConfig as QuinnClientConfig;
#[cfg(feature = "http3")]
use quinn::Endpoint;
#[cfg(feature = "http3")]
use quinn::crypto::rustls::QuicClientConfig;
#[cfg(feature = "http3")]
use webpki_roots::TLS_SERVER_ROOTS;
/// Errors surfaced by the HTTP probe.
#[derive(Debug, Error)]
pub enum HttpError {
    /// The input URL could not be parsed or lacks a host/port.
    #[error("invalid url: {0}")]
    Url(String),
    /// Failure while building or sending the request (incl. connect/proxy).
    #[error("request error: {0}")]
    Request(String),
    /// Failure while reading the response.
    #[error("response error: {0}")]
    Response(String),
}
/// Per-phase latency breakdown; phases that could not be measured are None.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HttpTiming {
    /// Time from sending the request through reading the body (ms);
    /// DNS/connect happen beforehand and are not included.
    pub total_ms: u128,
    pub dns_ms: Option<u128>,
    pub connect_ms: Option<u128>,
    pub tls_ms: Option<u128>,
    /// Time until response headers arrived (ms).
    pub ttfb_ms: Option<u128>,
}
/// Result of one probe request, serializable for report output.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HttpReport {
    /// The URL as originally requested.
    pub url: String,
    /// URL after redirects (or the request URL when redirects are disabled).
    pub final_url: Option<String>,
    pub method: String,
    pub status: Option<u16>,
    /// Debug-formatted version, e.g. "HTTP/1.1", or "HTTP/3" for the h3 path.
    pub http_version: Option<String>,
    /// Sorted, de-duplicated addresses the host resolved to.
    pub resolved_ips: Vec<String>,
    /// Response headers; empty unless header capture was requested.
    pub headers: Vec<(String, String)>,
    /// Truncated, lossily-decoded body; None unless body capture was requested.
    pub body: Option<String>,
    /// Non-fatal issues encountered along the way (timing, http3 fallback).
    pub warnings: Vec<String>,
    pub timing: HttpTiming,
}
/// Request methods supported by the probe.
#[derive(Debug, Clone, Copy)]
pub enum HttpMethod {
    Head,
    Get,
}
impl HttpMethod {
    /// Convert to the corresponding reqwest method.
    fn to_reqwest(self) -> Method {
        match self {
            HttpMethod::Head => Method::HEAD,
            HttpMethod::Get => Method::GET,
        }
    }
}
/// Knobs controlling one probe request.
#[derive(Debug, Clone)]
pub struct HttpRequestOptions {
    pub method: HttpMethod,
    /// Overall request timeout, also used for the timing probes (ms).
    pub timeout_ms: u64,
    /// Max redirects to follow; None disables redirect following.
    pub follow_redirects: Option<u32>,
    /// Body capture is truncated to this many bytes.
    pub max_body_bytes: usize,
    pub show_headers: bool,
    pub show_body: bool,
    /// Force HTTP/1.x on the reqwest client.
    pub http1_only: bool,
    /// Force HTTP/2 with prior knowledge on the reqwest client.
    pub http2_only: bool,
    /// Attempt HTTP/3 first, falling back to the TCP client on failure.
    pub http3: bool,
    /// Require HTTP/3; any h3 failure becomes the request's error.
    pub http3_only: bool,
    /// Proxy URL; SOCKS5 (`socks5://` / `socks5h://`) is what the timing
    /// path understands.
    pub proxy: Option<String>,
}
/// Perform an HTTP request with timing instrumentation.
///
/// Resolution happens first; connect/TLS are then measured on a separate
/// probe connection, and the real request goes through reqwest. When the
/// http3 feature and flags are set, an HTTP/3 attempt is made first with the
/// TCP path as fallback unless `http3_only` is requested.
pub async fn request(url: &str, opts: HttpRequestOptions) -> Result<HttpReport, HttpError> {
    debug!(
        url,
        method = ?opts.method,
        timeout_ms = opts.timeout_ms,
        follow_redirects = ?opts.follow_redirects,
        http1_only = opts.http1_only,
        http2_only = opts.http2_only,
        proxy = ?opts.proxy,
        "http request start"
    );
    let parsed = Url::parse(url).map_err(|err| HttpError::Url(err.to_string()))?;
    let host = parsed
        .host_str()
        .ok_or_else(|| HttpError::Url("missing host".to_string()))?;
    let port = parsed
        .port_or_known_default()
        .ok_or_else(|| HttpError::Url("missing port".to_string()))?;
    // DNS phase: skipped when the host is already an IP literal.
    let mut resolved_ips = Vec::new();
    let dns_start = Instant::now();
    if let Ok(ip) = host.parse::<IpAddr>() {
        resolved_ips.push(ip.to_string());
    } else {
        let addrs = lookup_host((host, port))
            .await
            .map_err(|err| HttpError::Request(err.to_string()))?;
        for addr in addrs {
            resolved_ips.push(addr.ip().to_string());
        }
        resolved_ips.sort();
        resolved_ips.dedup();
        if resolved_ips.is_empty() {
            return Err(HttpError::Request("no addresses resolved".to_string()));
        }
    }
    let dns_ms = dns_start.elapsed().as_millis();
    let mut warnings = Vec::new();
    // HTTP/3 requested but compiled out: warn, or fail if it was mandatory.
    if opts.http3 || opts.http3_only {
        if !cfg!(feature = "http3") {
            warnings.push("http3 feature not enabled in build".to_string());
            if opts.http3_only {
                return Err(HttpError::Request(
                    "http3-only requested but feature is not enabled".to_string(),
                ));
            }
        }
    }
    #[cfg(feature = "http3")]
    {
        if opts.http3 || opts.http3_only {
            match http3_request(url, &opts, &resolved_ips, dns_ms).await {
                Ok((report, mut h3_warnings)) => {
                    // Merge warnings accumulated so far into the h3 report.
                    warnings.append(&mut h3_warnings);
                    return Ok(HttpReport {
                        warnings,
                        ..report
                    });
                }
                Err(err) => {
                    let err_string = err.to_string();
                    let category = classify_http3_error(&err_string);
                    warnings.push(format!(
                        "http3 failed (category={category}): {err_string}"
                    ));
                    // Fall through to the TCP client unless h3 was mandatory.
                    if opts.http3_only {
                        return Err(err);
                    }
                }
            }
        }
    }
    let mut builder = Client::builder().timeout(Duration::from_millis(opts.timeout_ms));
    builder = if let Some(max) = opts.follow_redirects {
        builder.redirect(reqwest::redirect::Policy::limited(max as usize))
    } else {
        builder.redirect(reqwest::redirect::Policy::none())
    };
    if let Some(proxy) = opts.proxy.as_ref() {
        let proxy = Proxy::all(proxy).map_err(|err| HttpError::Request(err.to_string()))?;
        builder = builder.proxy(proxy);
    }
    if opts.http1_only {
        builder = builder.http1_only();
    }
    if opts.http2_only {
        builder = builder.http2_prior_knowledge();
    }
    // Pin reqwest to the first resolved address so the measured connect
    // target and the actual request target agree.
    if let Some(first) = resolved_ips.first() {
        if let Ok(ip) = first.parse::<IpAddr>() {
            let addr = SocketAddr::new(ip, port);
            builder = builder.resolve(host, addr);
        }
    }
    let client = builder.build().map_err(|err| HttpError::Request(err.to_string()))?;
    // Separate probe connection for connect/TLS timings; failures only warn.
    let (connect_ms, tls_ms, timing_warnings) = measure_connect_tls(
        &parsed,
        host,
        port,
        &resolved_ips,
        opts.proxy.as_deref(),
        opts.timeout_ms,
    )
    .await;
    warnings.extend(timing_warnings);
    let start = Instant::now();
    let response = client
        .request(opts.method.to_reqwest(), parsed.clone())
        .send()
        .await
        .map_err(|err| HttpError::Request(err.to_string()))?;
    // `send` resolves once headers arrive, so this approximates TTFB.
    let ttfb_ms = start.elapsed().as_millis();
    let status = response.status();
    let final_url = response.url().to_string();
    let version = response.version();
    let headers = if opts.show_headers {
        response
            .headers()
            .iter()
            .map(|(name, value)| {
                // Non-UTF8 header values are rendered as "-".
                let value = value.to_str().unwrap_or("-").to_string();
                (name.to_string(), value)
            })
            .collect::<Vec<_>>()
    } else {
        Vec::new()
    };
    let body = if opts.show_body {
        let bytes = response
            .bytes()
            .await
            .map_err(|err| HttpError::Response(err.to_string()))?;
        // Truncate to the configured cap before lossy UTF-8 conversion.
        let sliced = if bytes.len() > opts.max_body_bytes {
            &bytes[..opts.max_body_bytes]
        } else {
            &bytes
        };
        Some(String::from_utf8_lossy(sliced).to_string())
    } else {
        None
    };
    // Total spans request send through body read (DNS/connect excluded).
    let total_ms = start.elapsed().as_millis();
    Ok(HttpReport {
        url: url.to_string(),
        final_url: Some(final_url),
        method: match opts.method {
            HttpMethod::Head => "HEAD".to_string(),
            HttpMethod::Get => "GET".to_string(),
        },
        status: status_code(status),
        http_version: Some(format!("{version:?}")),
        resolved_ips,
        headers,
        body,
        warnings,
        timing: HttpTiming {
            total_ms,
            dns_ms: Some(dns_ms),
            connect_ms,
            tls_ms,
            ttfb_ms: Some(ttfb_ms),
        },
    })
}
/// Extract the numeric status code, wrapped to match `HttpReport::status`.
fn status_code(status: StatusCode) -> Option<u16> {
    let code = status.as_u16();
    Some(code)
}
/// Parsed SOCKS5 proxy endpoint.
struct Socks5Proxy {
    // host:port of the proxy itself.
    addr: String,
    // True for socks5h:// — the proxy resolves the target hostname.
    remote_dns: bool,
}
/// Parse a `socks5://host[:port]` or `socks5h://host[:port]` proxy URL.
/// The `h` variant asks the proxy to resolve the target hostname (remote DNS).
fn parse_socks5_proxy(value: &str) -> Result<Socks5Proxy, HttpError> {
    let url = Url::parse(value).map_err(|err| HttpError::Request(err.to_string()))?;
    let scheme = url.scheme();
    let remote_dns = match scheme {
        "socks5" => false,
        "socks5h" => true,
        _ => {
            return Err(HttpError::Request(format!(
                "unsupported proxy scheme: {scheme}"
            )))
        }
    };
    let host = url
        .host_str()
        .ok_or_else(|| HttpError::Request("invalid proxy host".to_string()))?;
    // `Url` knows no default port for socks5 schemes, so the previous
    // `port_or_known_default()` call always failed when the URL omitted the
    // port. Fall back to the conventional SOCKS port 1080 instead.
    let port = url.port().unwrap_or(1080);
    Ok(Socks5Proxy {
        addr: format!("{host}:{port}"),
        remote_dns,
    })
}
/// Best-effort connect/TLS latency measurement over a throwaway connection.
///
/// Returns `(connect_ms, tls_ms, warnings)`; any phase that fails or cannot
/// be measured yields `None` plus a warning instead of an error, so the main
/// request can still proceed.
async fn measure_connect_tls(
    parsed: &Url,
    host: &str,
    port: u16,
    resolved_ips: &[String],
    proxy: Option<&str>,
    timeout_ms: u64,
) -> (Option<u128>, Option<u128>, Vec<String>) {
    let mut warnings = Vec::new();
    let scheme = parsed.scheme();
    if scheme != "http" && scheme != "https" {
        warnings.push(format!("timing unavailable for scheme: {scheme}"));
        return (None, None, warnings);
    }
    let timeout_dur = Duration::from_millis(timeout_ms);
    let connect_start = Instant::now();
    // TCP phase: via a SOCKS5 proxy when configured, otherwise direct to the
    // first resolved address.
    let tcp = if let Some(proxy) = proxy {
        match parse_socks5_proxy(proxy) {
            Ok(proxy) => {
                // socks5h: the proxy resolves the hostname; socks5: we
                // connect by the locally resolved IP.
                let target = if proxy.remote_dns {
                    (host, port)
                } else if let Some(ip) = resolved_ips.first() {
                    (ip.as_str(), port)
                } else {
                    warnings.push("no resolved IPs for proxy connect".to_string());
                    return (None, None, warnings);
                };
                match timeout(timeout_dur, Socks5Stream::connect(proxy.addr.as_str(), target))
                    .await
                {
                    Ok(Ok(stream)) => stream.into_inner(),
                    Ok(Err(err)) => {
                        warnings.push(format!("proxy connect failed: {err}"));
                        return (None, None, warnings);
                    }
                    Err(_) => {
                        warnings.push("proxy connect timed out".to_string());
                        return (None, None, warnings);
                    }
                }
            }
            Err(err) => {
                warnings.push(format!("proxy timing skipped: {err}"));
                return (None, None, warnings);
            }
        }
    } else {
        let addr = match resolved_ips.first().and_then(|ip| ip.parse::<IpAddr>().ok()) {
            Some(ip) => SocketAddr::new(ip, port),
            None => {
                warnings.push("no resolved IPs for connect timing".to_string());
                return (None, None, warnings);
            }
        };
        match timeout(timeout_dur, tokio::net::TcpStream::connect(addr)).await {
            Ok(Ok(stream)) => stream,
            Ok(Err(err)) => {
                warnings.push(format!("connect failed: {err}"));
                return (None, None, warnings);
            }
            Err(_) => {
                warnings.push("connect timed out".to_string());
                return (None, None, warnings);
            }
        }
    };
    let connect_ms = connect_start.elapsed().as_millis();
    // Plain HTTP has no TLS phase to measure.
    if scheme == "http" {
        return (Some(connect_ms), None, warnings);
    }
    // TLS phase: full handshake against `host` on the probed TCP stream.
    let tls_start = Instant::now();
    let tls = match build_tls_connector() {
        Ok(connector) => connector,
        Err(err) => {
            warnings.push(format!("tls timing skipped: {err}"));
            return (Some(connect_ms), None, warnings);
        }
    };
    let server_name = match ServerName::try_from(host) {
        Ok(name) => name,
        Err(_) => {
            warnings.push("invalid tls server name".to_string());
            return (Some(connect_ms), None, warnings);
        }
    };
    match timeout(timeout_dur, tls.connect(server_name, tcp)).await {
        Ok(Ok(_)) => {}
        Ok(Err(err)) => {
            warnings.push(format!("tls handshake failed: {err}"));
            return (Some(connect_ms), None, warnings);
        }
        Err(_) => {
            warnings.push("tls handshake timed out".to_string());
            return (Some(connect_ms), None, warnings);
        }
    }
    let tls_ms = tls_start.elapsed().as_millis();
    (Some(connect_ms), Some(tls_ms), warnings)
}
/// Build a rustls (0.21 API) `TlsConnector` trusting the OS certificate store.
fn build_tls_connector() -> Result<TlsConnector, HttpError> {
    let mut roots = RootCertStore::empty();
    // Load the platform's native trust anchors; failure becomes a Request error.
    let store = rustls_native_certs::load_native_certs()
        .map_err(|err| HttpError::Request(err.to_string()))?;
    for cert in store {
        roots
            .add(&Certificate(cert.0))
            .map_err(|err| HttpError::Request(err.to_string()))?;
    }
    let config = ClientConfig::builder()
        .with_safe_defaults()
        .with_root_certificates(roots)
        .with_no_client_auth();
    Ok(TlsConnector::from(Arc::new(config)))
}
#[cfg(feature = "http3")]
/// Map an HTTP/3 error string to a coarse failure category. Checks are
/// ordered; the first matching category wins.
fn classify_http3_error(message: &str) -> &'static str {
    let text = message.to_ascii_lowercase();
    let has = |needle: &str| text.contains(needle);
    if has("timeout") || has("timed out") {
        "timeout"
    } else if has("no resolved ips") || has("no addresses resolved") {
        "resolve"
    } else if has("udp") && has("blocked") {
        "udp_blocked"
    } else if has("quic") || has("connection refused") {
        "connect"
    } else if has("alpn") || has("application protocol") {
        "alpn"
    } else if has("tls") || has("certificate") || has("crypto") || has("handshake") {
        "tls"
    } else if has("permission denied") || has("access is denied") {
        "permission"
    } else {
        "unknown"
    }
}
#[cfg(feature = "http3")]
/// Issue the request over HTTP/3 (QUIC). Returns the report plus warnings
/// gathered along the way; the caller merges those warnings into the report.
async fn http3_request(
    url: &str,
    opts: &HttpRequestOptions,
    resolved_ips: &[String],
    dns_ms: u128,
) -> Result<(HttpReport, Vec<String>), HttpError> {
    let mut warnings = Vec::new();
    let parsed = Url::parse(url).map_err(|err| HttpError::Url(err.to_string()))?;
    if parsed.scheme() != "https" {
        return Err(HttpError::Request("http3 requires https scheme".to_string()));
    }
    if opts.proxy.is_some() {
        return Err(HttpError::Request(
            "http3 proxying is not supported".to_string(),
        ));
    }
    let host = parsed
        .host_str()
        .ok_or_else(|| HttpError::Url("missing host".to_string()))?;
    let port = parsed
        .port_or_known_default()
        .ok_or_else(|| HttpError::Url("missing port".to_string()))?;
    let quinn_config = build_quinn_config()?;
    let candidates = resolved_ips
        .iter()
        .filter_map(|value| value.parse::<IpAddr>().ok())
        .collect::<Vec<_>>();
    if candidates.is_empty() {
        return Err(HttpError::Request("no resolved IPs for http3".to_string()));
    }
    // Try each resolved address until a QUIC connection succeeds.
    let mut endpoint_guard = None;
    let mut connection = None;
    let mut connect_ms = None;
    for ip in candidates {
        // Bind an unspecified local socket of the matching address family.
        let bind_addr = match ip {
            IpAddr::V4(_) => "0.0.0.0:0",
            IpAddr::V6(_) => "[::]:0",
        };
        let mut endpoint = Endpoint::client(bind_addr.parse().unwrap())
            .map_err(|err| HttpError::Request(err.to_string()))?;
        endpoint.set_default_client_config(quinn_config.clone());
        let connect_start = Instant::now();
        // SNI uses the URL host; the socket targets the resolved IP.
        let connecting = match endpoint.connect(SocketAddr::new(ip, port), host) {
            Ok(connecting) => connecting,
            Err(err) => {
                warnings.push(format!("http3 connect failed to {ip}: {err}"));
                continue;
            }
        };
        match timeout(Duration::from_millis(opts.timeout_ms), connecting).await {
            Ok(Ok(conn)) => {
                connect_ms = Some(connect_start.elapsed().as_millis());
                connection = Some(conn);
                // Keep the endpoint alive for the connection's lifetime.
                endpoint_guard = Some(endpoint);
                break;
            }
            Ok(Err(err)) => {
                warnings.push(format!("http3 connect failed to {ip}: {err}"));
            }
            Err(_) => {
                warnings.push(format!("http3 connect to {ip} timed out"));
            }
        }
    }
    let connection = connection.ok_or_else(|| {
        HttpError::Request("http3 connect failed for all resolved IPs".to_string())
    })?;
    let connect_ms = connect_ms.unwrap_or_default();
    let conn = h3_quinn::Connection::new(connection);
    let (mut driver, mut send_request) = h3::client::new(conn)
        .await
        .map_err(|err| HttpError::Request(err.to_string()))?;
    // The connection driver must be polled for the session to make progress.
    tokio::spawn(async move {
        let _ = driver.wait_idle().await;
    });
    let start = Instant::now();
    let method = match opts.method {
        HttpMethod::Head => http::Method::HEAD,
        HttpMethod::Get => http::Method::GET,
    };
    let request = Request::builder()
        .method(method)
        .uri(parsed.as_str())
        .header("user-agent", "wtfnet")
        .body(())
        .map_err(|err| HttpError::Request(err.to_string()))?;
    let mut stream = send_request
        .send_request(request)
        .await
        .map_err(|err| HttpError::Request(err.to_string()))?;
    // No request body: close the send side before reading the response.
    stream
        .finish()
        .await
        .map_err(|err| HttpError::Request(err.to_string()))?;
    let response = stream
        .recv_response()
        .await
        .map_err(|err| HttpError::Response(err.to_string()))?;
    let ttfb_ms = start.elapsed().as_millis();
    let status = response.status();
    let final_url = parsed.to_string();
    let headers = if opts.show_headers {
        response
            .headers()
            .iter()
            .map(|(name, value)| {
                // Non-UTF8 header values are rendered as "-".
                let value = value.to_str().unwrap_or("-").to_string();
                (name.to_string(), value)
            })
            .collect::<Vec<_>>()
    } else {
        Vec::new()
    };
    let body = if opts.show_body {
        let mut buf = Vec::new();
        // Drain DATA frames until the stream ends or the size cap is hit.
        while let Some(chunk) = stream
            .recv_data()
            .await
            .map_err(|err| HttpError::Response(err.to_string()))?
        {
            let mut chunk = chunk;
            while chunk.has_remaining() {
                let bytes = chunk.copy_to_bytes(chunk.remaining());
                buf.extend_from_slice(&bytes);
            }
            if buf.len() >= opts.max_body_bytes {
                buf.truncate(opts.max_body_bytes);
                break;
            }
        }
        Some(String::from_utf8_lossy(&buf).to_string())
    } else {
        None
    };
    let total_ms = start.elapsed().as_millis();
    warnings.push("http3 timing for tls/connect is best-effort".to_string());
    let _endpoint_guard = endpoint_guard;
    // `warnings` is deliberately left empty in the report: the caller
    // substitutes the warnings list returned alongside it.
    let report = HttpReport {
        url: url.to_string(),
        final_url: Some(final_url),
        method: match opts.method {
            HttpMethod::Head => "HEAD".to_string(),
            HttpMethod::Get => "GET".to_string(),
        },
        status: Some(status.as_u16()),
        http_version: Some("HTTP/3".to_string()),
        resolved_ips: resolved_ips.to_vec(),
        headers,
        body,
        warnings: Vec::new(),
        timing: HttpTiming {
            total_ms,
            dns_ms: Some(dns_ms),
            connect_ms: Some(connect_ms),
            tls_ms: None,
            ttfb_ms: Some(ttfb_ms),
        },
    };
    Ok((report, warnings))
}
#[cfg(feature = "http3")]
/// QUIC client config for HTTP/3: webpki roots, ALPN "h3", 5s keep-alive.
fn build_quinn_config() -> Result<QuinnClientConfig, HttpError> {
    // Uses quinn's re-exported rustls types, which are independent of the
    // rustls 0.21 stack used by the TCP timing path.
    let mut roots = quinn::rustls::RootCertStore::empty();
    roots.extend(TLS_SERVER_ROOTS.iter().cloned());
    let mut crypto = quinn::rustls::ClientConfig::builder()
        .with_root_certificates(roots)
        .with_no_client_auth();
    // HTTP/3 mandates the "h3" ALPN token.
    crypto.alpn_protocols = vec![b"h3".to_vec()];
    let mut client_config = QuinnClientConfig::new(Arc::new(
        QuicClientConfig::try_from(crypto)
            .map_err(|err| HttpError::Request(format!("quinn config error: {err}")))?,
    ));
    let mut transport = quinn::TransportConfig::default();
    transport.keep_alive_interval(Some(Duration::from_secs(5)));
    client_config.transport_config(Arc::new(transport));
    Ok(client_config)
}

View File

@@ -2,11 +2,13 @@ use async_trait::async_trait;
use network_interface::{Addr, NetworkInterface, NetworkInterfaceConfig}; use network_interface::{Addr, NetworkInterface, NetworkInterfaceConfig};
use sha2::Digest; use sha2::Digest;
use std::collections::HashMap; use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc; use std::sync::Arc;
use wtfnet_core::ErrorCode; use wtfnet_core::ErrorCode;
use wtfnet_platform::{ use wtfnet_platform::{
CertProvider, DnsConfigSnapshot, ListenSocket, NeighborEntry, NeighProvider, NetInterface, CertProvider, ConnSocket, DnsConfigSnapshot, FlowOwner, FlowOwnerConfidence, FlowOwnerProvider,
Platform, PlatformError, PortsProvider, RootCert, RouteEntry, SysProvider, FlowOwnerResult, FlowProtocol, FlowTuple, ListenSocket, NeighborEntry, NeighProvider,
NetInterface, Platform, PlatformError, PortsProvider, RootCert, RouteEntry, SysProvider,
}; };
use x509_parser::oid_registry::{ use x509_parser::oid_registry::{
OID_KEY_TYPE_DSA, OID_KEY_TYPE_EC_PUBLIC_KEY, OID_KEY_TYPE_GOST_R3410_2012_256, OID_KEY_TYPE_DSA, OID_KEY_TYPE_EC_PUBLIC_KEY, OID_KEY_TYPE_GOST_R3410_2012_256,
@@ -19,6 +21,7 @@ pub fn platform() -> Platform {
ports: Arc::new(LinuxPortsProvider), ports: Arc::new(LinuxPortsProvider),
cert: Arc::new(LinuxCertProvider), cert: Arc::new(LinuxCertProvider),
neigh: Arc::new(LinuxNeighProvider), neigh: Arc::new(LinuxNeighProvider),
flow_owner: Arc::new(LinuxFlowOwnerProvider),
} }
} }
@@ -26,6 +29,7 @@ struct LinuxSysProvider;
struct LinuxPortsProvider; struct LinuxPortsProvider;
struct LinuxCertProvider; struct LinuxCertProvider;
struct LinuxNeighProvider; struct LinuxNeighProvider;
struct LinuxFlowOwnerProvider;
#[async_trait] #[async_trait]
impl SysProvider for LinuxSysProvider { impl SysProvider for LinuxSysProvider {
@@ -240,6 +244,63 @@ fn parse_linux_tcp_with_inode_map(
Ok(sockets) Ok(sockets)
} }
/// Parse a `/proc/net/tcp{,6}` table into `ConnSocket`s, excluding listening
/// sockets (state 0A) and attaching process info via the socket-inode map.
fn parse_linux_tcp_conns(
    path: &str,
    is_v6: bool,
    inode_map: &HashMap<String, ProcInfo>,
) -> Result<Vec<ConnSocket>, PlatformError> {
    let contents = std::fs::read_to_string(path)
        .map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
    let mut sockets = Vec::new();
    for (idx, line) in contents.lines().enumerate() {
        // First line is the column header.
        if idx == 0 {
            continue;
        }
        let parts: Vec<&str> = line.split_whitespace().collect();
        if parts.len() < 4 {
            continue;
        }
        // Column layout: [1]=local_address [2]=rem_address [3]=st [9]=inode.
        let local = parts[1];
        let remote = parts[2];
        let state = parts[3];
        let inode = parts.get(9).copied();
        // 0A = LISTEN; listeners are reported by the ports provider instead.
        if state == "0A" {
            continue;
        }
        let local_addr = match parse_proc_socket_addr(local, is_v6) {
            Some(addr) => addr,
            None => continue,
        };
        let remote_addr = match parse_proc_socket_addr(remote, is_v6) {
            Some(addr) => addr,
            None => continue,
        };
        // Resolve the owning process from the socket inode, if known.
        let (pid, ppid, process_name, process_path) =
            inode.and_then(|value| inode_map.get(value)).map_or(
                (None, None, None, None),
                |info| {
                    (
                        Some(info.pid),
                        info.ppid,
                        info.name.clone(),
                        info.path.clone(),
                    )
                },
            );
        sockets.push(ConnSocket {
            proto: "tcp".to_string(),
            local_addr,
            remote_addr,
            state: Some(map_tcp_state(state)),
            pid,
            ppid,
            process_name,
            process_path,
        });
    }
    Ok(sockets)
}
fn parse_linux_udp_with_inode_map( fn parse_linux_udp_with_inode_map(
path: &str, path: &str,
is_v6: bool, is_v6: bool,
@@ -286,6 +347,24 @@ fn parse_linux_udp_with_inode_map(
Ok(sockets) Ok(sockets)
} }
/// Translate a hex TCP state code from /proc/net/tcp into its kernel name.
fn map_tcp_state(value: &str) -> String {
    let name = match value {
        "01" => "ESTABLISHED",
        "02" => "SYN_SENT",
        "03" => "SYN_RECV",
        "04" => "FIN_WAIT1",
        "05" => "FIN_WAIT2",
        "06" => "TIME_WAIT",
        "07" => "CLOSE",
        "08" => "CLOSE_WAIT",
        "09" => "LAST_ACK",
        "0A" => "LISTEN",
        "0B" => "CLOSING",
        _ => "UNKNOWN",
    };
    name.to_string()
}
fn parse_proc_socket_addr(value: &str, is_v6: bool) -> Option<String> { fn parse_proc_socket_addr(value: &str, is_v6: bool) -> Option<String> {
let mut parts = value.split(':'); let mut parts = value.split(':');
let addr_hex = parts.next()?; let addr_hex = parts.next()?;
@@ -300,6 +379,20 @@ fn parse_proc_socket_addr(value: &str, is_v6: bool) -> Option<String> {
} }
} }
/// Parse a `/proc/net/*` endpoint of the form HEXADDR:HEXPORT into a
/// `SocketAddr`; returns None for malformed fields.
fn parse_proc_socket_addr_value(value: &str, is_v6: bool) -> Option<SocketAddr> {
    let (addr_hex, port_hex) = {
        let mut pieces = value.split(':');
        (pieces.next()?, pieces.next()?)
    };
    let port = u16::from_str_radix(port_hex, 16).ok()?;
    let ip = if is_v6 {
        IpAddr::V6(parse_ipv6_hex(addr_hex)?)
    } else {
        IpAddr::V4(parse_ipv4_hex(addr_hex)?)
    };
    Some(SocketAddr::new(ip, port))
}
fn parse_linux_arp(contents: &str) -> Vec<NeighborEntry> { fn parse_linux_arp(contents: &str) -> Vec<NeighborEntry> {
let mut neighbors = Vec::new(); let mut neighbors = Vec::new();
for (idx, line) in contents.lines().enumerate() { for (idx, line) in contents.lines().enumerate() {
@@ -407,6 +500,138 @@ fn read_ppid(pid: u32) -> Option<u32> {
Some(ppid) Some(ppid)
} }
/// One row of a /proc/net socket table, reduced to what flow-owner matching
/// needs: both endpoints and the socket inode.
#[derive(Clone)]
struct ProcSocketEntry {
    local: SocketAddr,
    remote: SocketAddr,
    // Socket inode as text; key into the pid/inode map.
    inode: String,
}
/// Read one /proc/net table into (local, remote, inode) entries for flow
/// owner matching; the header row and malformed rows are skipped.
fn parse_proc_socket_entries(
    path: &str,
    is_v6: bool,
) -> Result<Vec<ProcSocketEntry>, PlatformError> {
    let contents = std::fs::read_to_string(path)
        .map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
    let mut entries = Vec::new();
    for (idx, line) in contents.lines().enumerate() {
        // First line is the column header.
        if idx == 0 {
            continue;
        }
        let parts: Vec<&str> = line.split_whitespace().collect();
        // Need at least the inode column (index 9).
        if parts.len() < 10 {
            continue;
        }
        let local = parts[1];
        let remote = parts[2];
        let inode = match parts.get(9) {
            Some(value) => (*value).to_string(),
            None => continue,
        };
        let local_addr = match parse_proc_socket_addr_value(local, is_v6) {
            Some(addr) => addr,
            None => continue,
        };
        let remote_addr = match parse_proc_socket_addr_value(remote, is_v6) {
            Some(addr) => addr,
            None => continue,
        };
        entries.push(ProcSocketEntry {
            local: local_addr,
            remote: remote_addr,
            inode,
        });
    }
    Ok(entries)
}
/// Finds the first socket-table entry whose endpoints match `flow`.
///
/// The local side matches when the ports are equal and the entry's IP equals
/// the flow's source IP, is the wildcard address, or both sides are loopback.
/// With `match_remote` set the remote endpoint must also match, yielding
/// `High` confidence; otherwise a local-only match returns `Medium`.
fn match_flow_entry<'a>(
    flow: &FlowTuple,
    entries: &'a [ProcSocketEntry],
    match_remote: bool,
) -> Option<(&'a ProcSocketEntry, FlowOwnerConfidence)> {
    for entry in entries {
        // `&&` binds tighter than `||`: the loopback clause only applies when
        // BOTH the entry's local IP and the flow's source IP are loopback.
        let local_match = entry.local.port() == flow.src_port
            && (entry.local.ip() == flow.src_ip
                || entry.local.ip().is_unspecified()
                || entry.local.ip().is_loopback() && flow.src_ip.is_loopback());
        if !local_match {
            continue;
        }
        if match_remote {
            // Remote side: exact IP match or a wildcard (e.g. not-yet-connected).
            let remote_match = entry.remote.port() == flow.dst_port
                && (entry.remote.ip() == flow.dst_ip
                    || entry.remote.ip().is_unspecified());
            if remote_match {
                return Some((entry, FlowOwnerConfidence::High));
            }
        } else {
            return Some((entry, FlowOwnerConfidence::Medium));
        }
    }
    None
}
/// Resolves which local process owns the socket behind `flow` (Linux).
///
/// Reads the protocol's `/proc/net` tables (both v4 and v6), matches the
/// flow's endpoints against them, then maps the matched socket inode to a
/// process. TCP is matched on the full 4-tuple (High confidence); UDP on the
/// local endpoint only (Medium confidence).
fn resolve_flow_owner(
    flow: &FlowTuple,
) -> Result<FlowOwnerResult, PlatformError> {
    // Inode → process map is built once up front; it scans /proc elsewhere.
    let inode_map = build_inode_map();
    let entries = match flow.proto {
        FlowProtocol::Tcp => {
            let mut out = parse_proc_socket_entries("/proc/net/tcp", false)?;
            out.extend(parse_proc_socket_entries("/proc/net/tcp6", true)?);
            out
        }
        FlowProtocol::Udp => {
            let mut out = parse_proc_socket_entries("/proc/net/udp", false)?;
            out.extend(parse_proc_socket_entries("/proc/net/udp6", true)?);
            out
        }
    };
    let match_remote = matches!(flow.proto, FlowProtocol::Tcp);
    // NOTE(review): for UDP `match_remote` is already false, so this fallback
    // repeats the identical query and can never change the outcome; it looks
    // like it was intended to give TCP a local-only fallback — confirm intent.
    let matched = match_flow_entry(flow, &entries, match_remote)
        .or_else(|| {
            if matches!(flow.proto, FlowProtocol::Udp) {
                match_flow_entry(flow, &entries, false)
            } else {
                None
            }
        });
    let (entry, confidence) = match matched {
        Some(value) => value,
        None => {
            return Ok(FlowOwnerResult {
                owner: None,
                confidence: FlowOwnerConfidence::None,
                failure_reason: Some("no socket match".to_string()),
            })
        }
    };
    // A matched socket whose inode has no process entry is reported as Low
    // confidence rather than an error.
    let owner = inode_map.get(&entry.inode).map(|info| FlowOwner {
        pid: Some(info.pid),
        ppid: info.ppid,
        process_name: info.name.clone(),
        process_path: info.path.clone(),
    });
    if owner.is_none() {
        return Ok(FlowOwnerResult {
            owner: None,
            confidence: FlowOwnerConfidence::Low,
            failure_reason: Some("socket owner not found".to_string()),
        });
    }
    Ok(FlowOwnerResult {
        owner,
        confidence,
        failure_reason: None,
    })
}
fn load_native_roots(store: &str) -> Result<Vec<RootCert>, PlatformError> { fn load_native_roots(store: &str) -> Result<Vec<RootCert>, PlatformError> {
let certs = rustls_native_certs::load_native_certs() let certs = rustls_native_certs::load_native_certs()
.map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?; .map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
@@ -518,6 +743,22 @@ impl PortsProvider for LinuxPortsProvider {
.filter(|socket| extract_port(&socket.local_addr) == Some(port)) .filter(|socket| extract_port(&socket.local_addr) == Some(port))
.collect()) .collect())
} }
/// Enumerates current TCP connections from /proc/net/tcp and /proc/net/tcp6,
/// enriching each row with process info via a shared inode map.
async fn connections(&self) -> Result<Vec<ConnSocket>, PlatformError> {
    // Build the inode → process map once and reuse it for both table scans.
    let inode_map = build_inode_map();
    let mut out = parse_linux_tcp_conns("/proc/net/tcp", false, &inode_map)?;
    out.extend(parse_linux_tcp_conns("/proc/net/tcp6", true, &inode_map)?);
    Ok(out)
}
} }
#[async_trait] #[async_trait]
@@ -535,3 +776,10 @@ impl NeighProvider for LinuxNeighProvider {
Ok(parse_linux_arp(&contents)) Ok(parse_linux_arp(&contents))
} }
} }
#[async_trait]
impl FlowOwnerProvider for LinuxFlowOwnerProvider {
    /// Delegates to the synchronous `/proc`-based lookup in `resolve_flow_owner`.
    async fn owner_of(&self, flow: FlowTuple) -> Result<FlowOwnerResult, PlatformError> {
        resolve_flow_owner(&flow)
    }
}

View File

@@ -2,6 +2,7 @@ use async_trait::async_trait;
use network_interface::{Addr, NetworkInterface, NetworkInterfaceConfig}; use network_interface::{Addr, NetworkInterface, NetworkInterfaceConfig};
use regex::Regex; use regex::Regex;
use std::collections::HashMap; use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};
use sha2::Digest; use sha2::Digest;
use x509_parser::oid_registry::{ use x509_parser::oid_registry::{
OID_KEY_TYPE_DSA, OID_KEY_TYPE_EC_PUBLIC_KEY, OID_KEY_TYPE_GOST_R3410_2012_256, OID_KEY_TYPE_DSA, OID_KEY_TYPE_EC_PUBLIC_KEY, OID_KEY_TYPE_GOST_R3410_2012_256,
@@ -10,8 +11,9 @@ use x509_parser::oid_registry::{
use std::sync::Arc; use std::sync::Arc;
use wtfnet_core::ErrorCode; use wtfnet_core::ErrorCode;
use wtfnet_platform::{ use wtfnet_platform::{
CertProvider, DnsConfigSnapshot, ListenSocket, NeighborEntry, NeighProvider, NetInterface, CertProvider, ConnSocket, DnsConfigSnapshot, FlowOwner, FlowOwnerConfidence, FlowOwnerProvider,
Platform, PlatformError, PortsProvider, RootCert, RouteEntry, SysProvider, FlowOwnerResult, FlowProtocol, FlowTuple, ListenSocket, NeighborEntry, NeighProvider,
NetInterface, Platform, PlatformError, PortsProvider, RootCert, RouteEntry, SysProvider,
}; };
pub fn platform() -> Platform { pub fn platform() -> Platform {
@@ -20,6 +22,7 @@ pub fn platform() -> Platform {
ports: Arc::new(WindowsPortsProvider), ports: Arc::new(WindowsPortsProvider),
cert: Arc::new(WindowsCertProvider), cert: Arc::new(WindowsCertProvider),
neigh: Arc::new(WindowsNeighProvider), neigh: Arc::new(WindowsNeighProvider),
flow_owner: Arc::new(WindowsFlowOwnerProvider),
} }
} }
@@ -27,6 +30,7 @@ struct WindowsSysProvider;
struct WindowsPortsProvider; struct WindowsPortsProvider;
struct WindowsCertProvider; struct WindowsCertProvider;
struct WindowsNeighProvider; struct WindowsNeighProvider;
struct WindowsFlowOwnerProvider;
#[async_trait] #[async_trait]
impl SysProvider for WindowsSysProvider { impl SysProvider for WindowsSysProvider {
@@ -333,6 +337,33 @@ fn parse_windows_listeners() -> Result<Vec<ListenSocket>, PlatformError> {
Ok(sockets) Ok(sockets)
} }
/// Snapshots non-listening TCP connections on Windows via `netstat -ano`,
/// attaching process name/path from the process map where the PID is known.
fn parse_windows_connections() -> Result<Vec<ConnSocket>, PlatformError> {
    let proc_map = load_windows_process_map();
    let output = std::process::Command::new("netstat")
        .arg("-ano")
        .output()
        .map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
    if !output.status.success() {
        return Err(PlatformError::new(ErrorCode::IoError, "netstat -ano failed"));
    }
    let stdout = String::from_utf8_lossy(&output.stdout);
    let mut connections = Vec::new();
    for raw in stdout.lines() {
        let line = raw.trim();
        // Only TCP rows carry a connection state; UDP rows are skipped here.
        if !line.starts_with("TCP") {
            continue;
        }
        if let Some(mut conn) = parse_netstat_tcp_conn_line(line) {
            enrich_conn_socket(&mut conn, &proc_map);
            connections.push(conn);
        }
    }
    Ok(connections)
}
fn parse_netstat_tcp_line(line: &str) -> Option<ListenSocket> { fn parse_netstat_tcp_line(line: &str) -> Option<ListenSocket> {
let parts: Vec<&str> = line.split_whitespace().collect(); let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 5 { if parts.len() < 5 {
@@ -358,6 +389,32 @@ fn parse_netstat_tcp_line(line: &str) -> Option<ListenSocket> {
}) })
} }
/// Parses one `netstat -ano` TCP row into a `ConnSocket`.
///
/// Expects the columns `proto local remote state pid`; returns `None` for rows
/// with too few columns and for LISTENING sockets (those belong to the
/// listeners listing, not the connection listing). A non-numeric PID column is
/// tolerated and recorded as `None`.
fn parse_netstat_tcp_conn_line(line: &str) -> Option<ConnSocket> {
    let mut fields = line.split_whitespace();
    let _proto = fields.next()?;
    let local = fields.next()?;
    let remote = fields.next()?;
    let state = fields.next()?;
    let pid_field = fields.next()?;
    if state == "LISTENING" {
        return None;
    }
    Some(ConnSocket {
        proto: "tcp".to_string(),
        local_addr: local.to_string(),
        remote_addr: remote.to_string(),
        state: Some(state.to_string()),
        pid: pid_field.parse::<u32>().ok(),
        ppid: None,
        process_name: None,
        process_path: None,
    })
}
fn parse_netstat_udp_line(line: &str) -> Option<ListenSocket> { fn parse_netstat_udp_line(line: &str) -> Option<ListenSocket> {
let parts: Vec<&str> = line.split_whitespace().collect(); let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 4 { if parts.len() < 4 {
@@ -429,6 +486,17 @@ fn enrich_socket(socket: &mut ListenSocket, map: &HashMap<u32, ProcInfo>) {
} }
} }
/// Fills in process name/path on `socket` from `map`, keyed by its PID.
/// Sockets without a PID, or whose PID has no map entry, are left untouched.
fn enrich_conn_socket(socket: &mut ConnSocket, map: &HashMap<u32, ProcInfo>) {
    if let Some(info) = socket.pid.and_then(|pid| map.get(&pid)) {
        socket.process_name = info.name.clone();
        socket.process_path = info.path.clone();
    }
}
#[derive(Clone)] #[derive(Clone)]
struct ProcInfo { struct ProcInfo {
name: Option<String>, name: Option<String>,
@@ -515,6 +583,155 @@ fn parse_csv_line(line: &str) -> Vec<String> {
out out
} }
/// A single non-listening socket row parsed out of `netstat -ano` output.
#[derive(Clone)]
struct FlowEntry {
    /// Transport protocol of the row (TCP or UDP).
    proto: FlowProtocol,
    /// Local endpoint column.
    local: SocketAddr,
    /// Remote endpoint; `None` for UDP rows (netstat reports no peer).
    remote: Option<SocketAddr>,
    /// Owning process id as reported by netstat.
    pid: u32,
}
/// Runs `netstat -ano` and parses its TCP/UDP rows into `FlowEntry` values.
///
/// TCP rows in LISTENING state are skipped (they are not flows); rows with
/// too few columns or unparsable addresses/PIDs are silently dropped.
fn parse_netstat_flow_entries() -> Result<Vec<FlowEntry>, PlatformError> {
    let output = std::process::Command::new("netstat")
        .arg("-ano")
        .output()
        .map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
    if !output.status.success() {
        return Err(PlatformError::new(ErrorCode::IoError, "netstat -ano failed"));
    }
    // netstat output is OEM-codepage text; lossy conversion is acceptable
    // because only the ASCII columns are consumed.
    let text = String::from_utf8_lossy(&output.stdout);
    let mut entries = Vec::new();
    for line in text.lines() {
        let trimmed = line.trim();
        if trimmed.starts_with("TCP") {
            // TCP columns: proto, local, remote, state, pid
            let parts: Vec<&str> = trimmed.split_whitespace().collect();
            if parts.len() < 5 {
                continue;
            }
            let state = parts[3];
            if state == "LISTENING" {
                continue;
            }
            let local = match parse_netstat_addr(parts[1]) {
                Some(addr) => addr,
                None => continue,
            };
            let remote = match parse_netstat_addr(parts[2]) {
                Some(addr) => addr,
                None => continue,
            };
            let pid = match parts[4].parse::<u32>() {
                Ok(pid) => pid,
                Err(_) => continue,
            };
            entries.push(FlowEntry {
                proto: FlowProtocol::Tcp,
                local,
                remote: Some(remote),
                pid,
            });
        } else if trimmed.starts_with("UDP") {
            // UDP columns: proto, local, remote ("*:*"), pid — no state column.
            let parts: Vec<&str> = trimmed.split_whitespace().collect();
            if parts.len() < 4 {
                continue;
            }
            let local = match parse_netstat_addr(parts[1]) {
                Some(addr) => addr,
                None => continue,
            };
            let pid = match parts[3].parse::<u32>() {
                Ok(pid) => pid,
                Err(_) => continue,
            };
            entries.push(FlowEntry {
                proto: FlowProtocol::Udp,
                local,
                remote: None,
                pid,
            });
        }
    }
    Ok(entries)
}
/// Parses a netstat address column into a `SocketAddr`.
///
/// Accepts `127.0.0.1:80`, bracketed IPv6 `[::1]:443` (an IPv6 zone suffix
/// like `%5` inside the brackets is stripped), and returns `None` for the
/// wildcard `*:*` and for any malformed input.
fn parse_netstat_addr(value: &str) -> Option<SocketAddr> {
    let value = value.trim();
    if value == "*:*" {
        return None;
    }
    if let Some(rest) = value.strip_prefix('[') {
        // Bracketed IPv6 form: [host%zone]:port
        let end = rest.find(']')?;
        let host = &rest[..end];
        // Require an explicit ":port" after the closing bracket. The previous
        // unchecked `rest[end + 2..]` slice panicked on inputs with no port
        // (e.g. "[::1]") and silently skipped whatever byte followed ']'.
        let port = rest[end + 1..].strip_prefix(':')?.parse::<u16>().ok()?;
        // Drop an interface zone suffix ("fe80::1%5") before parsing the IP.
        let host = host.split('%').next().unwrap_or(host);
        let ip: IpAddr = host.parse().ok()?;
        return Some(SocketAddr::new(ip, port));
    }
    // IPv4 (or any unbracketed) form: split on the last ':'.
    let pos = value.rfind(':')?;
    let host = &value[..pos];
    let port = value[pos + 1..].parse::<u16>().ok()?;
    let ip: IpAddr = host.parse().ok()?;
    Some(SocketAddr::new(ip, port))
}
/// Resolves the owning process for `flow` on Windows using `netstat -ano`.
///
/// TCP flows require a full 4-tuple match (High confidence); UDP flows match
/// on the local endpoint only (Medium confidence). Unlike the Linux path, a
/// matched PID with no process-map entry still yields an owner (with empty
/// name/path) rather than a Low-confidence failure.
fn resolve_flow_owner(flow: &FlowTuple) -> Result<FlowOwnerResult, PlatformError> {
    let entries = parse_netstat_flow_entries()?;
    let proc_map = load_windows_process_map();
    let mut matched: Option<(u32, FlowOwnerConfidence)> = None;
    for entry in entries {
        if entry.proto != flow.proto {
            continue;
        }
        // Exact equality only — wildcard/loopback addresses get no special
        // treatment here, since netstat reports concrete local endpoints.
        let local_match = entry.local.ip() == flow.src_ip && entry.local.port() == flow.src_port;
        if !local_match {
            continue;
        }
        match flow.proto {
            FlowProtocol::Tcp => {
                if let Some(remote) = entry.remote {
                    if remote.ip() == flow.dst_ip && remote.port() == flow.dst_port {
                        matched = Some((entry.pid, FlowOwnerConfidence::High));
                        break;
                    }
                }
            }
            FlowProtocol::Udp => {
                // UDP rows carry no peer; the local endpoint is the best we have.
                matched = Some((entry.pid, FlowOwnerConfidence::Medium));
                break;
            }
        }
    }
    let (pid, confidence) = match matched {
        Some(value) => value,
        None => {
            return Ok(FlowOwnerResult {
                owner: None,
                confidence: FlowOwnerConfidence::None,
                failure_reason: Some("no socket match".to_string()),
            })
        }
    };
    let info = proc_map.get(&pid);
    let owner = Some(FlowOwner {
        pid: Some(pid),
        ppid: None,
        process_name: info.and_then(|value| value.name.clone()),
        process_path: info.and_then(|value| value.path.clone()),
    });
    Ok(FlowOwnerResult {
        owner,
        confidence,
        failure_reason: None,
    })
}
fn load_native_roots(store: &str) -> Result<Vec<RootCert>, PlatformError> { fn load_native_roots(store: &str) -> Result<Vec<RootCert>, PlatformError> {
let certs = rustls_native_certs::load_native_certs() let certs = rustls_native_certs::load_native_certs()
.map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?; .map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
@@ -605,6 +822,10 @@ impl PortsProvider for WindowsPortsProvider {
.filter(|socket| extract_port(&socket.local_addr) == Some(port)) .filter(|socket| extract_port(&socket.local_addr) == Some(port))
.collect()) .collect())
} }
/// Enumerates current TCP connections by shelling out to `netstat -ano`.
async fn connections(&self) -> Result<Vec<ConnSocket>, PlatformError> {
    parse_windows_connections()
}
} }
#[async_trait] #[async_trait]
@@ -628,3 +849,10 @@ impl NeighProvider for WindowsNeighProvider {
Ok(parse_arp_output(&text)) Ok(parse_arp_output(&text))
} }
} }
#[async_trait]
impl FlowOwnerProvider for WindowsFlowOwnerProvider {
    /// Delegates to the netstat-based lookup in `resolve_flow_owner`.
    async fn owner_of(&self, flow: FlowTuple) -> Result<FlowOwnerResult, PlatformError> {
        resolve_flow_owner(&flow)
    }
}

View File

@@ -1,5 +1,6 @@
use async_trait::async_trait; use async_trait::async_trait;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::net::IpAddr;
use std::sync::Arc; use std::sync::Arc;
use wtfnet_core::ErrorCode; use wtfnet_core::ErrorCode;
@@ -46,6 +47,18 @@ pub struct ListenSocket {
pub owner: Option<String>, pub owner: Option<String>,
} }
/// A non-listening socket (an active connection) with optional owner info.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConnSocket {
    /// Transport protocol label (e.g. "tcp").
    pub proto: String,
    /// Local endpoint, as reported by the platform source (string form).
    pub local_addr: String,
    /// Remote endpoint, as reported by the platform source (string form).
    pub remote_addr: String,
    /// Connection state (e.g. "ESTABLISHED"), when the platform reports one.
    pub state: Option<String>,
    /// Owning process id, when it could be determined.
    pub pid: Option<u32>,
    /// Parent process id, when it could be determined.
    pub ppid: Option<u32>,
    /// Owning process name, when it could be determined.
    pub process_name: Option<String>,
    /// Owning process executable path, when it could be determined.
    pub process_path: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RootCert { pub struct RootCert {
pub subject: String, pub subject: String,
@@ -68,6 +81,46 @@ pub struct NeighborEntry {
pub state: Option<String>, pub state: Option<String>,
} }
/// Transport protocol of a flow. Serialized lowercase ("udp"/"tcp").
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum FlowProtocol {
    Udp,
    Tcp,
}

/// How certain the platform is about a flow→process attribution.
/// Serialized lowercase.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum FlowOwnerConfidence {
    /// Full 4-tuple socket match.
    High,
    /// Local-endpoint-only match.
    Medium,
    /// Socket matched but the owning process could not be identified.
    Low,
    /// No matching socket found.
    None,
}

/// Process attributed to a flow; every field is best-effort.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlowOwner {
    pub pid: Option<u32>,
    pub ppid: Option<u32>,
    pub process_name: Option<String>,
    pub process_path: Option<String>,
}

/// Outcome of a flow-owner lookup: the owner (if any), how confident the
/// match is, and a human-readable reason when attribution failed.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlowOwnerResult {
    pub owner: Option<FlowOwner>,
    pub confidence: FlowOwnerConfidence,
    pub failure_reason: Option<String>,
}

/// The 5-tuple (minus interface) identifying the flow to attribute.
#[derive(Debug, Clone)]
pub struct FlowTuple {
    pub proto: FlowProtocol,
    pub src_ip: IpAddr,
    pub src_port: u16,
    pub dst_ip: IpAddr,
    pub dst_port: u16,
}
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct PlatformError { pub struct PlatformError {
pub code: ErrorCode, pub code: ErrorCode,
@@ -98,6 +151,7 @@ pub trait SysProvider: Send + Sync {
pub trait PortsProvider: Send + Sync { pub trait PortsProvider: Send + Sync {
async fn listening(&self) -> Result<Vec<ListenSocket>, PlatformError>; async fn listening(&self) -> Result<Vec<ListenSocket>, PlatformError>;
async fn who_owns(&self, port: u16) -> Result<Vec<ListenSocket>, PlatformError>; async fn who_owns(&self, port: u16) -> Result<Vec<ListenSocket>, PlatformError>;
async fn connections(&self) -> Result<Vec<ConnSocket>, PlatformError>;
} }
#[async_trait] #[async_trait]
@@ -110,9 +164,15 @@ pub trait NeighProvider: Send + Sync {
async fn neighbors(&self) -> Result<Vec<NeighborEntry>, PlatformError>; async fn neighbors(&self) -> Result<Vec<NeighborEntry>, PlatformError>;
} }
/// Platform hook for attributing a network flow to the owning local process.
#[async_trait]
pub trait FlowOwnerProvider: Send + Sync {
    /// Returns the best-effort owner of `flow` plus a confidence grade.
    async fn owner_of(&self, flow: FlowTuple) -> Result<FlowOwnerResult, PlatformError>;
}
pub struct Platform { pub struct Platform {
pub sys: Arc<dyn SysProvider>, pub sys: Arc<dyn SysProvider>,
pub ports: Arc<dyn PortsProvider>, pub ports: Arc<dyn PortsProvider>,
pub cert: Arc<dyn CertProvider>, pub cert: Arc<dyn CertProvider>,
pub neigh: Arc<dyn NeighProvider>, pub neigh: Arc<dyn NeighProvider>,
pub flow_owner: Arc<dyn FlowOwnerProvider>,
} }

View File

@@ -12,3 +12,7 @@ tokio = { version = "1", features = ["net", "time"] }
surge-ping = "0.8" surge-ping = "0.8"
wtfnet-geoip = { path = "../wtfnet-geoip" } wtfnet-geoip = { path = "../wtfnet-geoip" }
libc = "0.2" libc = "0.2"
tokio-socks = "0.5"
url = "2"
tracing = "0.1"
hickory-resolver = { version = "0.24", features = ["system-config"] }

View File

@@ -13,12 +13,20 @@ use pnet::transport::{
use std::os::unix::io::AsRawFd; use std::os::unix::io::AsRawFd;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use socket2::{Domain, Protocol, Socket, Type}; use socket2::{Domain, Protocol, Socket, Type};
use std::collections::{HashMap, HashSet};
use std::net::{IpAddr, SocketAddr}; use std::net::{IpAddr, SocketAddr};
#[cfg(unix)]
use std::mem::size_of_val; use std::mem::size_of_val;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use hickory_resolver::config::{ResolverConfig, ResolverOpts};
use hickory_resolver::system_conf::read_system_conf;
use hickory_resolver::TokioAsyncResolver;
use thiserror::Error; use thiserror::Error;
use tokio::net::{TcpStream, lookup_host}; use tokio::net::{TcpStream, lookup_host};
use tokio::time::timeout; use tokio::time::timeout;
use tokio_socks::tcp::Socks5Stream;
use tracing::debug;
use url::Url;
use wtfnet_geoip::GeoIpRecord; use wtfnet_geoip::GeoIpRecord;
#[derive(Debug, Error)] #[derive(Debug, Error)]
@@ -27,6 +35,10 @@ pub enum ProbeError {
Resolve(String), Resolve(String),
#[error("io error: {0}")] #[error("io error: {0}")]
Io(String), Io(String),
#[error("invalid proxy: {0}")]
InvalidProxy(String),
#[error("proxy error: {0}")]
Proxy(String),
#[error("timeout")] #[error("timeout")]
Timeout, Timeout,
#[error("ping error: {0}")] #[error("ping error: {0}")]
@@ -85,6 +97,12 @@ pub struct TraceHop {
pub ttl: u8, pub ttl: u8,
pub addr: Option<String>, pub addr: Option<String>,
pub rtt_ms: Option<u128>, pub rtt_ms: Option<u128>,
pub rtt_samples: Vec<Option<u128>>,
pub min_ms: Option<u128>,
pub avg_ms: Option<f64>,
pub max_ms: Option<u128>,
pub loss_pct: f64,
pub rdns: Option<String>,
pub note: Option<String>, pub note: Option<String>,
pub geoip: Option<GeoIpRecord>, pub geoip: Option<GeoIpRecord>,
} }
@@ -97,6 +115,8 @@ pub struct TraceReport {
pub port: u16, pub port: u16,
pub max_hops: u8, pub max_hops: u8,
pub timeout_ms: u64, pub timeout_ms: u64,
pub per_hop: u32,
pub rdns: bool,
pub protocol: String, pub protocol: String,
pub hops: Vec<TraceHop>, pub hops: Vec<TraceHop>,
} }
@@ -107,7 +127,15 @@ pub async fn ping(
timeout_ms: u64, timeout_ms: u64,
interval_ms: u64, interval_ms: u64,
) -> Result<PingReport, ProbeError> { ) -> Result<PingReport, ProbeError> {
debug!(
target,
count,
timeout_ms,
interval_ms,
"probe ping start"
);
let addr = resolve_one(target).await?; let addr = resolve_one(target).await?;
debug!(ip = %addr, "probe ping resolved");
let mut results = Vec::new(); let mut results = Vec::new();
let mut received = 0u32; let mut received = 0u32;
let mut min = None; let mut min = None;
@@ -183,9 +211,45 @@ pub async fn tcp_ping(
port: u16, port: u16,
count: u32, count: u32,
timeout_ms: u64, timeout_ms: u64,
proxy: Option<&str>,
prefer_ipv4: bool,
) -> Result<TcpPingReport, ProbeError> { ) -> Result<TcpPingReport, ProbeError> {
let addr = resolve_one(target).await?; debug!(
let socket_addr = SocketAddr::new(addr, port); target,
port,
count,
timeout_ms,
proxy = ?proxy,
prefer_ipv4,
"probe tcp ping start"
);
let (report_ip, target_host, proxy_addr) = if let Some(proxy) = proxy {
let proxy = parse_socks5_proxy(proxy)?;
if proxy.remote_dns {
(None, target.to_string(), proxy.addr)
} else {
let addr = if prefer_ipv4 {
resolve_one_prefer_ipv4(target).await?
} else {
resolve_one(target).await?
};
(Some(addr), addr.to_string(), proxy.addr)
}
} else {
let addr = if prefer_ipv4 {
resolve_one_prefer_ipv4(target).await?
} else {
resolve_one(target).await?
};
(Some(addr), addr.to_string(), String::new())
};
let socket_addr = report_ip.map(|addr| SocketAddr::new(addr, port));
debug!(
report_ip = ?report_ip,
target_host = %target_host,
proxy_addr = %proxy_addr,
"probe tcp ping resolved"
);
let timeout_dur = Duration::from_millis(timeout_ms); let timeout_dur = Duration::from_millis(timeout_ms);
let mut results = Vec::new(); let mut results = Vec::new();
let mut received = 0u32; let mut received = 0u32;
@@ -196,9 +260,27 @@ pub async fn tcp_ping(
for seq in 0..count { for seq in 0..count {
let seq = seq as u16; let seq = seq as u16;
let start = Instant::now(); let start = Instant::now();
let attempt = timeout(timeout_dur, TcpStream::connect(socket_addr)).await; let attempt: Result<TcpStream, ProbeError> = if proxy.is_some() {
let target = (target_host.as_str(), port);
let stream = timeout(
timeout_dur,
Socks5Stream::connect(proxy_addr.as_str(), target),
)
.await
.map_err(|_| ProbeError::Timeout)?
.map_err(|err| ProbeError::Proxy(err.to_string()))?;
Ok(stream.into_inner())
} else {
timeout(
timeout_dur,
TcpStream::connect(socket_addr.expect("missing socket addr")),
)
.await
.map_err(|_| ProbeError::Timeout)?
.map_err(|err| ProbeError::Io(err.to_string()))
};
match attempt { match attempt {
Ok(Ok(_stream)) => { Ok(_stream) => {
let rtt = start.elapsed().as_millis(); let rtt = start.elapsed().as_millis();
received += 1; received += 1;
min = Some(min.map_or(rtt, |value: u128| value.min(rtt))); min = Some(min.map_or(rtt, |value: u128| value.min(rtt)));
@@ -210,27 +292,20 @@ pub async fn tcp_ping(
error: None, error: None,
}); });
} }
Ok(Err(err)) => { Err(err) => {
results.push(TcpPingResult { results.push(TcpPingResult {
seq, seq,
rtt_ms: None, rtt_ms: None,
error: Some(err.to_string()), error: Some(err.to_string()),
}); });
} }
Err(_) => {
results.push(TcpPingResult {
seq,
rtt_ms: None,
error: Some("timeout".to_string()),
});
}
} }
} }
let summary = build_summary(count, received, min, max, sum); let summary = build_summary(count, received, min, max, sum);
Ok(TcpPingReport { Ok(TcpPingReport {
target: target.to_string(), target: target.to_string(),
ip: Some(addr.to_string()), ip: report_ip.map(|addr| addr.to_string()),
geoip: None, geoip: None,
port, port,
timeout_ms, timeout_ms,
@@ -245,42 +320,96 @@ pub async fn tcp_trace(
port: u16, port: u16,
max_hops: u8, max_hops: u8,
timeout_ms: u64, timeout_ms: u64,
per_hop: u32,
rdns: bool,
) -> Result<TraceReport, ProbeError> { ) -> Result<TraceReport, ProbeError> {
debug!(
target,
port,
max_hops,
timeout_ms,
"probe tcp trace start"
);
let addr = resolve_one(target).await?; let addr = resolve_one(target).await?;
debug!(ip = %addr, "probe tcp trace resolved");
let socket_addr = SocketAddr::new(addr, port); let socket_addr = SocketAddr::new(addr, port);
let timeout_dur = Duration::from_millis(timeout_ms); let timeout_dur = Duration::from_millis(timeout_ms);
let mut hops = Vec::new(); let mut hops = Vec::new();
let mut rdns_lookup = if rdns {
Some(ReverseDns::new(timeout_dur)?)
} else {
None
};
for ttl in 1..=max_hops { for ttl in 1..=max_hops {
let addr = socket_addr; debug!(ttl, per_hop, "probe tcp trace hop start");
let start = Instant::now(); let mut samples = Vec::new();
let result = let mut last_error = None;
tokio::task::spawn_blocking(move || tcp_connect_with_ttl(addr, ttl, timeout_dur)) for _ in 0..per_hop.max(1) {
.await let addr = socket_addr;
.map_err(|err| ProbeError::Io(err.to_string()))?; let start = Instant::now();
let result =
tokio::task::spawn_blocking(move || tcp_connect_with_ttl(addr, ttl, timeout_dur))
.await
.map_err(|err| ProbeError::Io(err.to_string()))?;
match result { match result {
Ok(()) => { Ok(()) => {
let rtt = start.elapsed().as_millis(); let rtt = start.elapsed().as_millis();
hops.push(TraceHop { debug!(ttl, rtt_ms = rtt, "probe tcp trace hop reply");
ttl, samples.push(Some(rtt));
addr: Some(socket_addr.ip().to_string()), }
rtt_ms: Some(rtt), Err(err) => {
note: None, let message = err.to_string();
geoip: None, debug!(ttl, error = %message, "probe tcp trace hop error");
}); last_error = Some(message);
break; samples.push(None);
}
} }
Err(err) => { }
let rtt = start.elapsed().as_millis();
hops.push(TraceHop { let (min_ms, avg_ms, max_ms, loss_pct) = stats_from_samples(&samples);
ttl, let rtt_ms = avg_ms.map(|value| value.round() as u128);
addr: None, let rdns_name = if rdns {
rtt_ms: Some(rtt), if let Some(lookup) = rdns_lookup.as_mut() {
note: Some(err.to_string()), lookup.lookup(socket_addr.ip()).await
geoip: None, } else {
}); None
} }
} else {
None
};
let note = if loss_pct >= 100.0 {
last_error
} else {
None
};
hops.push(TraceHop {
ttl,
addr: Some(socket_addr.ip().to_string()),
rtt_ms,
rtt_samples: samples,
min_ms,
avg_ms,
max_ms,
loss_pct,
rdns: rdns_name,
note,
geoip: None,
});
debug!(
ttl,
loss_pct,
min_ms = ?min_ms,
avg_ms = ?avg_ms,
max_ms = ?max_ms,
"probe tcp trace hop summary"
);
if loss_pct < 100.0 {
break;
} }
} }
@@ -291,6 +420,8 @@ pub async fn tcp_trace(
port, port,
max_hops, max_hops,
timeout_ms, timeout_ms,
per_hop,
rdns,
protocol: "tcp".to_string(), protocol: "tcp".to_string(),
hops, hops,
}) })
@@ -301,42 +432,117 @@ pub async fn udp_trace(
port: u16, port: u16,
max_hops: u8, max_hops: u8,
timeout_ms: u64, timeout_ms: u64,
per_hop: u32,
rdns: bool,
) -> Result<TraceReport, ProbeError> { ) -> Result<TraceReport, ProbeError> {
debug!(
target,
port,
max_hops,
timeout_ms,
"probe udp trace start"
);
let addr = resolve_one(target).await?; let addr = resolve_one(target).await?;
debug!(ip = %addr, "probe udp trace resolved");
let timeout_dur = Duration::from_millis(timeout_ms); let timeout_dur = Duration::from_millis(timeout_ms);
let mut hops = Vec::new(); let mut hops = Vec::new();
let mut rdns_lookup = if rdns {
Some(ReverseDns::new(timeout_dur)?)
} else {
None
};
for ttl in 1..=max_hops { for ttl in 1..=max_hops {
let addr = SocketAddr::new(addr, port); debug!(ttl, per_hop, "probe udp trace hop start");
let start = Instant::now(); let mut samples = Vec::new();
let result = tokio::task::spawn_blocking(move || udp_trace_hop(addr, ttl, timeout_dur)) let mut hop_addr = None;
.await let mut reached_any = false;
.map_err(|err| ProbeError::Io(err.to_string()))?; let mut last_error = None;
let mut addr_set = HashSet::new();
match result { for _ in 0..per_hop.max(1) {
Ok((hop_addr, reached)) => { let addr = SocketAddr::new(addr, port);
let rtt = start.elapsed().as_millis(); let start = Instant::now();
hops.push(TraceHop { let result = tokio::task::spawn_blocking(move || udp_trace_hop(addr, ttl, timeout_dur))
ttl, .await
addr: hop_addr.map(|ip| ip.to_string()), .map_err(|err| ProbeError::Io(err.to_string()))?;
rtt_ms: Some(rtt),
note: None, match result {
geoip: None, Ok((addr, reached)) => {
}); let rtt = start.elapsed().as_millis();
if reached { debug!(
break; ttl,
addr = ?addr,
rtt_ms = rtt,
reached,
"probe udp trace hop reply"
);
samples.push(Some(rtt));
if let Some(ip) = addr {
addr_set.insert(ip);
if hop_addr.is_none() {
hop_addr = Some(ip);
}
}
if reached {
reached_any = true;
}
}
Err(err) => {
let message = err.to_string();
debug!(ttl, error = %message, "probe udp trace hop error");
last_error = Some(message);
samples.push(None);
} }
} }
Err(err) => { }
hops.push(TraceHop {
ttl, let (min_ms, avg_ms, max_ms, loss_pct) = stats_from_samples(&samples);
addr: None, let rtt_ms = avg_ms.map(|value| value.round() as u128);
rtt_ms: None, let rdns_name = if rdns {
note: Some(err.to_string()), if let (Some(ip), Some(lookup)) = (hop_addr, rdns_lookup.as_mut()) {
geoip: None, lookup.lookup(ip).await
}); } else {
None
} }
} else {
None
};
let note = if loss_pct >= 100.0 {
last_error
} else if addr_set.len() > 1 {
Some("multiple hop addresses".to_string())
} else {
None
};
hops.push(TraceHop {
ttl,
addr: hop_addr.map(|ip| ip.to_string()),
rtt_ms,
rtt_samples: samples,
min_ms,
avg_ms,
max_ms,
loss_pct,
rdns: rdns_name,
note,
geoip: None,
});
debug!(
ttl,
loss_pct,
min_ms = ?min_ms,
avg_ms = ?avg_ms,
max_ms = ?max_ms,
reached_any,
"probe udp trace hop summary"
);
if reached_any {
break;
} }
} }
@@ -347,6 +553,8 @@ pub async fn udp_trace(
port, port,
max_hops, max_hops,
timeout_ms, timeout_ms,
per_hop,
rdns,
protocol: "udp".to_string(), protocol: "udp".to_string(),
hops, hops,
}) })
@@ -379,6 +587,35 @@ fn build_summary(
} }
} }
/// Aggregates per-probe RTT samples into `(min, avg, max, loss_pct)`.
///
/// `None` samples count as lost probes. With zero samples the loss is 0.0 and
/// all aggregates are `None`; with zero successes the aggregates are `None`
/// and the loss is 100.0.
fn stats_from_samples(
    samples: &[Option<u128>],
) -> (Option<u128>, Option<f64>, Option<u128>, f64) {
    // Keep only the successful probes.
    let hits: Vec<u128> = samples.iter().filter_map(|sample| *sample).collect();
    let sent = samples.len() as u32;
    let received = hits.len() as u32;
    let loss_pct = if sent == 0 {
        0.0
    } else {
        (f64::from(sent - received) / f64::from(sent)) * 100.0
    };
    let min = hits.iter().copied().min();
    let max = hits.iter().copied().max();
    let avg_ms = if hits.is_empty() {
        None
    } else {
        Some(hits.iter().sum::<u128>() as f64 / hits.len() as f64)
    };
    (min, avg_ms, max, loss_pct)
}
async fn resolve_one(target: &str) -> Result<IpAddr, ProbeError> { async fn resolve_one(target: &str) -> Result<IpAddr, ProbeError> {
let mut iter = lookup_host((target, 0)) let mut iter = lookup_host((target, 0))
.await .await
@@ -388,6 +625,84 @@ async fn resolve_one(target: &str) -> Result<IpAddr, ProbeError> {
.ok_or_else(|| ProbeError::Resolve("no address found".to_string())) .ok_or_else(|| ProbeError::Resolve("no address found".to_string()))
} }
/// Resolves `target`, preferring the first IPv4 address; when no IPv4 record
/// exists the first address of any family is returned, and an error when the
/// lookup yields nothing at all.
async fn resolve_one_prefer_ipv4(target: &str) -> Result<IpAddr, ProbeError> {
    let addrs: Vec<IpAddr> = lookup_host((target, 0))
        .await
        .map_err(|err| ProbeError::Resolve(err.to_string()))?
        .map(|sock| sock.ip())
        .collect();
    addrs
        .iter()
        .copied()
        .find(|ip| ip.is_ipv4())
        .or_else(|| addrs.first().copied())
        .ok_or_else(|| ProbeError::Resolve("no address found".to_string()))
}
/// Cached reverse-DNS (PTR) lookups used to annotate traceroute hops.
struct ReverseDns {
    resolver: TokioAsyncResolver,
    /// Memoizes results per IP (including failures) so repeated hops
    /// don't re-query.
    cache: HashMap<IpAddr, Option<String>>,
    /// Upper bound applied to each individual reverse lookup.
    timeout: Duration,
}
impl ReverseDns {
    /// Builds a resolver from the system DNS configuration, falling back to
    /// the library defaults when the system config cannot be read.
    fn new(timeout: Duration) -> Result<Self, ProbeError> {
        let (config, opts) = match read_system_conf() {
            Ok((config, opts)) => (config, opts),
            Err(_) => (ResolverConfig::default(), ResolverOpts::default()),
        };
        let resolver = TokioAsyncResolver::tokio(config, opts);
        Ok(Self {
            resolver,
            cache: HashMap::new(),
            timeout,
        })
    }
    /// Returns the first PTR name for `ip`, or `None` on failure or timeout.
    /// The outcome — success or failure — is cached for subsequent calls.
    async fn lookup(&mut self, ip: IpAddr) -> Option<String> {
        if let Some(value) = self.cache.get(&ip) {
            return value.clone();
        }
        let result = timeout(self.timeout, self.resolver.reverse_lookup(ip)).await;
        let value = match result {
            // Outer Ok: completed within the timeout; inner Ok: lookup succeeded.
            Ok(Ok(response)) => response.iter().next().map(|name| name.to_utf8()),
            _ => None,
        };
        self.cache.insert(ip, value.clone());
        value
    }
}
/// Parsed SOCKS5 proxy endpoint.
struct Socks5Proxy {
    /// "host:port" string handed to the SOCKS connector.
    addr: String,
    /// True for the `socks5h://` scheme: hostname resolution happens on the
    /// proxy instead of locally.
    remote_dns: bool,
}
/// Parses a `socks5://host:port` or `socks5h://host:port` proxy URL.
///
/// Any other scheme is rejected as invalid, and embedded credentials are
/// rejected because proxy authentication is not supported.
fn parse_socks5_proxy(value: &str) -> Result<Socks5Proxy, ProbeError> {
    let url = Url::parse(value).map_err(|_| ProbeError::InvalidProxy(value.to_string()))?;
    let scheme = url.scheme();
    let remote_dns = match scheme {
        "socks5" => false,
        "socks5h" => true,
        _ => return Err(ProbeError::InvalidProxy(value.to_string())),
    };
    if !url.username().is_empty() || url.password().is_some() {
        return Err(ProbeError::Proxy("proxy auth not supported".to_string()));
    }
    let host = url
        .host_str()
        .ok_or_else(|| ProbeError::InvalidProxy(value.to_string()))?;
    // NOTE(review): the url crate has no built-in default port for socks5
    // schemes, so an explicit port is effectively required here — confirm.
    let port = url
        .port_or_known_default()
        .ok_or_else(|| ProbeError::InvalidProxy(value.to_string()))?;
    Ok(Socks5Proxy {
        addr: format!("{host}:{port}"),
        remote_dns,
    })
}
fn tcp_connect_with_ttl(addr: SocketAddr, ttl: u8, timeout: Duration) -> Result<(), ProbeError> { fn tcp_connect_with_ttl(addr: SocketAddr, ttl: u8, timeout: Duration) -> Result<(), ProbeError> {
let domain = match addr.ip() { let domain = match addr.ip() {
IpAddr::V4(_) => Domain::IPV4, IpAddr::V4(_) => Domain::IPV4,

View File

@@ -0,0 +1,16 @@
# Manifest for the TLS probing helper crate (handshake, cert, ALPN reports).
[package]
name = "wtfnet-tls"
version = "0.1.0"
edition = "2024"

[dependencies]
# "dangerous_configuration" exposes rustls's custom certificate-verifier API —
# presumably backing the `insecure` handshake option; confirm against lib.rs.
rustls = { version = "0.21", features = ["dangerous_configuration"] }
rustls-native-certs = "0.6"
serde = { version = "1", features = ["derive"] }
thiserror = "2"
tokio = { version = "1", features = ["net", "time"] }
tokio-rustls = "0.24"
x509-parser = "0.16"
# SOCKS5 tunneling and proxy-URL parsing for proxied handshakes.
tokio-socks = "0.5"
url = "2"
tracing = "0.1"

View File

@@ -0,0 +1,583 @@
use rustls::{Certificate, ClientConfig, RootCertStore, ServerName};
use serde::{Deserialize, Serialize};
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use std::time::{Duration, SystemTime};
use thiserror::Error;
use tokio::net::TcpStream;
use tokio::time::timeout;
use tokio_rustls::TlsConnector;
use tokio_socks::tcp::Socks5Stream;
use tracing::debug;
use url::Url;
use x509_parser::prelude::{FromDer, X509Certificate};
/// Errors produced by the TLS diagnostic commands.
#[derive(Debug, Error)]
pub enum TlsError {
    // Target string could not be split into host:port.
    #[error("invalid target: {0}")]
    InvalidTarget(String),
    // SNI value is not a valid server name.
    #[error("invalid sni: {0}")]
    InvalidSni(String),
    // Socket / resolver failure (stringified for easy serialization).
    #[error("io error: {0}")]
    Io(String),
    // Handshake or certificate-store failure reported by rustls.
    #[error("tls error: {0}")]
    Tls(String),
    // X.509 certificate parsing failure.
    #[error("parse error: {0}")]
    Parse(String),
    // TCP connect or TLS handshake exceeded the configured timeout.
    #[error("timeout")]
    Timeout,
}
/// Summary of one X.509 certificate in the presented chain.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsCertSummary {
    pub subject: String,
    pub issuer: String,
    pub not_before: String,
    pub not_after: String,
    // Subject Alternative Name entries rendered as display strings.
    pub san: Vec<String>,
    // The three fields below are populated only with `--show-extensions`.
    pub signature_algorithm: Option<String>,
    pub key_usage: Option<Vec<String>>,
    pub extended_key_usage: Option<Vec<String>>,
}
/// Result of `tls handshake`: negotiated parameters plus the cert chain.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsHandshakeReport {
    pub target: String,
    pub sni: Option<String>,
    // Protocols offered by the client, in preference order.
    pub alpn_offered: Vec<String>,
    // Protocol the server selected, if any.
    pub alpn_negotiated: Option<String>,
    pub tls_version: Option<String>,
    pub cipher: Option<String>,
    // Currently always null — see `ocsp_status`.
    pub ocsp_stapled: Option<bool>,
    pub cert_chain: Vec<TlsCertSummary>,
}
/// Result of `tls verify`: connection outcome with verification enforced.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsVerifyReport {
    pub target: String,
    pub sni: Option<String>,
    pub alpn_offered: Vec<String>,
    pub alpn_negotiated: Option<String>,
    pub tls_version: Option<String>,
    pub cipher: Option<String>,
    pub ocsp_stapled: Option<bool>,
    // True when the handshake succeeded with full certificate verification.
    pub verified: bool,
    // Stringified failure reason when `verified` is false.
    pub error: Option<String>,
}
/// Result of `tls cert`: certificate-focused view of the connection.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsCertReport {
    pub target: String,
    pub sni: Option<String>,
    pub ocsp_stapled: Option<bool>,
    pub cert_chain: Vec<TlsCertSummary>,
}
/// Result of `tls alpn`: only the ALPN negotiation outcome.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsAlpnReport {
    pub target: String,
    pub sni: Option<String>,
    pub alpn_offered: Vec<String>,
    pub alpn_negotiated: Option<String>,
}
/// Options shared by all `tls` subcommands.
#[derive(Debug, Clone)]
pub struct TlsOptions {
    // Explicit SNI override; otherwise the target host (or IP) is used.
    pub sni: Option<String>,
    // ALPN protocols to offer, in preference order; empty offers none.
    pub alpn: Vec<String>,
    // Applied separately to the TCP connect and the TLS handshake.
    pub timeout_ms: u64,
    // Skip certificate verification (NoVerifier). `verify` ignores this flag.
    pub insecure: bool,
    // Optional socks5:// or socks5h:// proxy URL.
    pub socks5: Option<String>,
    // Prefer an IPv4 address when the host resolves to both families.
    pub prefer_ipv4: bool,
    // Populate signature algorithm / key usage / EKU in cert summaries.
    pub show_extensions: bool,
    // Request OCSP stapling status (currently always reported as null).
    pub ocsp: bool,
}
/// Perform a full TLS handshake against `target` and report the negotiated
/// parameters (ALPN, version, cipher) plus the presented certificate chain.
///
/// Honors `options.insecure` (skip verification), `options.socks5` (tunnel
/// the TCP connection) and `options.timeout_ms` (applied to connect and
/// handshake separately inside `connect`).
pub async fn handshake(target: &str, options: TlsOptions) -> Result<TlsHandshakeReport, TlsError> {
    debug!(
        target,
        sni = ?options.sni,
        alpn = ?options.alpn,
        proxy = ?options.socks5,
        timeout_ms = options.timeout_ms,
        prefer_ipv4 = options.prefer_ipv4,
        "tls handshake start"
    );
    let (host, port, server_name) = parse_target(target, options.sni.as_deref())?;
    let connector = build_connector(options.insecure, &options.alpn)?;
    let stream = connect(
        host.as_str(),
        port,
        options.socks5.as_deref(),
        connector,
        server_name,
        options.timeout_ms,
        options.prefer_ipv4,
    )
    .await?;
    // The rustls session (negotiated state, peer certs) is the second half
    // of the stream's (IO, ClientConnection) pair.
    let (_, session) = stream.get_ref();
    Ok(TlsHandshakeReport {
        target: target.to_string(),
        sni: options.sni,
        alpn_offered: options.alpn.clone(),
        alpn_negotiated: session
            .alpn_protocol()
            .map(|value| String::from_utf8_lossy(value).to_string()),
        tls_version: session.protocol_version().map(|v| format!("{v:?}")),
        cipher: session
            .negotiated_cipher_suite()
            .map(|suite| format!("{suite:?}")),
        ocsp_stapled: ocsp_status(session, options.ocsp),
        cert_chain: extract_cert_chain(session.peer_certificates(), options.show_extensions)?,
    })
}
/// Perform a strictly-verified handshake and report the outcome.
///
/// Verification is always enforced here (the connector is built with
/// `insecure = false`, ignoring `options.insecure`); connection and
/// handshake failures are folded into the report (`verified: false` plus
/// the error string) instead of being returned as `Err`.
pub async fn verify(target: &str, options: TlsOptions) -> Result<TlsVerifyReport, TlsError> {
    debug!(
        target,
        sni = ?options.sni,
        alpn = ?options.alpn,
        proxy = ?options.socks5,
        timeout_ms = options.timeout_ms,
        prefer_ipv4 = options.prefer_ipv4,
        "tls verify start"
    );
    let (host, port, server_name) = parse_target(target, options.sni.as_deref())?;
    // Always verify: hard-coded `false` instead of options.insecure.
    let connector = build_connector(false, &options.alpn)?;
    match connect(
        host.as_str(),
        port,
        options.socks5.as_deref(),
        connector,
        server_name,
        options.timeout_ms,
        options.prefer_ipv4,
    )
    .await
    {
        Ok(stream) => {
            let (_, session) = stream.get_ref();
            Ok(TlsVerifyReport {
                target: target.to_string(),
                sni: options.sni,
                alpn_offered: options.alpn.clone(),
                alpn_negotiated: session
                    .alpn_protocol()
                    .map(|value| String::from_utf8_lossy(value).to_string()),
                tls_version: session.protocol_version().map(|v| format!("{v:?}")),
                cipher: session
                    .negotiated_cipher_suite()
                    .map(|suite| format!("{suite:?}")),
                ocsp_stapled: ocsp_status(session, options.ocsp),
                verified: true,
                error: None,
            })
        }
        // Any failure (timeout, IO, TLS) becomes a non-verified report.
        Err(err) => Ok(TlsVerifyReport {
            target: target.to_string(),
            sni: options.sni,
            alpn_offered: options.alpn.clone(),
            alpn_negotiated: None,
            tls_version: None,
            cipher: None,
            ocsp_stapled: None,
            verified: false,
            error: Some(err.to_string()),
        }),
    }
}
/// Connect to `target` and report only certificate-related details.
pub async fn certs(target: &str, options: TlsOptions) -> Result<TlsCertReport, TlsError> {
    debug!(
        target,
        sni = ?options.sni,
        alpn = ?options.alpn,
        proxy = ?options.socks5,
        timeout_ms = options.timeout_ms,
        prefer_ipv4 = options.prefer_ipv4,
        "tls certs start"
    );
    let (host, port, server_name) = parse_target(target, options.sni.as_deref())?;
    let tls = build_connector(options.insecure, &options.alpn)?;
    let tls_stream = connect(
        host.as_str(),
        port,
        options.socks5.as_deref(),
        tls,
        server_name,
        options.timeout_ms,
        options.prefer_ipv4,
    )
    .await?;
    let session = tls_stream.get_ref().1;
    let stapled = ocsp_status(session, options.ocsp);
    let chain = extract_cert_chain(session.peer_certificates(), options.show_extensions)?;
    Ok(TlsCertReport {
        target: target.to_string(),
        sni: options.sni,
        ocsp_stapled: stapled,
        cert_chain: chain,
    })
}
/// Connect to `target` and report only the ALPN negotiation outcome.
pub async fn alpn(target: &str, options: TlsOptions) -> Result<TlsAlpnReport, TlsError> {
    debug!(
        target,
        sni = ?options.sni,
        alpn = ?options.alpn,
        proxy = ?options.socks5,
        timeout_ms = options.timeout_ms,
        prefer_ipv4 = options.prefer_ipv4,
        "tls alpn start"
    );
    let (host, port, server_name) = parse_target(target, options.sni.as_deref())?;
    let tls = build_connector(options.insecure, &options.alpn)?;
    let tls_stream = connect(
        host.as_str(),
        port,
        options.socks5.as_deref(),
        tls,
        server_name,
        options.timeout_ms,
        options.prefer_ipv4,
    )
    .await?;
    let session = tls_stream.get_ref().1;
    let negotiated = session
        .alpn_protocol()
        .map(|proto| String::from_utf8_lossy(proto).to_string());
    Ok(TlsAlpnReport {
        target: target.to_string(),
        sni: options.sni,
        alpn_offered: options.alpn.clone(),
        alpn_negotiated: negotiated,
    })
}
/// Resolve the effective (host, port, SNI server name) triple for a target.
///
/// Precedence: an explicit `--sni` wins; otherwise a literal-IP target uses
/// an IP server name; otherwise the hostname itself becomes the SNI.
fn parse_target(target: &str, sni: Option<&str>) -> Result<(String, u16, ServerName), TlsError> {
    let (host, port) = split_host_port(target)?;
    let server_name = match sni {
        Some(name) => {
            ServerName::try_from(name).map_err(|_| TlsError::InvalidSni(name.to_string()))?
        }
        None => match host.parse::<IpAddr>() {
            Ok(ip) => ServerName::IpAddress(ip),
            Err(_) => ServerName::try_from(host.as_str())
                .map_err(|_| TlsError::InvalidSni(host.to_string()))?,
        },
    };
    Ok((host, port, server_name))
}
/// Split `host:port`, supporting bracketed IPv6 literals (`[::1]:443`).
///
/// Unbracketed values containing more than one `:` are rejected, so a raw
/// IPv6 address without brackets is an error rather than a silent misparse.
fn split_host_port(value: &str) -> Result<(String, u16), TlsError> {
    let bad = || TlsError::InvalidTarget(value.to_string());
    // Bracketed form: "[v6addr]:port".
    if let Some(inner) = value.strip_prefix('[') {
        if let Some(close) = inner.find(']') {
            let host = &inner[..close];
            let port_str = inner[close + 1..].strip_prefix(':').ok_or_else(bad)?;
            let port: u16 = port_str.parse().map_err(|_| bad())?;
            return Ok((host.to_string(), port));
        }
    }
    // Plain form: split on the LAST colon so "host:port" is unambiguous.
    let (host, port_str) = value.rsplit_once(':').ok_or_else(bad)?;
    if host.contains(':') {
        return Err(bad());
    }
    let port: u16 = port_str.parse().map_err(|_| bad())?;
    Ok((host.to_string(), port))
}
#[cfg(test)]
mod tests {
    use super::*;
    // Plain "host:port" form.
    #[test]
    fn split_host_port_ipv4() {
        let (host, port) = split_host_port("example.com:443").unwrap();
        assert_eq!(host, "example.com");
        assert_eq!(port, 443);
    }
    // Bracketed IPv6 form; brackets are stripped from the returned host.
    #[test]
    fn split_host_port_ipv6() {
        let (host, port) = split_host_port("[2001:db8::1]:443").unwrap();
        assert_eq!(host, "2001:db8::1");
        assert_eq!(port, 443);
    }
}
/// Resolve `host` to a single socket address (first resolver result wins).
/// A literal IP short-circuits without touching the system resolver.
fn resolve_addr(host: &str, port: u16) -> Result<SocketAddr, TlsError> {
    if let Ok(literal) = host.parse::<IpAddr>() {
        return Ok(SocketAddr::new(literal, port));
    }
    std::net::ToSocketAddrs::to_socket_addrs(&(host, port))
        .map_err(|err| TlsError::Io(err.to_string()))?
        .next()
        .ok_or_else(|| TlsError::InvalidTarget(host.to_string()))
}
/// Resolve `host`, returning the first IPv4 result when one exists and
/// otherwise falling back to the first address of any family.
fn resolve_addr_prefer_ipv4(host: &str, port: u16) -> Result<SocketAddr, TlsError> {
    if let Ok(literal) = host.parse::<IpAddr>() {
        return Ok(SocketAddr::new(literal, port));
    }
    let candidates: Vec<SocketAddr> = std::net::ToSocketAddrs::to_socket_addrs(&(host, port))
        .map_err(|err| TlsError::Io(err.to_string()))?
        .collect();
    candidates
        .iter()
        .copied()
        .find(SocketAddr::is_ipv4)
        .or_else(|| candidates.first().copied())
        .ok_or_else(|| TlsError::InvalidTarget(host.to_string()))
}
/// Build a rustls connector.
///
/// `insecure` swaps the platform trust store for `NoVerifier` (accepts any
/// certificate). Offered ALPN protocols are installed when non-empty.
fn build_connector(insecure: bool, alpn: &[String]) -> Result<TlsConnector, TlsError> {
    let mut config = if insecure {
        ClientConfig::builder()
            .with_safe_defaults()
            .with_custom_certificate_verifier(Arc::new(NoVerifier))
            .with_no_client_auth()
    } else {
        // Trust the OS certificate store via rustls-native-certs.
        let mut roots = RootCertStore::empty();
        let store = rustls_native_certs::load_native_certs()
            .map_err(|err| TlsError::Io(err.to_string()))?;
        for cert in store {
            roots
                .add(&Certificate(cert.0))
                .map_err(|err| TlsError::Tls(err.to_string()))?;
        }
        ClientConfig::builder()
            .with_safe_defaults()
            .with_root_certificates(roots)
            .with_no_client_auth()
    };
    if !alpn.is_empty() {
        config.alpn_protocols = alpn.iter().map(|p| p.as_bytes().to_vec()).collect();
    }
    Ok(TlsConnector::from(Arc::new(config)))
}
/// Open an (optionally SOCKS5-proxied) TCP connection and complete the TLS
/// handshake.
///
/// `timeout_ms` bounds the TCP/SOCKS connect and the TLS handshake
/// independently, so the worst case is roughly twice the timeout. With a
/// `socks5h://` proxy the hostname is forwarded to the proxy for remote
/// resolution; with `socks5://` it is resolved locally first and only the
/// IP is sent.
async fn connect(
    host: &str,
    port: u16,
    proxy: Option<&str>,
    connector: TlsConnector,
    server_name: ServerName,
    timeout_ms: u64,
    prefer_ipv4: bool,
) -> Result<tokio_rustls::client::TlsStream<TcpStream>, TlsError> {
    let tcp = if let Some(proxy) = proxy {
        let proxy_addr = parse_proxy_addr(proxy)?;
        let (target_host, remote_dns) = socks5_target_host(proxy, host);
        let target = if remote_dns {
            // Let the proxy resolve the hostname (avoids local DNS).
            (target_host.clone(), port)
        } else {
            // Resolve locally, then hand the proxy a literal IP.
            let addr = if prefer_ipv4 {
                resolve_addr_prefer_ipv4(target_host.as_str(), port)?
            } else {
                resolve_addr(target_host.as_str(), port)?
            };
            (addr.ip().to_string(), port)
        };
        let stream = timeout(
            Duration::from_millis(timeout_ms),
            Socks5Stream::connect(proxy_addr.as_str(), target),
        )
        .await
        .map_err(|_| TlsError::Timeout)?
        .map_err(|err| TlsError::Io(err.to_string()))?;
        // Unwrap the SOCKS wrapper; TLS runs over the raw TcpStream.
        stream.into_inner()
    } else {
        let addr = if prefer_ipv4 {
            resolve_addr_prefer_ipv4(host, port)?
        } else {
            resolve_addr(host, port)?
        };
        timeout(Duration::from_millis(timeout_ms), TcpStream::connect(addr))
            .await
            .map_err(|_| TlsError::Timeout)?
            .map_err(|err| TlsError::Io(err.to_string()))?
    };
    // Second timeout window: the TLS handshake itself.
    let stream = timeout(
        Duration::from_millis(timeout_ms),
        connector.connect(server_name, tcp),
    )
    .await
    .map_err(|_| TlsError::Timeout)?
    .map_err(|err| TlsError::Tls(err.to_string()))?;
    Ok(stream)
}
/// Extract a "host:port" connect string from a proxy URL.
fn parse_proxy_addr(value: &str) -> Result<String, TlsError> {
    let bad = || TlsError::InvalidTarget(value.to_string());
    let parsed = Url::parse(value).map_err(|_| bad())?;
    let host = parsed.host_str().ok_or_else(bad)?;
    let port = parsed.port_or_known_default().ok_or_else(bad)?;
    Ok(format!("{host}:{port}"))
}
/// Decide whether the proxy should resolve `host` remotely.
///
/// Returns `(host, remote_dns)`; `remote_dns` is true for `socks5h://`
/// URLs. The scheme is compared case-insensitively (URI schemes are
/// case-insensitive per RFC 3986, and `parse_proxy_addr`'s `Url::parse`
/// already accepts mixed-case schemes); the previous case-sensitive prefix
/// check silently downgraded e.g. `SOCKS5H://` to local DNS resolution,
/// defeating the leak-avoidance intent of `socks5h`.
fn socks5_target_host(proxy: &str, host: &str) -> (String, bool) {
    let remote_dns = proxy
        .get(..10)
        .map(|scheme| scheme.eq_ignore_ascii_case("socks5h://"))
        .unwrap_or(false);
    (host.to_string(), remote_dns)
}
/// Parse every DER certificate in the presented chain (leaf first).
/// Fails fast on the first certificate that does not parse; a missing
/// chain yields an empty list.
fn extract_cert_chain(
    certs: Option<&[Certificate]>,
    show_extensions: bool,
) -> Result<Vec<TlsCertSummary>, TlsError> {
    certs
        .into_iter()
        .flatten()
        .map(|cert| parse_cert(&cert.0, show_extensions))
        .collect()
}
/// Parse one DER-encoded certificate into a summary.
///
/// Extension details (key usage, EKU, signature algorithm) are extracted
/// only when `show_extensions` is set; otherwise those fields are `None`.
fn parse_cert(der: &[u8], show_extensions: bool) -> Result<TlsCertSummary, TlsError> {
    let (_, cert) =
        X509Certificate::from_der(der).map_err(|err| TlsError::Parse(err.to_string()))?;
    let (key_usage, extended_key_usage, signature_algorithm) = if show_extensions {
        (
            extract_key_usage(&cert),
            extract_extended_key_usage(&cert),
            Some(cert.signature_algorithm.algorithm.to_string()),
        )
    } else {
        (None, None, None)
    };
    Ok(TlsCertSummary {
        subject: cert.subject().to_string(),
        issuer: cert.issuer().to_string(),
        not_before: cert.validity().not_before.to_string(),
        not_after: cert.validity().not_after.to_string(),
        san: extract_san(&cert),
        signature_algorithm,
        key_usage,
        extended_key_usage,
    })
}
/// Collect Subject Alternative Name entries as display strings.
/// A missing or unparsable SAN extension yields an empty list.
fn extract_san(cert: &X509Certificate<'_>) -> Vec<String> {
    match cert.subject_alternative_name() {
        Ok(Some(ext)) => ext
            .value
            .general_names
            .iter()
            .map(|name| name.to_string())
            .collect(),
        _ => Vec::new(),
    }
}
/// Collect the set Key Usage bits as their conventional names.
/// Returns `None` when the extension is absent, unparsable, or empty.
fn extract_key_usage(cert: &X509Certificate<'_>) -> Option<Vec<String>> {
    let ext = cert.key_usage().ok()??;
    let ku = &ext.value;
    // (bit set, conventional name) pairs, in standard bit order.
    let bits = [
        (ku.digital_signature(), "digitalSignature"),
        (ku.non_repudiation(), "nonRepudiation"),
        (ku.key_encipherment(), "keyEncipherment"),
        (ku.data_encipherment(), "dataEncipherment"),
        (ku.key_agreement(), "keyAgreement"),
        (ku.key_cert_sign(), "keyCertSign"),
        (ku.crl_sign(), "cRLSign"),
        (ku.encipher_only(), "encipherOnly"),
        (ku.decipher_only(), "decipherOnly"),
    ];
    let names: Vec<String> = bits
        .iter()
        .filter(|(set, _)| *set)
        .map(|(_, name)| (*name).to_string())
        .collect();
    if names.is_empty() {
        None
    } else {
        Some(names)
    }
}
/// Collect Extended Key Usage purposes (named flags plus any extra OIDs).
/// Returns `None` when the extension is absent, unparsable, or empty.
fn extract_extended_key_usage(cert: &X509Certificate<'_>) -> Option<Vec<String>> {
    let ext = cert.extended_key_usage().ok()??;
    let eku = &ext.value;
    // (flag, conventional name) pairs, in the order the original emitted.
    let named = [
        (eku.any, "any"),
        (eku.server_auth, "serverAuth"),
        (eku.client_auth, "clientAuth"),
        (eku.code_signing, "codeSigning"),
        (eku.email_protection, "emailProtection"),
        (eku.time_stamping, "timeStamping"),
        (eku.ocsp_signing, "ocspSigning"),
    ];
    let mut purposes: Vec<String> = named
        .iter()
        .filter(|(set, _)| *set)
        .map(|(_, name)| (*name).to_string())
        .collect();
    // Unrecognized purposes are appended as raw OID strings.
    purposes.extend(eku.other.iter().map(|oid| oid.to_string()));
    if purposes.is_empty() {
        None
    } else {
        Some(purposes)
    }
}
/// OCSP stapling status for the session.
///
/// rustls 0.21's client connection does not expose the stapled OCSP
/// response, so presence cannot be determined; per the v0.3.0 release rules
/// we report `null` rather than a fabricated value. The original
/// `if enabled { None } else { None }` had two identical arms — collapsed
/// to a plain `None`, keeping the signature so a future rustls upgrade can
/// fill this in.
fn ocsp_status(_session: &rustls::ClientConnection, _enabled: bool) -> Option<bool> {
    None
}
/// Certificate verifier that accepts any server certificate.
///
/// SECURITY: only installed when `--insecure` is set (see
/// `build_connector`). It skips all chain and name validation, so the
/// connection is vulnerable to man-in-the-middle attacks; use for
/// diagnostics only.
struct NoVerifier;
impl rustls::client::ServerCertVerifier for NoVerifier {
    fn verify_server_cert(
        &self,
        _end_entity: &Certificate,
        _intermediates: &[Certificate],
        _server_name: &ServerName,
        _scts: &mut dyn Iterator<Item = &[u8]>,
        _ocsp: &[u8],
        _now: SystemTime,
    ) -> Result<rustls::client::ServerCertVerified, rustls::Error> {
        // Unconditionally assert success, performing no checks.
        Ok(rustls::client::ServerCertVerified::assertion())
    }
}

69
docs/COMMANDS.md Normal file
View File

@@ -0,0 +1,69 @@
# WTFnet Commands
This document lists CLI commands and supported flags. Output defaults to text; use `--json` for structured output.
## Global flags
- `--json` / `--pretty`: emit JSON output (pretty-print if requested)
- `--no-color` / `--quiet`: disable ANSI colors / reduce stdout output
- `-v` / `-vv` / `--verbose`: increase log verbosity
- `--log-level <error|warn|info|debug|trace>`: set log level
- `--log-format <text|json>`: set log format
- `--log-file <path>`: write logs to file
- `NETTOOL_LOG_FILTER` or `RUST_LOG` can override log filters (ex: `maxminddb::decoder=debug`)
## sys
- `sys ifaces`: list network interfaces
- `sys ip` flags: `--all` (include link-local), `--iface <name>` (filter by interface)
- `sys route` flags: `--ipv4`, `--ipv6`, `--to <ip>` (filter by destination)
- `sys dns`: show DNS configuration
## ports
- `ports listen` flags: `--tcp`, `--udp`, `--port <n>` (filter by port)
- `ports who <port>`: find owning processes for a port
- `ports conns` flags: `--top <n>`, `--by-process` (summaries)
## neigh
- `neigh list` flags: `--ipv4`, `--ipv6`, `--iface <name>`
## cert
- `cert roots`: list trusted root certificates
- `cert baseline <path>`: write baseline JSON
- `cert diff <path>`: diff against baseline JSON
## geoip
- `geoip lookup <ip>`: lookup GeoIP
- `geoip status`: show GeoIP database status
## probe
- `probe ping <host>` flags: `--count <n>`, `--timeout-ms <n>`, `--interval-ms <n>`, `--no-geoip`
- `probe tcping <host:port>` flags: `--count <n>`, `--timeout-ms <n>`, `--socks5 <url>`, `--prefer-ipv4`, `--no-geoip`
- `probe trace <host>` flags: `--max-hops <n>`, `--per-hop <n>`, `--timeout-ms <n>`, `--udp`, `--port <n>`, `--rdns`, `--no-geoip`
## dns
- `dns query <domain> <type>` flags: `--server <ip[:port]>`, `--transport <udp|tcp|dot|doh>`, `--tls-name <name>`, `--socks5 <url>`, `--prefer-ipv4`, `--timeout-ms <n>`
- `dns detect <domain>` flags: `--servers <csv>`, `--transport <udp|tcp|dot|doh>`, `--tls-name <name>`, `--socks5 <url>`, `--prefer-ipv4`, `--repeat <n>`, `--timeout-ms <n>`
- `dns watch` flags: `--duration <Ns|Nms>`, `--follow` (run until Ctrl-C), `--iface <name>`, `--filter <pattern>`
- `dns leak status` flags: `--profile <full-tunnel|proxy-stub|split>`, `--policy <path>`
- `dns leak watch` flags: `--duration <Ns|Nms>`, `--follow` (run until Ctrl-C), `--iface <name>`, `--profile <full-tunnel|proxy-stub|split>`, `--policy <path>`, `--privacy <full|redacted|minimal>`, `--out <path>`, `--summary-only`, `--iface-diag` (list capture-capable interfaces)
- `dns leak report` flags: `<path>`, `--privacy <full|redacted|minimal>`
## http
- `http head|get <url>` flags: `--timeout-ms <n>`, `--follow-redirects <n>`, `--show-headers`, `--show-body`, `--max-body-bytes <n>`, `--http1-only`, `--http2-only`, `--http3` (requires the `http3` feature), `--http3-only` (requires the `http3` feature), `--geoip`, `--socks5 <url>`
## tls
- `tls handshake|cert|verify|alpn <host:port>` flags: `--sni <name>`, `--alpn <csv>`, `--timeout-ms <n>`, `--insecure`, `--socks5 <url>`, `--prefer-ipv4`, `--show-extensions`, `--ocsp`
## discover
- `discover mdns` flags: `--duration <Ns|Nms>`, `--service <type>`
- `discover ssdp` flags: `--duration <Ns|Nms>`
- `discover llmnr` flags: `--duration <Ns|Nms>`, `--name <host>`
- `discover nbns` flags: `--duration <Ns|Nms>`
## diag
- `diag` flags: `--out <path>`, `--bundle <path>`, `--dns-detect <domain>`, `--dns-timeout-ms <n>`, `--dns-repeat <n>`
## calc
- `calc subnet <cidr>|<ip> <mask>`
- `calc contains <a> <b>`
- `calc overlap <a> <b>`
- `calc summarize <cidr...>`

View File

@@ -0,0 +1,181 @@
# DNS Leak Detector - Implementation Guide (v0.4)
This document explains how to implement the DNS leak detector as a new subcrate in WTFnet.
## 1) New crate: `wtfnet-dnsleak`
### 1.1 Module layout
crates/wtfnet-dnsleak/src/
- lib.rs
- policy.rs # safe path constraints + presets
- sensor.rs # passive capture -> normalized TrafficEvent stream
- classify.rs # transport classification + confidence
- route.rs # interface/route classification (tunnel/physical/loopback)
- rules.rs # Leak-A/B/C/D evaluation
- report.rs # LeakEvent + SummaryReport builders
- privacy.rs # full/redacted/minimal redaction logic
## 2) Core data types
### 2.1 TrafficEvent (raw from sensor)
Fields:
- ts: timestamp
- proto: udp/tcp
- src_ip, src_port
- dst_ip, dst_port
- iface_name (capture interface if known)
- payload: optional bytes (only for plaintext DNS parsing)
### 2.2 ClassifiedEvent
Adds:
- transport: udp53/tcp53/dot/doh/unknown
- doh_confidence: HIGH/MEDIUM/LOW (only if doh)
- qname/qtype: nullable
### 2.3 EnrichedEvent
Adds:
- route_class: loopback/tunnel/physical/unknown
- process info: pid/ppid/name (nullable)
- attribution_confidence: HIGH/MEDIUM/LOW/NONE
- attrib_failure_reason: optional string
### 2.4 LeakEvent (final output)
Adds:
- leak_type: A/B/C/D
- severity: P0..P3
- policy_rule_id
- evidence: minimal structured evidence
## 3) Platform integration: Process Attribution Engine (PAE)
### 3.1 Trait addition (wtfnet-platform)
Add:
trait FlowOwnerProvider {
fn owner_of(
&self,
proto: Proto,
src_ip: IpAddr,
src_port: u16,
dst_ip: IpAddr,
dst_port: u16,
) -> FlowOwnerResult;
}
FlowOwnerResult:
- pid, ppid, process_name (optional)
- confidence: HIGH/MEDIUM/LOW/NONE
- failure_reason: optional string
Design rule: attribution is best-effort and never blocks leak detection.
## 4) Transport classification logic
### 4.1 Plain DNS
Match:
- UDP dst port 53 OR TCP dst port 53
Parse QNAME/QTYPE from payload.
### 4.2 DoT
Match:
- TCP dst port 853
### 4.3 DoH (heuristic)
Match candidates:
- TCP dst port 443 AND (one of):
- dst IP in configured DoH resolver list
- dst SNI matches known DoH provider list (if available)
- frequent small HTTPS bursts pattern (weak)
Attach confidence:
- MEDIUM: known endpoint match
- LOW: traffic-shape heuristic only
## 5) Policy model
Policy defines "safe DNS path" constraints:
- allowed interfaces
- allowed destinations (IP/CIDR)
- allowed processes
- allowed ports
A DNS event is a leak if it violates safe-path constraints.
### 5.1 Built-in profiles
full-tunnel:
- allow DNS only via tunnel iface or loopback stub
- any UDP/TCP 53 on physical iface => Leak-A
proxy-stub (default):
- allow DNS only to loopback stub
- allow stub upstream only to proxy destinations
- flag direct DoH/DoT outside proxy path => Leak-C
split:
- allow plaintext DNS only for allowlist
- enforce unknown => proxy resolve (Leak-B)
## 6) Leak rules (A/B/C/D)
Leak-A (plaintext escape):
- transport udp53/tcp53
- route_class != allowed
- dst not in allowed destination set
Leak-B (split policy intent leak):
- qname matches proxy-required set or "unknown"
- query observed going to ISP/domicile resolver or non-tunnel iface
Leak-C (encrypted bypass):
- DoT or DoH flow exists
- not via approved egress path (iface/destination)
Leak-D (mismatch indicator):
- correlate qname to later TCP/TLS flows (optional v0.4 NICE)
## 7) Privacy modes
Because domains and cmdlines are sensitive, support:
- Full: store full qname and cmdline
- Redacted (default): hash qname or keep eTLD+1 only; truncate cmdline
- Minimal: no domains/cmdline; keep leak counts + resolver IPs + process name
Privacy mode applies in report builder, not in sensor.
## 8) CLI integration
Add under `dns` command group:
- `dns leak status`
- `dns leak watch`
- `dns leak report`
`watch` returns:
- summary report (human) by default
- `--json` returns structured report with events list
`--follow` keeps the watch running by resolving the duration to a large
placeholder (one year in milliseconds) and then racing the watch against
`tokio::signal::ctrl_c()`; Ctrl-C returns early with a clean exit code so the
outer loop stops.
## 9) Recommended incremental build plan
Phase 1 (core passive detection):
- sensor: udp/tcp capture
- classify: udp53/tcp53/dot
- parse plaintext qname/qtype
- policy: allowlist + allowed interfaces/dests
- leak rules: Leak-A + Leak-C (DoT)
- report: events + summary
Phase 2 (process attribution + DoH heuristics):
- platform FlowOwnerProvider impls
- DoH heuristic classification + confidence
- privacy modes
Phase 3 (optional correlation / Leak-D):
- flow tracker correlating DNS -> TCP/TLS connect events
- mismatch indicator output

172
docs/RELEASE_v0.3.0.md Normal file
View File

@@ -0,0 +1,172 @@
# WTFnet v0.3.0 - Release Plan
Binary name in examples: `wtfn` (current CLI examples use this form).
Project scope: Linux (Debian/Ubuntu) + Windows first-class.
## 0. Summary
v0.3.0 focuses on improving diagnostic depth and fidelity of existing commands rather than adding a "smart doctor" workflow.
Major upgrades in this release:
- richer traceroute output and per-hop statistics
- HTTP timing breakdown accuracy (connect/tls stages)
- optional HTTP/3 support (feature-gated; experimental)
- TLS diagnostics upgrades (OCSP stapling indicator, richer certificate parsing)
- ports connections view and summaries
- improved cert baseline/diff for system roots
- optional discovery expansion (LLMNR/NBNS)
## 1. Goals
### G1. Make existing outputs more trustworthy
- Replace placeholder timing fields with real measured values where feasible.
- Improve trace reliability and readability.
### G2. Expand diagnostics depth, not workflow complexity
- Keep subcommands explicit (no `doctor`, no guided flow).
- Focus on "give me evidence" tools.
### G3. Keep v0.2 compatibility
- Add flags and fields in an additive way.
- Keep default behavior safe and bounded.
## 2. Non-goals (explicitly out of scope)
- No `wtfn doctor ...` / one-shot diagnosis command (move to v0.4+).
- No shell completion scripts or man page generation.
- No new output modes like JSONL streaming / schema negotiation changes (stay stable).
- No OS-native TLS verifier in v0.3.0 (optional future enhancement).
## 3. Feature scope
### 3.1 probe trace: richer output (MUST)
Current: trace exists best-effort.
Target improvements:
- `--rdns`: reverse DNS lookup per hop (best-effort; cached; time-bounded)
- `--per-hop <n>`: send N probes per hop (default 3) to compute:
- avg/min/max RTT per hop
- loss % per hop
- `--icmp` and `--udp` modes remain best-effort; document privilege requirements
- Keep `--geoip` integration: hop IP -> Country/ASN
Acceptance:
- output includes per-hop loss and stable hop formatting
- JSON output contains hop arrays with RTT series
### 3.2 HTTP timing breakdown accuracy (MUST)
Current: `dns_ms` + `ttfb_ms` exist, but connect/tls are placeholders.
Target:
- implement `connect_ms` and `tls_ms` (best-effort) for HTTP/1.1 and HTTP/2
- keep total duration correct and stable
- when measurement unavailable (library limitation), report:
- `null` + add warning, never fake numbers
Acceptance:
- `wtfn http head|get` JSON contains:
- `dns_ms`, `connect_ms`, `tls_ms`, `ttfb_ms`, `total_ms`
- on timeout / failure, partial timing must still be meaningful.
### 3.3 HTTP/3 (optional feature flag) (SHOULD)
Current: feature-gated HTTP/3 path exists but is incomplete; keep disabled in default builds.
Target:
- add `--http3` support behind Cargo feature `http3`
- behavior:
- `--http3-only`: fail if HTTP/3 cannot be used
- `--http3`: try HTTP/3, fallback to HTTP/2 unless `--http3-only`
- provide clear error classes:
- UDP blocked, QUIC handshake timeout, TLS/ALPN mismatch, etc.
Acceptance:
- builds without `http3` feature still work
- with feature enabled, HTTP/3 works on at least one known compatible endpoint
- documented as experimental until stabilized
### 3.4 TLS extras: OCSP + richer cert parsing (MUST)
Current: `tls handshake/verify/cert/alpn` exists.
Target:
- show OCSP stapling presence (if exposed by library)
- richer certificate parsing for leaf and intermediates:
- SANs (DNS/IP)
- key usage / extended key usage (best-effort)
- signature algorithm (best-effort)
- new flags:
- `--show-extensions` (prints richer X.509 info)
- `--ocsp` (show stapling info if present)
Acceptance:
- TLS output includes richer leaf cert details when requested
- `--show-chain` remains fast and bounded
### 3.5 ports conns: active connection view + summaries (SHOULD)
Current: `ports listen/who`.
Target:
- add `wtfn ports conns`
- show active TCP connections with:
- local addr:port
- remote addr:port
- state (ESTABLISHED/TIME_WAIT/etc)
- PID/process name (best-effort)
- add summary mode:
- `--top <n>` show top remote IPs by count
- `--by-process` group by process
Acceptance:
- works on Linux + Windows best-effort
- never requires admin by default; if needed, return partial with warnings
### 3.6 cert roots: stronger baseline/diff (MUST)
Current: cert roots listing exists; baseline/diff exists.
Target improvements:
- normalize matching key: SHA256 fingerprint
- diff categories:
- added / removed
- changed validity (newly expired)
- subject/issuer changes
- add stable JSON schema for baseline files (include schema version)
Acceptance:
- baseline diff is stable across platforms (best-effort fields allowed)
- diff output is human-friendly and JSON-friendly
### 3.7 discover: LLMNR/NBNS (optional) (NICE)
Current: mDNS + SSDP exist; LLMNR/NBNS missing.
Target:
- add `wtfn discover llmnr --duration 3s`
- add `wtfn discover nbns --duration 3s`
- bounded, low-noise, rate-limited
Acceptance:
- best-effort implementation on Windows-first networks
- if unsupported on OS, show "not supported" error with exit code 5 (partial)
## 4. Compatibility & behavior rules
- Command names must remain stable.
- Existing flags must retain meaning.
- JSON output fields are additive only.
- Logging remains stderr-only; JSON output remains clean stdout.
## 5. Deliverables checklist
MUST:
- trace richer output + per-hop loss stats
- HTTP connect/tls timing best-effort with warnings when unknown
- TLS extras: OCSP indicator + richer x509 parsing
- ports conns basic implementation
- cert baseline/diff improvements
SHOULD:
- HTTP/3 behind feature flag
NICE:
- LLMNR/NBNS discovery
## 6. Definition of Done (v0.3.0)
- v0.3.0 builds on Linux (Debian/Ubuntu) + Windows.
- `wtfn probe trace` provides per-hop loss and optional rdns.
- `wtfn http head|get` reports accurate timing breakdown where possible.
- `wtfn tls ...` provides OCSP + SAN/extensions when requested.
- `wtfn ports conns` works best-effort and produces useful output.
- cert baseline/diff is stable and readable.
- No doctor command, no completions, no new output modes.

154
docs/RELEASE_v0.4.0.md Normal file
View File

@@ -0,0 +1,154 @@
# WTFnet v0.4.0 - DNS Leak Detection
v0.4.0 introduces a client-side DNS leak detector aimed at censorship-resistance threat models:
detect when DNS behavior escapes the intended safe path. The detector focuses on evidence:
transport, interface, destination, and (best-effort) process attribution.
This release does NOT include HTTP/3 or OS-native TLS verification.
## 0) Summary
New major capability: `dns leak` command group.
Core idea:
Passive monitor captures outbound DNS-like traffic -> classify (Plain DNS / DoT / DoH) ->
enrich with interface/route/process metadata -> evaluate leak definitions (A/B/C/D) ->
emit events + summary report.
Leak definitions are explicit:
- Leak-A: plaintext DNS outside safe path
- Leak-B: split-policy intent leak (proxy-required domains resolved via ISP/local path)
- Leak-C: encrypted DNS escape/bypass (DoH/DoT outside approved egress)
- Leak-D: mismatch risk indicator (DNS egress differs from TCP/TLS egress)
## 1) Goals
### G1. Detect DNS leaks without needing special test domains
Passive detection must work continuously and produce evidence.
### G2. Support censorship-resistance leak definitions
Include both classic VPN-bypass leaks and split-policy intent leaks.
### G3. Best-effort process attribution
Attach PID/PPID/process name when OS allows; degrade gracefully with confidence.
### G4. Privacy-aware by default
Support privacy modes: Full / Redacted / Minimal.
## 2) Non-goals (v0.4.0)
- No "doctor" / smart one-shot diagnosis command
- No shell completions / man pages
- No HTTP/3 support
- No OS-native TLS verifier integration
- No firewall modification / kill switch management (detection only)
## 3) New crates / architecture changes
### 3.1 New subcrate: `wtfnet-dnsleak`
Responsibilities:
- passive sensor (pcap/pnet feature-gated)
- DNS parser (plaintext only)
- transport classifier: udp53/tcp53/dot/doh (confidence)
- flow tracker + metadata enrichment
- process attribution integration
- leak rules engine (A/B/C/D)
- structured event + summary report builder
### 3.2 `wtfnet-platform` extension: flow ownership lookup
Add a new trait:
- FlowOwnerProvider: map observed traffic 5-tuple -> process info (best-effort)
Return process attribution confidence:
HIGH/MEDIUM/LOW/NONE plus failure reason.
## 4) CLI scope
### 4.1 Commands
New command group:
#### `wtfn dns leak watch`
Start passive monitoring for a bounded duration (default 10s):
- classify transports (udp53/tcp53/dot/doh)
- apply leak rules and emit events + summary
#### `wtfn dns leak status`
Print baseline snapshot:
- interfaces + routes
- system DNS configuration
- active policy summary
#### `wtfn dns leak report`
Parse a saved events file and produce a human summary.
### 4.2 Flags (proposed)
Common:
- `--duration <Ns|Nms>` (default 10s)
- `--iface <name>` (optional capture interface)
- `--policy <path>` (JSON policy file)
- `--profile <full-tunnel|proxy-stub|split>` (built-in presets)
- `--privacy <full|redacted|minimal>` (default redacted)
- `--out <path>` (write JSON report/events)
## 5) Policy model (v0.4.0)
Safe DNS path constraints can be defined by:
- allowed interfaces: loopback/tunnel
- allowed destination set: proxy IPs, internal resolvers
- allowed processes: only local stub/proxy can resolve upstream
- allowed ports: e.g. only 443 to proxy server
A DNS event is a leak if it violates safe-path constraints.
Built-in profiles:
1) full-tunnel VPN style
2) proxy + local stub (default, censorship model)
3) split policy
## 6) Outputs
### 6.1 Leak events (structured)
Each LeakEvent includes:
- timestamp
- transport: udp53/tcp53/dot/doh/unknown
- qname/qtype (nullable)
- interface + route_class
- dst ip:port
- process info (nullable) + attribution confidence
- leak_type: A/B/C/D
- severity: P0..P3
- evidence fields + optional geoip
### 6.2 Summary report
- leak counts by type
- top leaking processes (if available)
- top resolver destinations
- timeline/burst hints
## 7) Deliverables checklist
MUST:
- new `wtfnet-dnsleak` crate integrated into workspace + CLI
- passive capture for UDP/TCP 53 and TCP 853
- DoH heuristic classification (confidence-based)
- policy engine + Leak-A/B/C/D rules
- structured events + human summary
- privacy modes full/redacted/minimal
- best-effort process attribution with confidence and failure reason
SHOULD:
- saved report file support (`--out report.json`)
- route_class inference with policy hints + heuristics
NICE:
- correlation_id (DNS -> subsequent TCP/TLS connection) for Leak-D mismatch indicator
## 8) Definition of Done
- v0.4.0 builds on Linux (Debian/Ubuntu) and Windows
- `wtfn dns leak watch` detects:
- plaintext DNS leaving physical interface (Leak-A)
- DoT traffic leaving outside approved egress (Leak-C)
- DoH-ish encrypted resolver traffic outside policy (Leak-C)
- events include interface + dst + (best-effort) PID/process info
- output remains stable and additive; no breaking change to v0.3 commands

45
docs/WORK_ITEMS_v0.3.0.md Normal file
View File

@@ -0,0 +1,45 @@
# WTFnet v0.3.0 - Work Items
This is a practical checklist to execute v0.3.0.
## 1) probe/trace upgrades
- [x] add `--per-hop <n>` and store RTT samples per hop
- [x] compute loss% per hop
- [x] add `--rdns` best-effort reverse lookup (cached + time-bounded)
- [x] improve hop formatting + JSON schema
## 2) http timing improvements
- [x] implement `connect_ms` and `tls_ms` timing
- [x] report `null` + warning when measurement unavailable
- [ ] keep current `dns_ms` and `ttfb_ms`
## 3) tls extras
- [x] add OCSP stapling presence indicator (if available)
- [x] parse SANs and key usage / EKU best-effort
- [x] add `--show-extensions` and `--ocsp` flags
## 4) ports conns
- [x] implement `wtfn ports conns`
- [x] add `--top <n>` and `--by-process`
- [x] best-effort PID mapping with warnings
## 5) cert baseline/diff improvements
- [x] baseline schema version
- [x] match by SHA256 fingerprint
- [x] diff categories: add/remove/expired/changed
## 6) optional LLMNR/NBNS
- [x] implement `discover llmnr`
- [x] implement `discover nbns`
- [x] bounded collection, low-noise
## 7) docs updates
- [x] update README roadmap
- [x] update COMMANDS.md with new flags/commands
- [x] add RELEASE_v0.3.0.md
## 8) optional HTTP/3 (last)
- [x] add `http3` cargo feature + deps
- [x] implement `--http3` / `--http3-only`
- [x] define error classification for QUIC failures
- [x] keep feature disabled in default builds until stabilized

33
docs/WORK_ITEMS_v0.4.0.md Normal file
View File

@@ -0,0 +1,33 @@
# WTFnet v0.4.0 - Work Items
This is a practical checklist to execute v0.4.0.
## 1) platform flow ownership
- [x] add FlowOwnerProvider trait + data types
- [x] implement Linux best-effort lookup
- [x] implement Windows best-effort lookup
## 2) new wtfnet-dnsleak crate
- [x] crate scaffold + pcap feature
- [x] UDP/TCP 53 capture + classify
- [x] DoT detection (TCP 853)
- [x] policy model + profiles
- [x] leak rules A/B/C (partial)
- [x] privacy modes
- [x] report + summary builder
## 3) CLI wiring
- [x] add `dns leak status`
- [x] add `dns leak watch`
- [x] add `dns leak report`
## 4) docs updates
- [x] add `docs/RELEASE_v0.4.0.md`
- [x] add `docs/DNS_LEAK_DETECTOR_IMPLEMENTATION.md`
- [x] update README roadmap + flags
- [x] update COMMANDS.md
- [x] update status/implementation docs
## 5) follow-ups
- [ ] add DoH heuristic classification (optional)
- [x] add Leak-D mismatch correlation (optional)

View File

@@ -0,0 +1,2 @@
sudo ./target/release/wtfn dns watch --duration 5s
iface: eno1 duration_ms: 5000 filter: -

View File

@@ -0,0 +1,723 @@
Below is a **high-level (language-agnostic)** design for a **client-side DNS leak detector** aimed at *censorship-resistance threat models*, i.e.:
> “Censor/ISP can observe/log DNS intent or infer proxy usage; we want to detect when DNS behavior escapes the intended protection path.”
I'll cover: **definitions**, **detection standards**, **workflow**, **modules**, **passive+active detection**, **outputs**, and **test methodology**.
---
# 1) Scope and goals
## Goals
Your detector should answer, with evidence:
1. **Did any DNS query leave the device outside the intended safe path?**
2. **Which domains leaked?** (when visible)
3. **Which transport leaked?** (UDP/53, TCP/53, DoT/853, DoH)
4. **Which interface leaked?** (Wi-Fi/Ethernet vs tunnel)
5. **Which process/app triggered it?** (if your OS allows attribution)
And in your censorship model, it should also detect:
6. **Split-policy intent leakage**: “unknown/sensitive domains were resolved using domestic/ISP-facing DNS.”
## Non-goals (be explicit)
* Not a censorship circumvention tool itself
* Not a full firewall manager (can suggest fixes, but detection is the core)
* Not perfect attribution on every OS (process mapping may be partial)
---
# 2) Define “DNS leak” precisely (your program's standard)
You need a **formal definition** because “DNS leak” is overloaded.
## Standard definition A (classic VPN / tunnel bypass)
A leak occurs if:
> **An unencrypted DNS query is sent outside the secure tunnel path**
> This is essentially how popular leak test sites define it (“unencrypted DNS query sent OUTSIDE the established VPN tunnel”). ([IP Leak][1])
Your detector should implement it in a machine-checkable way:
**Leak-A condition**
* DNS over **UDP/53 or TCP/53**
* Destination is **not** a “trusted resolver path” (e.g., not the tunnel interface, not loopback stub, not proxy channel)
* Interface is **not** the intended egress
✅ Strong for censorship: plaintext DNS exposes intent.
---
## Standard definition B (split-policy intent leak)
A leak occurs if:
> **A domain that should be “proxied / remote-resolved” was queried via local/ISP-facing DNS.**
This is the “proxy split rules still leak intent” case.
**Leak-B condition**
* Query name matches either:
* a “proxy-required set” (sensitive list, non-allowlist, unknown), or
* a policy rule (“everything except allowlist must resolve via proxy DNS”)
* And the query was observed going to:
* ISP resolver(s) / domestic resolver(s) / non-tunnel interface
✅ This is the leak most users in censorship settings care about.
---
## Standard definition C (encrypted DNS escape / bypass)
A leak occurs if:
> DNS was encrypted, but escaped the intended channel (e.g., app uses its own DoH directly to the Internet).
This matters because DoH hides the QNAME but still creates **observable behavior** and breaks your “DNS must follow proxy” invariant.
**Leak-C condition**
* DoH (RFC 8484) ([IETF Datatracker][2]) or DoT (RFC 7858) ([IETF Datatracker][3]) flow exists
* And it does **not** go through your approved egress path (tunnel/proxy)
✅ Detects “Firefox/Chrome built-in DoH bypass” style cases.
---
## Standard definition D (mismatch risk indicator)
Not a “leak” by itself, but a **proxy inference amplifier**:
> DNS egress region/path differs from traffic egress region/path.
This is a *censorship-resistance hygiene metric*, not a binary leak.
**Mismatch condition**
* Same domain produces:
* DNS resolution via path X
* TCP/TLS connection via path Y
* Where X ≠ Y (interface, ASN region, etc.)
✅ Helps catch “DNS direct, traffic proxy” or “DNS proxy, traffic direct” weirdness.
---
# 3) High-level architecture
## Core components
1. **Policy & Configuration**
* What counts as “safe DNS path”
* Which interfaces are “protected” (tunnel) vs “physical”
* Allowlist / proxy-required sets (optional)
* Known resolver lists (optional)
* Severity thresholds
2. **Traffic Sensor (Passive Monitor)**
* Captures outbound traffic metadata (and optionally payload for DNS parsing)
* Must cover:
* UDP/53, TCP/53
* TCP/853 (DoT)
* HTTPS flows that look like DoH (see below)
* Emits normalized events into a pipeline
3. **Classifier**
* Recognize DNS protocol types:
* Plain DNS
* DoT
* DoH
* Attach confidence scores (especially for DoH)
4. **DNS Parser (for plaintext DNS only)**
* Extract: QNAME, QTYPE, transaction IDs, response codes (optional)
* Store minimally (privacy-aware)
5. **Flow Tracker**
* Correlate packets into “flows”
* Map flow → interface → destination → process (if possible)
* Track timing correlation: DNS → connection attempts
6. **Leak Detector (Rules Engine)**
* Apply Leak-A/B/C/D definitions
* Produce leak events + severity + evidence chain
7. **Active Prober**
* Generates controlled DNS lookups to test behavior
* Can test fail-closed, bypasses, multi-interface behavior, etc.
8. **Report Generator**
* Human-readable summary
* Machine-readable logs (JSON)
* Recommendations (non-invasive)
---
# 4) Workflow (end-to-end)
## Workflow 0: Setup & baseline
1. Enumerate interfaces and routes
* Identify physical NICs
* Identify tunnel / proxy interface (or “expected egress destinations”)
2. Identify system DNS configuration
* Default resolvers per interface
* Local stub presence (127.0.0.1, etc.)
3. Load policy profile
* Full-tunnel, split-tunnel, or proxy-based
4. Start passive monitor
**Output:** “Current state snapshot” (useful even before testing).
---
## Workflow 1: Passive detection loop (always-on)
Continuously:
1. Capture outbound packets/flows
2. Classify as DNS-like (plain DNS / DoT / DoH / unknown)
3. If plaintext DNS → parse QNAME/QTYPE
4. Assign metadata:
* interface
* dst IP/port
* process (if possible)
* timestamp
5. Evaluate leak rules:
* Leak-A/B/C/D
6. Write event log + optional real-time alert
**Key design point:** passive mode should be able to detect leaks **without requiring any special test domain**.
---
## Workflow 2: Active test suite (on-demand)
Active tests exist because some leaks are intermittent or only happen under stress.
### Active Test A: “No plaintext DNS escape”
* Trigger a set of DNS queries (unique random domains)
* Verify **zero UDP/53 & TCP/53** leaves physical interfaces
### Active Test B: “Fail-closed test”
* Temporarily disrupt the “protected path” (e.g., tunnel down)
* Trigger lookups again
* Expected: DNS fails (no fallback to ISP DNS)
### Active Test C: “App bypass test”
* Launch test scenarios that mimic real apps
* Confirm no direct DoH/DoT flows go to public Internet outside the proxy path
### Active Test D: “Split-policy correctness”
* Query domains that should be:
* direct-allowed
* proxy-required
* unknown
* Confirm resolution path matches policy
---
# 5) How to recognize DNS transports (detection mechanics)
## Plain DNS (strongest signal)
**Match conditions**
* UDP dst port 53 OR TCP dst port 53
* Parse DNS header
* Extract QNAME/QTYPE
**Evidence strength:** high
**Intent visibility:** yes (domain visible)
---
## DoT (port-based, easy)
DoT is defined over TLS, typically port **853**. ([IETF Datatracker][3])
**Match conditions**
* TCP dst port 853
* Optionally confirm TLS handshake exists
**Evidence strength:** high
**Intent visibility:** no (domain hidden)
---
## DoH (harder; heuristic + optional allowlists)
DoH is DNS over HTTPS (RFC 8484). ([IETF Datatracker][2])
**Recognizers (from strongest to weakest):**
1. HTTP request with `Content-Type: application/dns-message`
2. Path/pattern common to DoH endpoints (optional list)
3. SNI matches known DoH providers (optional list)
4. Traffic resembles frequent small HTTPS POST/GET bursts typical of DoH (weak)
**Evidence strength:** medium
**Intent visibility:** no (domain hidden)
**Important for your use-case:** you may not need to *prove* it's DoH; you mostly need to detect “DNS-like encrypted resolver traffic bypassing the proxy channel.”
---
# 6) Policy model: define “safe DNS path”
You need a simple abstraction users can configure:
### Safe DNS path can be defined by one or more of:
* **Allowed interfaces**
* loopback (local stub)
* tunnel interface
* **Allowed destination set**
* proxy server IP(s)
* internal resolver IP(s)
* **Allowed process**
* only your local stub + proxy allowed to resolve externally
* **Allowed port set**
* maybe only permit 443 to proxy server (if DNS rides inside it)
Then implement:
**A DNS event is a “leak” if it violates safe-path constraints.**
---
# 7) Leak severity model (useful for real-world debugging)
### Severity P0 (critical)
* Plaintext DNS (UDP/TCP 53) on physical interface to ISP/public resolver
* Especially if QNAME matches proxy-required/sensitive list
### Severity P1 (high)
* DoH/DoT bypassing proxy channel directly to public Internet
### Severity P2 (medium)
* Policy mismatch: domain resolved locally but connection later proxied (or vice versa)
### Severity P3 (low / info)
* Authoritative-side “resolver egress exposure” (less relevant for client-side leak detector)
* CDN performance mismatch indicators
---
# 8) Outputs and reporting
## Real-time console output (for debugging)
* “DNS leak detected: Plain DNS”
* domain (if visible)
* destination resolver IP
* interface
* process name (if available)
* policy rule violated
* suggested fix category (e.g., “force stub + block port 53”)
## Forensics log (machine-readable)
A single **LeakEvent** record could include:
* timestamp
* leak_type (A/B/C/D)
* transport (UDP53, TCP53, DoT, DoH)
* qname/qtype (nullable)
* src_iface / dst_ip / dst_port
* process_id/process_name (nullable)
* correlation_id (link DNS → subsequent connection attempt)
* confidence score (esp. DoH)
* raw evidence pointers (pcap offsets / event IDs)
## Summary report
* Leak counts by type
* Top leaking processes
* Top leaking resolver destinations
* Timeline view (bursts often indicate OS fallback behavior)
* “Pass/Fail” per policy definition
---
# 9) Validation strategy (“how do I know my detector is correct?”)
## Ground truth tests
1. **Known-leak scenario**
* intentionally set OS DNS to ISP DNS, no tunnel
* detector must catch plaintext DNS
2. **Known-safe scenario**
* local stub only + blocked outbound 53/853
* detector should show zero leaks
3. **Bypass scenario**
* enable browser built-in DoH directly
* detector should catch encrypted resolver bypass (Leak-C)
4. **Split-policy scenario**
* allowlist CN direct, everything else proxy-resolve
* detector should show:
* allowlist resolved direct
* unknown resolved via proxy path
---
# 10) Recommended “profiles” (makes tool usable)
Provide built-in presets:
### Profile 1: Full-tunnel VPN
* allow DNS only via tunnel interface or loopback stub
* any UDP/TCP 53 on physical NIC = leak
### Profile 2: Proxy + local stub (your case)
* allow DNS only to loopback stub
* allow stub upstream only via proxy server destinations
* flag any direct DoH/DoT to public endpoints
### Profile 3: Split tunnel (geoip + allowlist)
* allow plaintext DNS **only** for allowlisted domains (if user accepts risk)
* enforce “unknown → proxy-resolve”
* emphasize Leak-B correctness
---
Below is an updated **high-level design** (still language-agnostic) that integrates **process attribution** cleanly, including how it fits into the workflow and what to log.
---
# 1) New component: Process Attribution Engine (PAE)
## Purpose
When a DNS-like event is observed, the PAE tries to attach:
* **PID**
* **PPID**
* **process name**
* *(optional but extremely useful)* full command line, executable path, user, container/app package, etc.
This lets your logs answer:
> “Which program generated the leaked DNS request?”
> “Was it a browser, OS service, updater, antivirus, proxy itself, or some library?”
## Position in the pipeline
It sits between **Traffic Sensor** and **Leak Detector** as an “event enricher”:
**Traffic Event → (Classifier) → (Process Attribution) → Enriched Event → Leak Rules → Report**
---
# 2) Updated architecture (with process attribution)
### Existing modules (from earlier design)
1. Policy & Configuration
2. Traffic Sensor (packet/flow monitor)
3. Classifier (Plain DNS / DoT / DoH / Unknown)
4. DNS Parser (plaintext only)
5. Flow Tracker
6. Leak Detector (rules engine)
7. Active Prober
8. Report Generator
### New module
9. **Process Attribution Engine (PAE)**
* resolves “who owns this flow / packet”
* emits PID/PPID/name
* handles platform-specific differences and fallbacks
---
# 3) Workflow changes (what happens when a potential leak is seen)
## Passive detection loop (updated)
1. Capture outbound traffic event
2. Classify transport type:
* UDP/53, TCP/53 → plaintext DNS
* TCP/853 → DoT
* HTTPS patterns → DoH (heuristic)
3. Extract the **5-tuple**
* src IP:port, dst IP:port, protocol
4. **PAE lookup**
* resolve the owner process for this traffic
* attach PID/PPID/name (+ optional metadata)
5. Apply leak rules (A/B/C/D)
6. Emit:
* realtime log line (human readable)
* structured record (JSON/event log)
---
# 4) Process attribution: what to detect and how (high-level)
Process attribution always works on one core concept:
> **Map observed traffic (socket/flow) → owning process**
### Inputs PAE needs
* protocol (UDP/TCP)
* local src port
* local address
* timestamp
* optionally: connection state / flow ID
### Output from PAE
* `pid`, `ppid`, `process_name`
* optional enrichment:
* `exe_path`
* `cmdline`
* `user`
* “process tree chain” (for debugging: parent → child → …)
---
# 5) Platform support strategy (without implementation detail)
Process attribution is **OS-specific**, so structure it as:
## “Attribution Provider” interface
* Provider A: “kernel-level flow owner”
* Provider B: “socket table owner lookup”
* Provider C: “event tracing feed”
* Provider D: fallback “unknown / not supported”
Your main design goal is:
### Design rule
**Attribution must be best-effort + gracefully degrading**, never blocking detection.
So you always log the leak even if PID is unavailable:
* `pid=null, attribution_confidence=LOW`
---
# 6) Attribution confidence + race handling (important!)
Attribution can be tricky because:
* a process may exit quickly (“short-lived resolver helper”)
* ports can be reused
* NAT or local proxies may obscure the real origin
So log **confidence**:
* **HIGH**: direct mapping from kernel/socket owner at time of event
* **MEDIUM**: mapping by lookup shortly after event (possible race)
* **LOW**: inferred / uncertain
* **NONE**: not resolved
Also record *why* attribution failed:
* “permission denied”
* “flow already gone”
* “unsupported transport”
* “ambiguous mapping”
This makes debugging much easier.
---
# 7) What PID/PPID adds to your leak definitions
### Leak-A (plaintext DNS outside safe path)
Now you can say:
> “`svchost.exe (PID 1234)` sent UDP/53 to ISP resolver on Wi-Fi interface”
### Leak-B (split-policy intent leak)
You can catch:
* “game launcher looked up blocked domain”
* “system service triggered a sensitive name unexpectedly”
* “your proxy itself isn't actually resolving via its own channel”
### Leak-C (encrypted DNS bypass)
This becomes *very actionable*:
> “`firefox.exe` started direct DoH to resolver outside tunnel”
### Leak-D (mismatch indicator)
You can also correlate:
* DNS resolved by one process
* connection made by another process
(e.g., local stub vs app)
---
# 8) Reporting / realtime logging format (updated)
## Realtime log line (human readable)
Example (conceptual):
* **[P0][Leak-A] Plain DNS leaked**
* Domain: `example-sensitive.com` (A)
* From: `Wi-Fi` → To: `1.2.3.4:53`
* Process: `browser.exe` **PID=4321 PPID=1200**
* Policy violated: “No UDP/53 on physical NIC”
## Structured event (JSON-style fields)
Minimum recommended fields:
### Event identity
* `event_id`
* `timestamp`
### DNS identity
* `transport` (udp53/tcp53/dot/doh/unknown)
* `qname` (nullable)
* `qtype` (nullable)
### Network path
* `interface_name`
* `src_ip`, `src_port`
* `dst_ip`, `dst_port`
* `route_class` (tunnel / physical / loopback)
### Process identity (your requested additions)
* `pid`
* `ppid`
* `process_name`
* optional:
* `exe_path`
* `cmdline`
* `user`
### Detection result
* `leak_type` (A/B/C/D)
* `severity` (P0..P3)
* `policy_rule_id`
* `attribution_confidence`
---
# 9) Privacy and safety notes (important in a DNS tool)
Because you're logging **domains** and **process command lines**, this becomes sensitive.
Add a “privacy mode” policy:
* **Full**: store full domain + cmdline
* **Redacted**: hash domain; keep TLD only; truncate cmdline
* **Minimal**: only keep leak counts + resolver IPs + process name
Also allow “capture window” (rotate logs, avoid giant histories).
---
# 10) UX feature: “Show me the process tree”
When a leak happens, a good debugger view is:
* `PID: foo (pid 1000)`
* `PPID: bar (pid 900)`
* `PPID: systemd/svchost/etc`
This is extremely useful to identify:
* browsers spawning helpers
* OS DNS services
* containerized processes
* update agents / telemetry daemons
So your report generator should support:
**Process chain rendering** (where possible)
---
# 11) Practical edge cases you should detect (with PID helping)
1. **Local stub is fine, upstream isnt**
* Your local resolver process leaks upstream plaintext DNS
2. **Browser uses its own DoH**
* process attribution immediately reveals it
3. **Multiple interfaces**
* a leak only happens on Wi-Fi but not Ethernet
4. **Kill-switch failure**
* when tunnel drops, PID shows which app starts leaking first
---

View File

@@ -0,0 +1,55 @@
# DNS Leak Detection - Implementation Status
This document tracks the current DNS leak detector implementation against the design in
`docs/dns_leak_detection_design.md` and `docs/requirement_docs_v0.4.md`.
## Implemented
- New `wtfnet-dnsleak` crate with passive capture (pcap feature).
- Transport classification:
- Plain DNS (UDP/53, TCP/53) with qname/qtype parsing.
- DoT (TCP/853) detection.
- DoH detection is not implemented (skipped for now).
- Leak rules:
- Leak-A (plaintext DNS outside safe path).
- Leak-B (split-policy intent leak based on proxy-required/allowlist domains).
- Leak-C (encrypted DNS bypass for DoT).
- Leak-D (basic mismatch: DNS response IP -> outbound TCP SYN on different route).
- Policy profiles: `full-tunnel`, `proxy-stub`, `split`.
- Privacy modes: full/redacted/minimal (redacts qname).
- Process attribution:
- Best-effort `FlowOwnerProvider` with Linux `/proc` and Windows `netstat` lookups.
- Confidence levels and failure reasons exposed in events.
- CLI commands:
- `dns leak status`
- `dns leak watch`
- `dns leak report`
- `dns leak watch --iface-diag` (diagnostics for capture-capable interfaces).
- `dns leak watch --follow` runs until Ctrl-C by combining a long duration with
a `tokio::signal::ctrl_c()` early-exit path.
- Interface selection:
- per-interface open timeout to avoid capture hangs
- ordered scan prefers non-loopback + named ethernet/wlan and interfaces with IPs
- verbose logging of interface selection attempts + failures (use `-v` / `-vv`)
- overall watch timeout accounts for worst-case interface scan time
- Capture loop:
- receiver runs in a worker thread; main loop polls with a short timeout to avoid blocking
## Partially implemented
- Route/interface classification: heuristic only (loopback/tunnel/physical by iface name).
- Safe path matching: allowed ifaces/dests/ports/processes; no route-based policy.
## Not implemented (v0.4 backlog)
- DoH heuristic detection (SNI/endpoint list/traffic shape).
- GeoIP enrichment of leak events.
- Process tree reporting (PPID chain).
## Known limitations
- On Windows, pcap capture may require selecting a specific NPF interface; use
`dns leak watch --iface-diag` to list interfaces that can be opened.
- Leak-D test attempts on Windows did not fire; see test notes below.
## Test notes
- `dns leak watch --duration 8s --summary-only --iface <NPF>` captured UDP/53 and produced Leak-A.
- `dns leak watch --duration 15s --iface <NPF>` with scripted DNS query + TCP connect:
- UDP/53 query/response captured (Leak-A).
- TCP SYNs observed, but did not match cached DNS response IPs, so Leak-D did not trigger.

View File

@@ -0,0 +1,31 @@
# Implementation Status vs Design
This document tracks current implementation status against the original design in `docs/implementation_notes.md`.
## Matches the design
- Workspace layout with feature crates (`wtfnet-core`, `wtfnet-platform`, `wtfnet-geoip`, `wtfnet-probe`, `wtfnet-dns`, `wtfnet-http`, `wtfnet-tls`, `wtfnet-discover`, `wtfnet-diag`).
- CLI remains a thin wrapper around library crates.
- Platform abstraction uses traits with OS dispatch.
- GeoIP: local GeoLite2 Country + ASN support.
- Probe: ping/tcping/trace with GeoIP enrichment.
- DNS: Hickory-based query/detect with best-effort heuristics.
- DNS leak detection: new passive monitor with policy and privacy modes (best-effort).
- HTTP: head/get via reqwest with best-effort timing breakdown and optional HTTP/3 (feature-gated).
- TLS: rustls-based handshake/verify/cert/alpn.
- Discover: mDNS/SSDP bounded collection plus LLMNR/NBNS (best-effort).
- Diag: bundle export in zip.
## Deviations or refinements
- DNS adds DoT/DoH and SOCKS5 proxy support.
- HTTP/TLS/TCP ping include SOCKS5 proxy support.
- HTTP timing breakdown is best-effort: `dns_ms`/`ttfb_ms` are captured; `connect_ms`/`tls_ms` are measured via a separate probe and can be `null` with warnings.
- DNS watch uses `pnet` and is feature-gated as best-effort.
## Gaps vs design (as of now)
- HTTP/3 is feature-gated and best-effort; not enabled in default builds.
- TLS verification is rustls-based (no OS-native verifier).
- DNS leak DoH detection is heuristic and currently optional.
## Current stage summary
- v0.1 scope is complete.
- v0.2 scope mostly complete; remaining are deeper test coverage and optional enhancements.

View File

@@ -0,0 +1,267 @@
Below are **repo-ready v0.3.0 docs**.
They're written to match your **current implemented feature set** (v0.2 mostly done: sys/ports/neigh/certs/geoip/probe/dns/http/tls/discover/diag/calc).
They also respect your decision to **NOT include** in v0.3.0:
* shell completions / man pages
* the “smart one-shot doctor” command
* extra output ergonomics for automation
And they target the **real gaps / next upgrades** you still have: **HTTP/3**, deeper **HTTP timing**, **TLS extras**, richer **trace**, **ports conns**, better **cert baseline/diff**, and optionally **LLMNR/NBNS discovery** (currently missing).
---
## 1) `docs/RELEASE_v0.3.0.md`
```markdown
# WTFnet v0.3.0 — Release Plan
Binary name in examples: `wtfn` (current CLI examples use this form).
Project scope: Linux (Debian/Ubuntu) + Windows first-class.
## 0. Summary
v0.3.0 focuses on improving **diagnostic depth and fidelity** of existing commands rather than adding a "smart doctor" workflow.
Major upgrades in this release:
- richer traceroute output and per-hop statistics
- HTTP timing breakdown accuracy (connect/tls stages)
- optional HTTP/3 support (best-effort)
- TLS diagnostics upgrades (OCSP stapling indicator, richer certificate parsing)
- ports connections view and summaries
- improved cert baseline/diff for system roots
- optional discovery expansion (LLMNR/NBNS)
---
## 1. Goals
### G1. Make existing outputs more trustworthy
- Replace placeholder timing fields with real measured values where feasible.
- Improve trace reliability and readability.
### G2. Expand diagnostics depth, not workflow complexity
- Keep subcommands explicit (no `doctor`, no guided flow).
- Focus on "give me evidence" tools.
### G3. Keep v0.2 compatibility
- Add flags and fields in an additive way.
- Keep default behavior safe and bounded.
---
## 2. Non-goals (explicitly out of scope)
- No `wtfn doctor ...` / one-shot diagnosis command (move to v0.4+).
- No shell completion scripts or man page generation.
- No new output modes like JSONL streaming / schema negotiation changes (stay stable).
- No OS-native TLS verifier in v0.3.0 (optional future enhancement).
---
## 3. Feature scope
### 3.1 probe trace: richer output (MUST)
Current: trace exists best-effort.
Target improvements:
- `--rdns`: reverse DNS lookup per hop (best-effort; cached; time-bounded)
- `--per-hop <n>`: send N probes per hop (default 3) to compute:
- avg/min/max RTT per hop
- loss % per hop
- `--icmp` and `--udp` modes remain best-effort; document privilege requirements
- Keep `--geoip` integration: hop IP → Country/ASN
Acceptance:
- output includes per-hop loss and stable hop formatting
- JSON output contains hop arrays with RTT series
---
### 3.2 HTTP timing breakdown accuracy (MUST)
Current: `dns_ms` + `ttfb_ms` exist, but connect/tls are placeholders.
Target:
- implement `connect_ms` and `tls_ms` (best-effort) for HTTP/1.1 and HTTP/2
- keep total duration correct and stable
- when measurement unavailable (library limitation), report:
- `null` + add warning, never fake numbers
Acceptance:
- `wtfn http head|get` JSON contains:
- `dns_ms`, `connect_ms`, `tls_ms`, `ttfb_ms`, `total_ms`
- on timeout / failure, partial timing must still be meaningful.
---
### 3.3 HTTP/3 (optional feature flag) (SHOULD)
Current: HTTP/3 not implemented.
Target:
- add `--http3` support behind Cargo feature `http3`
- behavior:
- `--http3-only`: fail if HTTP/3 cannot be used
- `--http3`: try HTTP/3, fallback to HTTP/2 unless `--http3-only`
- provide clear error classes:
- UDP blocked, QUIC handshake timeout, TLS/ALPN mismatch, etc.
Acceptance:
- builds without `http3` feature still work
- with feature enabled, HTTP/3 works on at least one known compatible endpoint
---
### 3.4 TLS extras: OCSP + richer cert parsing (MUST)
Current: `tls handshake/verify/cert/alpn` exists.
Target:
- show OCSP stapling presence (if exposed by library)
- richer certificate parsing for leaf and intermediates:
- SANs (DNS/IP)
- key usage / extended key usage (best-effort)
- signature algorithm (best-effort)
- new flags:
- `--show-extensions` (prints richer X.509 info)
- `--ocsp` (show stapling info if present)
Acceptance:
- TLS output includes richer leaf cert details when requested
- `--show-chain` remains fast and bounded
---
### 3.5 ports conns: active connection view + summaries (SHOULD)
Current: `ports listen/who`.
Target:
- add `wtfn ports conns`
- show active TCP connections with:
- local addr:port
- remote addr:port
- state (ESTABLISHED/TIME_WAIT/etc)
- PID/process name (best-effort)
- add summary mode:
- `--top <n>` show top remote IPs by count
- `--by-process` group by process
Acceptance:
- works on Linux + Windows best-effort
- never requires admin by default; if needed, return partial with warnings
---
### 3.6 cert roots: stronger baseline/diff (MUST)
Current: cert roots listing exists; baseline/diff exists.
Target improvements:
- normalize matching key: SHA256 fingerprint
- diff categories:
- added / removed
- changed validity (newly expired)
- subject/issuer changes
- add stable JSON schema for baseline files (include schema version)
Acceptance:
- baseline diff is stable across platforms (best-effort fields allowed)
- diff output is human-friendly and JSON-friendly
---
### 3.7 discover: LLMNR/NBNS (optional) (NICE)
Current: mDNS + SSDP exist; LLMNR/NBNS missing.
Target:
- add `wtfn discover llmnr --duration 3s`
- add `wtfn discover nbns --duration 3s`
- bounded, low-noise, rate-limited
Acceptance:
- best-effort implementation on Windows-first networks
- if unsupported on OS, show "not supported" error with exit code 5 (partial)
---
## 4. Compatibility & behavior rules
- Command names must remain stable.
- Existing flags must retain meaning.
- JSON output fields are additive only.
- Logging remains stderr-only; JSON output remains clean stdout.
---
## 5. Deliverables checklist
MUST:
- trace richer output + per-hop loss stats
- HTTP connect/tls timing best-effort with warnings when unknown
- TLS extras: OCSP indicator + richer x509 parsing
- ports conns basic implementation
- cert baseline/diff improvements
SHOULD:
- HTTP/3 behind feature flag
NICE:
- LLMNR/NBNS discovery
---
## 6. Definition of Done (v0.3.0)
- v0.3.0 builds on Linux (Debian/Ubuntu) + Windows.
- `wtfn probe trace` provides per-hop loss and optional rdns.
- `wtfn http head|get` reports accurate timing breakdown where possible.
- `wtfn tls ...` provides OCSP + SAN/extensions when requested.
- `wtfn ports conns` works best-effort and produces useful output.
- cert baseline/diff is stable and readable.
- No doctor command, no completions, no new output modes.
```
---
## 3) `docs/WORK_ITEMS_v0.3.0.md` (engineering task list)
```markdown
# WTFnet v0.3.0 — Work Items
This is a practical checklist to execute v0.3.0.
## 1) probe/trace upgrades
- [ ] add `--per-hop <n>` and store RTT samples per hop
- [ ] compute loss% per hop
- [ ] add `--rdns` best-effort reverse lookup (cached + time-bounded)
- [ ] improve hop formatting + JSON schema
## 2) http timing improvements
- [ ] implement `connect_ms` and `tls_ms` timing
- [ ] report `null` + warning when measurement unavailable
- [ ] keep current `dns_ms` and `ttfb_ms`
## 3) optional HTTP/3
- [ ] add `http3` cargo feature + deps
- [ ] implement `--http3` / `--http3-only`
- [ ] define error classification for QUIC failures
## 4) tls extras
- [ ] add OCSP stapling presence indicator (if available)
- [ ] parse SANs and key usage / EKU best-effort
- [ ] add `--show-extensions` and `--ocsp` flags
## 5) ports conns
- [ ] implement `wtfn ports conns`
- [ ] add `--top <n>` and `--by-process`
- [ ] best-effort PID mapping with warnings
## 6) cert baseline/diff improvements
- [ ] baseline schema version
- [ ] match by SHA256 fingerprint
- [ ] diff categories: add/remove/expired/changed
## 7) optional LLMNR/NBNS
- [ ] implement `discover llmnr`
- [ ] implement `discover nbns`
- [ ] bounded collection, low-noise
## 8) docs updates
- [ ] update README roadmap
- [ ] update COMMANDS.md with new flags/commands
- [ ] add RELEASE_v0.3.0.md
```
---

View File

@@ -0,0 +1,484 @@
Yep — your DNS leak detector design **fits your current architecture cleanly**, and **making it a dedicated subcrate is the right move**.
Your existing workspace already follows “thin CLI + feature crates + platform traits + OS dispatch”, so the detector drops in naturally as **one new feature crate** plus a small extension to `wtfnet-platform` for **best-effort process attribution**. This matches the design's pipeline idea: **Traffic Sensor → Classifier → (Process Attribution) → Leak Rules → Report**
Below I'll do two things:
1. **Evaluate fit + integration plan** (what needs changing / what can be reused)
2. Provide **repo-ready v0.4.0 documents**, focused on implementing this detector.
---
# 1) Can this design fit your current architecture?
## ✅ Yes — it matches your crate boundaries almost 1:1
Your detector design already breaks down into modules that map directly into a new subcrate:
* Policy & Configuration
* Passive Traffic Sensor (UDP53/TCP53/853/“DoH-ish”)
* Classifier (DNS / DoT / DoH + confidence)
* DNS Parser (plaintext only)
* Flow Tracker (interface/route/process correlation)
* Leak Detector rules A/B/C/D
* Report generator + structured events
So: **new crate = `wtfnet-dnsleak`**, and keep `wtfnet-dns` for **active query/detect/watch** (already exists).
## ✅ What you can reuse immediately
### Reuse from your current codebase
* `wtfnet-dns watch` capture plumbing (you already have passive-ish capture)
* existing DNS parsing logic (for UDP/TCP 53)
* existing GeoIP enrichment pipeline (optional)
* `wtfnet-platform` interface snapshot + routing info (for “which interface leaked?”)
* your JSON output envelope + logging style
### Reuse from the detector design directly
* Leak definitions A/B/C/D (this is already precise and CLI-tool friendly)
* DoH recognition levels + confidence model (strong → weak)
* “safe DNS path” abstraction (interfaces/dests/process/ports)
* process attribution confidence levels and failure reasons
* privacy modes (Full/Redacted/Minimal)
## ✅ What you'll need to add (small + contained)
### 1) New platform trait for flow → process owner (best effort)
Your design explicitly wants PID/PPID/process name to show “who leaked DNS” , and it recommends an OS-specific provider interface .
So extend `wtfnet-platform` with something like:
* `FlowOwnerProvider::owner_of(tuple, timestamp) -> ProcessInfo + confidence`
This stays consistent with your existing “platform traits + OS dispatch” architecture.
### 2) Route/interface classification (“tunnel vs physical vs loopback”)
Your event schema wants `route_class` (tunnel/physical/loopback) .
Implement this via **policy-configured interface labels** + a small heuristic fallback.
### 3) DoH detection will be heuristic (and thats OK)
Your design already plans for this: DoH classification is “hard”, confidence-based .
In v0.4, keep it simple:
* DoT = TCP/853
* Plain DNS = UDP/TCP 53 (+ parse QNAME)
* “DoH-ish” = TCP/443 to known resolver host/IP OR “small HTTPS bursts” pattern
…and attach confidence.
---
# 2) v0.4.0 documents (repo-ready)
Below are two docs you can drop into `docs/`.
---
## `docs/RELEASE_v0.4.0.md`
```markdown
# WTFnet v0.4.0 — DNS Leak Detection
v0.4.0 introduces a client-side DNS leak detector aimed at censorship-resistance threat models:
detect when DNS behavior escapes the intended safe path. The detector focuses on evidence:
transport, interface, destination, and (best-effort) process attribution.
This release does NOT include HTTP/3 or OS-native TLS verification.
---
## 0) Summary
New major capability: `dns leak` command group.
Core idea:
Passive monitor captures outbound DNS-like traffic → classify (Plain DNS / DoT / DoH) →
enrich with interface/route/process metadata → evaluate leak definitions (A/B/C/D) →
emit events + summary report.
Leak definitions are explicit:
- Leak-A: plaintext DNS outside safe path
- Leak-B: split-policy intent leak (proxy-required domains resolved via ISP/local path)
- Leak-C: encrypted DNS escape/bypass (DoH/DoT outside approved egress)
- Leak-D: mismatch risk indicator (DNS egress differs from TCP/TLS egress)
---
## 1) Goals
### G1. Detect DNS leaks without needing special test domains
Passive detection must work continuously and produce evidence.
### G2. Support censorship-resistance leak definitions
Include both classic VPN-bypass leaks and split-policy intent leaks.
### G3. Best-effort process attribution
Attach PID/PPID/process name when OS allows; degrade gracefully with confidence.
### G4. Privacy-aware by default
Support privacy modes: Full / Redacted / Minimal.
---
## 2) Non-goals (v0.4.0)
- No "doctor" / smart one-shot diagnosis command
- No shell completions / man pages
- No HTTP/3 support
- No OS-native TLS verifier integration
- No firewall modification / "kill switch" management (detection only)
---
## 3) New crates / architecture changes
### 3.1 New subcrate: `wtfnet-dnsleak`
Responsibilities:
- passive sensor (pcap/pnet feature-gated)
- DNS parser (plaintext only)
- transport classifier: udp53/tcp53/dot/doh (confidence)
- flow tracker + metadata enrichment
- process attribution integration
- leak rules engine (A/B/C/D)
- structured event + summary report builder
### 3.2 `wtfnet-platform` extension: flow ownership lookup
Add a new trait:
- FlowOwnerProvider: map observed traffic 5-tuple → process info (best-effort)
Return process attribution confidence:
HIGH/MEDIUM/LOW/NONE plus failure reason.
---
## 4) CLI scope
### 4.1 Commands
New command group:
#### `wtfn dns leak watch`
Start passive monitoring for a bounded duration (default 10s):
- classify transports (udp53/tcp53/dot/doh)
- apply leak rules and emit events + summary
#### `wtfn dns leak status`
Print baseline snapshot:
- interfaces + routes
- system DNS configuration
- active policy summary
#### `wtfn dns leak report`
Parse a saved events file and produce a human summary.
### 4.2 Flags (proposed)
Common:
- `--duration <Ns|Nms>` (default 10s)
- `--iface <name>` (optional capture interface)
- `--policy <path>` (JSON policy file)
- `--profile <full-tunnel|proxy-stub|split>` (built-in presets)
- `--privacy <full|redacted|minimal>` (default redacted)
- `--geoip` (include GeoIP in event outputs)
- `--out <path>` (write JSON report/events)
---
## 5) Policy model (v0.4.0)
Safe DNS path constraints can be defined by:
- allowed interfaces: loopback/tunnel
- allowed destination set: proxy IPs, internal resolvers
- allowed processes: only local stub/proxy can resolve upstream
- allowed ports: e.g. only 443 to proxy server
A DNS event is a leak if it violates safe-path constraints.
Built-in profiles:
1) full-tunnel VPN style
2) proxy + local stub (default, censorship model)
3) split policy
---
## 6) Outputs
### 6.1 Leak events (structured)
Each LeakEvent includes:
- timestamp
- transport: udp53/tcp53/dot/doh/unknown
- qname/qtype (nullable)
- interface + route_class
- dst ip:port
- process info (nullable) + attribution confidence
- leak_type: A/B/C/D
- severity: P0..P3
- evidence fields + optional geoip
### 6.2 Summary report
- leak counts by type
- top leaking processes (if available)
- top resolver destinations
- timeline/burst hints
---
## 7) Deliverables checklist
MUST:
- new `wtfnet-dnsleak` crate integrated into workspace + CLI
- passive capture for UDP/TCP 53 and TCP 853
- DoH heuristic classification (confidence-based)
- policy engine + Leak-A/B/C/D rules
- structured events + human summary
- privacy modes full/redacted/minimal
- best-effort process attribution with confidence and failure reason
SHOULD:
- saved report file support (`--out report.json`)
- route_class inference with policy hints + heuristics
NICE:
- correlation_id (DNS → subsequent TCP/TLS connection) for Leak-D mismatch indicator
---
## 8) Definition of Done
- v0.4.0 builds on Linux (Debian/Ubuntu) and Windows
- `wtfn dns leak watch` detects:
- plaintext DNS leaving physical interface (Leak-A)
- DoT traffic leaving outside approved egress (Leak-C)
- DoH-ish encrypted resolver traffic outside policy (Leak-C)
- events include interface + dst + (best-effort) PID/process info
- output remains stable and additive; no breaking change to v0.3 commands
```
---
## `docs/DNS_LEAK_DETECTOR_IMPLEMENTATION.md`
```markdown
# DNS Leak Detector — Implementation Guide (v0.4)
This document explains how to implement the DNS leak detector as a new subcrate in WTFnet.
---
## 1) New crate: `wtfnet-dnsleak`
### 1.1 Module layout
crates/wtfnet-dnsleak/src/
- lib.rs
- policy.rs # safe path constraints + presets
- sensor.rs # passive capture -> normalized TrafficEvent stream
- classify.rs # transport classification + confidence
- parse_dns.rs # plaintext DNS parser: qname/qtype
- attrib.rs # process attribution integration (platform provider)
- route.rs # interface/route classification (tunnel/physical/loopback)
- rules.rs # Leak-A/B/C/D evaluation
- report.rs # LeakEvent + SummaryReport builders
- privacy.rs # full/redacted/minimal redaction logic
---
## 2) Core data types
### 2.1 TrafficEvent (raw from sensor)
Fields:
- ts: timestamp
- proto: udp/tcp
- src_ip, src_port
- dst_ip, dst_port
- iface_name (capture interface if known)
- payload: optional bytes (only for plaintext DNS parsing)
### 2.2 ClassifiedEvent
Adds:
- transport: udp53/tcp53/dot/doh/unknown
- doh_confidence: HIGH/MEDIUM/LOW (only if doh)
- qname/qtype: nullable
### 2.3 EnrichedEvent
Adds:
- route_class: loopback/tunnel/physical/unknown
- process info: pid/ppid/name (nullable)
- attribution_confidence: HIGH/MEDIUM/LOW/NONE
- attrib_failure_reason: optional string
- geoip: optional
### 2.4 LeakEvent (final output)
Adds:
- leak_type: A/B/C/D
- severity: P0..P3
- policy_rule_id
- evidence: minimal structured evidence
---
## 3) Platform integration: Process Attribution Engine (PAE)
### 3.1 Trait addition (wtfnet-platform)
Add:
trait FlowOwnerProvider {
fn owner_of(
&self,
proto: Proto,
src_ip: IpAddr,
src_port: u16,
dst_ip: IpAddr,
dst_port: u16,
ts: SystemTime,
) -> FlowOwnerResult;
}
FlowOwnerResult:
- pid, ppid, process_name (optional)
- confidence: HIGH/MEDIUM/LOW/NONE
- failure_reason: optional string
Design rule: attribution is best-effort and never blocks leak detection.
---
## 4) Transport classification logic
### 4.1 Plain DNS
Match:
- UDP dst port 53 OR TCP dst port 53
Parse QNAME/QTYPE from payload.
### 4.2 DoT
Match:
- TCP dst port 853
### 4.3 DoH (heuristic)
Match candidates:
- TCP dst port 443 AND (one of):
- dst IP in configured DoH resolver list
- dst SNI matches known DoH provider list (if available)
- frequent small HTTPS bursts pattern (weak)
Attach confidence:
- MEDIUM: known endpoint match
- LOW: traffic-shape heuristic only
Important: you mostly need to detect encrypted resolver traffic bypassing the proxy channel,
not to fully prove DoH with payload inspection.
---
## 5) Policy model
Policy defines "safe DNS path" constraints:
- allowed interfaces
- allowed destinations (IP/CIDR)
- allowed processes
- allowed ports
A DNS event is a leak if it violates safe-path constraints.
### 5.1 Built-in profiles
full-tunnel:
- allow DNS only via tunnel iface or loopback stub
- any UDP/TCP 53 on physical iface => Leak-A
proxy-stub (default):
- allow DNS only to loopback stub
- allow stub upstream only to proxy destinations
- flag direct DoH/DoT outside proxy path => Leak-C
split:
- allow plaintext DNS only for allowlist
- enforce unknown => proxy resolve (Leak-B)
---
## 6) Leak rules (A/B/C/D)
Leak-A (plaintext escape):
- transport udp53/tcp53
- route_class != allowed
- dst not in allowed destination set
Leak-B (split policy intent leak):
- qname matches proxy-required set or "unknown"
- query observed going to ISP/domicile resolver or non-tunnel iface
Leak-C (encrypted bypass):
- DoT or DoH flow exists
- not via approved egress path (iface/destination)
Leak-D (mismatch indicator):
- correlate qname to later TCP/TLS flows (optional v0.4 NICE)
---
## 7) Privacy modes
Because domains and cmdlines are sensitive, support:
- Full: store full qname and cmdline
- Redacted (default): hash qname or keep eTLD+1 only; truncate cmdline
- Minimal: no domains/cmdline; keep leak counts + resolver IPs + process name
Privacy mode applies in report builder, not in sensor.
---
## 8) CLI integration
Add under `dns` command group:
- `dns leak status`
- `dns leak watch`
- `dns leak report`
`watch` returns:
- summary report (human) by default
- `--json` returns structured report with events list
---
## 9) Recommended incremental build plan
Phase 1 (core passive detection):
- sensor: udp/tcp capture
- classify: udp53/tcp53/dot
- parse plaintext qname/qtype
- policy: allowlist + allowed interfaces/dests
- leak rules: Leak-A + Leak-C (DoT)
- report: events + summary
Phase 2 (process attribution + DoH heuristics):
- platform FlowOwnerProvider impls
- DoH heuristic classification + confidence
- privacy modes
Phase 3 (optional correlation / Leak-D):
- flow tracker correlating DNS -> TCP/TLS connect events
- mismatch indicator output
```
---
# Small note about *where* it lives in your architecture
This design slots in with **minimal churn**:
* ✅ Add `crates/wtfnet-dnsleak`
* ✅ Add `dns leak ...` subcommands in CLI
* ✅ Add a **single** new platform trait for process attribution (best-effort)
* ✅ Reuse your existing `dns watch` capture approach as the sensor
…which is exactly what your design describes: passive monitoring + classification + rules + evidence output and the PAE “event enricher” location in the pipeline .
**If it's too hard to detect DoH traffic, skip it.**
---

View File

@@ -21,11 +21,18 @@ This document tracks the planned roadmap alongside the current implementation st
- diag: bundle export (zip) - diag: bundle export (zip)
### v0.3 (future upgrades) ### v0.3 (future upgrades)
- richer trace output (reverse lookup, per-hop loss) - richer trace output (reverse lookup, per-hop loss, per-hop stats)
- TLS extras: OCSP stapling indicator, more chain parsing - HTTP timing accuracy (connect/tls)
- TLS extras: OCSP stapling indicator, richer cert parsing
- ports conns improvements (top talkers / summary) - ports conns improvements (top talkers / summary)
- better baseline/diff for system roots - better baseline/diff for system roots
- smarter "diagnose <domain>" workflow mode - optional LLMNR/NBNS discovery
- optional HTTP/3 (feature-gated; experimental, incomplete)
### v0.4 (dns leak detection)
- dns leak detector (passive watch + report)
- process attribution (best-effort)
- policy profiles + privacy modes
## Current stage ## Current stage
@@ -54,14 +61,33 @@ This document tracks the planned roadmap alongside the current implementation st
- `wtfnet-dns` crate with query/detect support wired to CLI. - `wtfnet-dns` crate with query/detect support wired to CLI.
- DNS query/detect output includes GeoIP enrichment for server and answer IPs. - DNS query/detect output includes GeoIP enrichment for server and answer IPs.
- DNS query/detect supports DoT and DoH transports. - DNS query/detect supports DoT and DoH transports.
- DNS query/detect supports SOCKS5 proxying for DoH. - DNS query/detect supports SOCKS5 proxying for DoH/DoT.
- DNS watch (passive, best-effort) implemented. - DNS watch (passive, best-effort) implemented.
- Calc subcrate with subnet/contains/overlap/summarize wired to CLI. - Calc subcrate with subnet/contains/overlap/summarize wired to CLI.
- CMake/Makefile build, install, and package targets for release packaging. - CMake/Makefile build, install, and package targets for release packaging.
- HTTP crate with head/get support, timing breakdown, optional GeoIP, and SOCKS5 proxy.
- TLS crate with handshake/verify/cert/alpn support in CLI (SOCKS5 proxy supported).
- TCP ping supports SOCKS5 proxy.
- v0.3: probe trace per-hop stats + rdns support.
- v0.3: http connect/tls timing best-effort with warnings.
- v0.3: ports conns (active TCP connections + summaries).
- v0.3: TLS extras (OCSP flag + richer cert parsing).
- v0.3: cert baseline/diff improvements.
- v0.3: HTTP/3 request path (feature-gated; experimental, incomplete).
- v0.3: HTTP/3 error classification (feature-gated).
- v0.4: platform flow-owner lookup (best-effort).
- v0.4: dns leak detector crate + CLI commands (status/watch/report).
- Discover crate with mdns/ssdp commands.
- Discover llmnr/nbns (best-effort).
- Diag crate with report and bundle export.
- Basic unit tests for calc and TLS parsing.
### In progress ### In progress
- v0.2 features: http, tls, discover, diag. - v0.4: DoH heuristic classification (optional).
- v0.4: Leak-D mismatch correlation (optional).
- v0.3: optional HTTP/3 (feature-gated; keep disabled until stabilized).
### Next ### Next
- Complete remaining v0.2 crates/commands (http/tls/discover/diag/dns watch). - Update docs/README/COMMANDS for v0.4.
- Add v0.2 tests (dns detect, calc, basic http/tls smoke). - Add v0.2 tests (dns detect, basic http/tls smoke).
- Track DNS leak design status in `docs/dns_leak_implementation_status.md`.

13
justfile Normal file
View File

@@ -0,0 +1,13 @@
# justfile (cross-platform, no bash)
# Python interpreter: honour $PYTHON if set, else the conventional name per OS.
python := env_var_or_default("PYTHON", if os() == "windows" { "python" } else { "python3" })
# Output directory for release archives.
dist_dir := "dist"
# Scratch directory used to stage files before archiving.
stage_root := "target/release-package"
# Default recipe: list available recipes.
default:
@just --list
# Build and package a binary via scripts/release_meta.py; both args optional.
release bin='' target='':
{{python}} scripts/release_meta.py --bin "{{bin}}" --target "{{target}}" --dist-dir "{{dist_dir}}" --stage-root "{{stage_root}}"
# Remove packaging outputs and the staging tree.
clean-dist:
{{python}} -c "import shutil; shutil.rmtree('dist', ignore_errors=True); shutil.rmtree('target/release-package', ignore_errors=True)"

175
scripts/release_meta.py Normal file
View File

@@ -0,0 +1,175 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import json
import os
import platform
import shutil
import subprocess
import sys
import tarfile
import zipfile
from pathlib import Path
from typing import Any
def run(cmd: list[str], *, capture: bool = False) -> str:
    """Execute a command, raising on non-zero exit.

    When capture is True, return the command's stripped stdout;
    otherwise let output flow to the console and return "".
    """
    if not capture:
        subprocess.check_call(cmd)
        return ""
    return subprocess.check_output(cmd, text=True).strip()
def cargo_metadata() -> dict[str, Any]:
    """Return the workspace's `cargo metadata --no-deps` output as a parsed dict."""
    raw = run(["cargo", "metadata", "--no-deps", "--format-version", "1"], capture=True)
    return json.loads(raw)
def rustc_host_triple() -> str:
    """Determine the host target triple by parsing `rustc -vV` output.

    Raises RuntimeError when no "host: " line is present.
    """
    prefix = "host: "
    for line in run(["rustc", "-vV"], capture=True).splitlines():
        if line.startswith(prefix):
            return line[len(prefix):].strip()
    raise RuntimeError("Could not determine host target triple from `rustc -vV`")
def is_windows_host() -> bool:
    """Best-effort Windows detection.

    Covers native Windows Python (os.name == "nt") as well as most
    MSYS/Cygwin Python builds, where sys.platform / platform.system()
    carry the telltale names instead.
    """
    if os.name == "nt":
        return True
    plat = sys.platform.lower()
    if any(plat.startswith(p) for p in ("win", "cygwin", "msys")):
        return True
    system = platform.system().lower()
    return any(token in system for token in ("windows", "cygwin", "msys"))
def exe_suffix_for_target(target_triple: str) -> str:
    """Return ".exe" for Windows target triples, "" for everything else."""
    if "windows" in target_triple:
        return ".exe"
    return ""
def find_bin_targets(meta: dict[str, Any]) -> list[tuple[str, str, str]]:
    """Collect every (package name, version, bin target name) triple in the workspace.

    The result is sorted by package, then target, then version so the
    default bin selection in main() is deterministic across runs.
    """
    found = [
        (pkg["name"], pkg["version"], tgt["name"])
        for pkg in meta.get("packages", [])
        for tgt in pkg.get("targets", [])
        if "bin" in tgt.get("kind", [])
    ]
    return sorted(found, key=lambda item: (item[0], item[2], item[1]))
def find_owner_package_for_bin(meta: dict[str, Any], bin_name: str) -> tuple[str, str]:
    """Return (package name, version) of the package that provides `bin_name`.

    Raises RuntimeError when no package declares a bin target by that name.
    """
    for pkg in meta.get("packages", []):
        owns = any(
            tgt.get("name") == bin_name and "bin" in tgt.get("kind", [])
            for tgt in pkg.get("targets", [])
        )
        if owns:
            return pkg["name"], pkg["version"]
    raise RuntimeError(f"Could not find a package providing bin '{bin_name}'")
def stage_and_archive(
    *,
    pkg_name: str,
    pkg_version: str,
    bin_path: Path,
    data_dir: Path,
    dist_dir: Path,
    stage_root: Path,
    target_triple_for_name: str,
) -> Path:
    """Stage the binary plus data/*.mmdb files and archive the staged tree.

    Produces a .zip on Windows hosts and a .tar.gz elsewhere; returns
    the path of the created archive inside dist_dir.
    """
    base = f"{pkg_name}-v{pkg_version}-{target_triple_for_name}"
    staging = stage_root / base
    staging_data = staging / "data"

    # Start from a clean staging tree on every run.
    if stage_root.exists():
        shutil.rmtree(stage_root)
    staging_data.mkdir(parents=True, exist_ok=True)
    dist_dir.mkdir(parents=True, exist_ok=True)

    shutil.copy2(bin_path, staging / bin_path.name)

    mmdb_files = sorted(data_dir.glob("*.mmdb")) if data_dir.exists() else []
    if not mmdb_files:
        print("WARN: no ./data/*.mmdb found; packaging binary only.", file=sys.stderr)
    for mmdb in mmdb_files:
        shutil.copy2(mmdb, staging_data / mmdb.name)

    if is_windows_host():
        archive = dist_dir / f"{base}.zip"
        with zipfile.ZipFile(archive, "w", compression=zipfile.ZIP_DEFLATED) as zf:
            for entry in staging.rglob("*"):
                if entry.is_file():
                    zf.write(entry, arcname=str(Path(base) / entry.relative_to(staging)))
        return archive

    archive = dist_dir / f"{base}.tar.gz"
    with tarfile.open(archive, "w:gz") as tf:
        tf.add(staging, arcname=base)
    return archive
def main() -> int:
    """Build the requested (or default) bin target and package it with data/*.mmdb.

    Returns a process exit code:
      0 on success,
      2 when the workspace declares no bin targets,
      3 when the built binary cannot be located.
    """
    ap = argparse.ArgumentParser(description="Build and package Rust binary + data/*.mmdb")
    ap.add_argument("--bin", default="", help="Binary target name (optional)")
    ap.add_argument("--target", default="", help="Cargo target triple (optional)")
    ap.add_argument("--dist-dir", default="dist", help="Output directory for archives")
    ap.add_argument("--stage-root", default="target/release-package", help="Staging directory root")
    ap.add_argument("--data-dir", default="data", help="Directory containing .mmdb files")
    args = ap.parse_args()

    meta = cargo_metadata()
    bins = find_bin_targets(meta)
    if not bins:
        print("ERROR: no binary targets found in workspace.", file=sys.stderr)
        return 2

    bin_name = args.bin.strip()
    if not bin_name:
        # Deterministic default: first entry of the sorted bin list.
        _, _, bin_name = bins[0]
        print(f"INFO: --bin not provided; defaulting to '{bin_name}'", file=sys.stderr)
    pkg_name, pkg_version = find_owner_package_for_bin(meta, bin_name)

    target = args.target.strip()
    host_triple = rustc_host_triple()
    # Archive names always carry a triple; fall back to the host when no --target.
    target_triple_for_name = target or host_triple

    # Build only the owning package
    build_cmd = ["cargo", "build", "-p", pkg_name, "--release"]
    if target:
        build_cmd += ["--target", target]
    run(build_cmd)

    # Locate binary: cross builds land under target/<triple>/release,
    # host builds under target/release.
    # (Replaces a dead conditional expression that was unconditionally
    # overwritten by a following `if` in the original.)
    exe_suffix = exe_suffix_for_target(target_triple_for_name)
    bin_dir = Path("target") / target / "release" if target else Path("target") / "release"
    bin_path = bin_dir / f"{bin_name}{exe_suffix}"
    if not bin_path.exists():
        print(f"ERROR: built binary not found: {bin_path}", file=sys.stderr)
        print("Hint: pass the correct bin target name: just release bin=<name>", file=sys.stderr)
        return 3

    out = stage_and_archive(
        pkg_name=pkg_name,
        pkg_version=pkg_version,
        bin_path=bin_path,
        data_dir=Path(args.data_dir),
        dist_dir=Path(args.dist_dir),
        stage_root=Path(args.stage_root),
        target_triple_for_name=target_triple_for_name,
    )
    print(f"Created: {out}")
    return 0
if __name__ == "__main__":
raise SystemExit(main())