Add: dns leak detection

DaZuo0122
2026-01-17 18:45:24 +08:00
parent ccd4a31d21
commit cfa96bde08
30 changed files with 3973 additions and 16 deletions

Cargo.lock generated
View File

@@ -1059,6 +1059,9 @@ name = "ipnet"
version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130"
dependencies = [
"serde",
]
[[package]]
name = "ipnetwork"
@@ -3234,6 +3237,7 @@ dependencies = [
"wtfnet-diag",
"wtfnet-discover",
"wtfnet-dns",
"wtfnet-dnsleak",
"wtfnet-geoip",
"wtfnet-http",
"wtfnet-platform",
@@ -3271,6 +3275,7 @@ dependencies = [
name = "wtfnet-discover"
version = "0.1.0"
dependencies = [
"hickory-proto",
"mdns-sd",
"serde",
"thiserror 2.0.17",
@@ -3296,6 +3301,20 @@ dependencies = [
"url",
]
[[package]]
name = "wtfnet-dnsleak"
version = "0.1.0"
dependencies = [
"hickory-proto",
"ipnet",
"pnet",
"serde",
"thiserror 2.0.17",
"tokio",
"tracing",
"wtfnet-platform",
]
[[package]]
name = "wtfnet-geoip"
version = "0.1.0"

View File

@@ -10,6 +10,7 @@ members = [
"crates/wtfnet-geoip",
"crates/wtfnet-probe",
"crates/wtfnet-dns",
"crates/wtfnet-dnsleak",
"crates/wtfnet-http",
"crates/wtfnet-tls",
"crates/wtfnet-discover",

View File

@@ -7,8 +7,10 @@ WTFnet is a pure CLI toolbox for diagnosing network problems on Linux and Windows
- Ports, neighbors, and trusted root certificates.
- Probing: ping, tcping, traceroute (best-effort).
- DNS: query/detect/watch with GeoIP, DoT/DoH, and SOCKS5 support.
- DNS leak detection with policy profiles and privacy modes (best-effort).
- GeoIP offline lookup via GeoLite2 Country/ASN.
- Subnet calculator: subnet/contains/overlap/summarize.
- Discover: mDNS/SSDP plus LLMNR/NBNS.
## Quickstart
```bash
@@ -44,6 +46,9 @@ wtfn dns query example.com A --transport doh --server 1.1.1.1 --tls-name cloudfl
wtfn dns query example.com A --transport dot --server 1.1.1.1 --tls-name cloudflare-dns.com --socks5 socks5://127.0.0.1:9909
wtfn dns detect example.com --transport doh --servers 1.1.1.1 --tls-name cloudflare-dns.com
wtfn dns watch --duration 10s --filter example.com
wtfn dns leak status
wtfn dns leak watch --duration 10s --profile proxy-stub
wtfn dns leak report report.json
# TLS
wtfn tls handshake example.com:443
@@ -55,6 +60,8 @@ wtfn tls alpn example.com:443 --alpn h2,http/1.1
# Discover
wtfn discover mdns --duration 3s
wtfn discover ssdp --duration 3s
wtfn discover llmnr --duration 3s
wtfn discover nbns --duration 3s
# Diag
wtfn diag --out report.json --json
@@ -90,10 +97,16 @@ Command flags (implemented):
- `dns query`: `--server <ip[:port]>`, `--transport <udp|tcp|dot|doh>`, `--tls-name <name>`, `--socks5 <url>`, `--prefer-ipv4`, `--timeout-ms <n>`
- `dns detect`: `--servers <csv>`, `--transport <udp|tcp|dot|doh>`, `--tls-name <name>`, `--socks5 <url>`, `--prefer-ipv4`, `--repeat <n>`, `--timeout-ms <n>`
- `dns watch`: `--duration <Ns|Nms>`, `--iface <name>`, `--filter <pattern>`
- `dns leak status`: `--profile <full-tunnel|proxy-stub|split>`, `--policy <path>` (see the example policy file after this list)
- `dns leak watch`: `--duration <Ns|Nms>`, `--iface <name>`, `--profile <full-tunnel|proxy-stub|split>`, `--policy <path>`, `--privacy <full|redacted|minimal>`, `--out <path>`, `--summary-only`
- `dns leak watch`: `--iface-diag` (prints capture-capable interfaces)
- `dns leak report`: `<path>`, `--privacy <full|redacted|minimal>`
- `http head|get`: `--timeout-ms <n>`, `--follow-redirects <n>`, `--show-headers`, `--show-body`, `--max-body-bytes <n>`, `--http1-only`, `--http2-only`, `--http3` (feature `http3`), `--http3-only` (feature `http3`), `--geoip`, `--socks5 <url>`
- `tls handshake|cert|verify|alpn`: `--sni <name>`, `--alpn <csv>`, `--timeout-ms <n>`, `--insecure`, `--socks5 <url>`, `--prefer-ipv4`, `--show-extensions`, `--ocsp`
- `discover mdns`: `--duration <Ns|Nms>`, `--service <type>`
- `discover ssdp`: `--duration <Ns|Nms>`
- `discover llmnr`: `--duration <Ns|Nms>`, `--name <host>`
- `discover nbns`: `--duration <Ns|Nms>`
- `diag`: `--out <path>`, `--bundle <path>`, `--dns-detect <domain>`, `--dns-timeout-ms <n>`, `--dns-repeat <n>`
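`--policy` accepts a JSON file whose fields mirror the `LeakPolicy` struct added in `crates/wtfnet-dnsleak/src/policy.rs`. A hypothetical example (values are illustrative only; all fields appear to be required, since the struct derives `Deserialize` without serde defaults):
```json
{
  "profile": "proxy-stub",
  "allowed_ifaces": ["lo", "tun0"],
  "tunnel_ifaces": ["tun0"],
  "loopback_ifaces": ["lo"],
  "allowed_destinations": ["10.0.0.0/8"],
  "allowed_ports": [853],
  "allowed_processes": ["systemd-resolved"],
  "proxy_required_domains": ["corp.example.com"],
  "allowlist_domains": ["example.com"]
}
```
Without `--policy`, the profile defaults to `proxy-stub` and the allowed interfaces are derived from detected loopback and tunnel interface names.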
## GeoIP data files
@@ -149,6 +162,11 @@ cargo run -p wtfnet-cli --features wtfnet-http/http3 -- http head https://cloudf
- optional LLMNR/NBNS discovery
- optional HTTP/3 (feature-gated; experimental, incomplete)
### v0.4 (current requirements)
- dns leak detection (passive watch + report)
- process attribution (best-effort)
- policy profiles + privacy modes
## Current stage
Implemented:
- Core CLI with JSON output and logging.
@@ -157,14 +175,16 @@ Implemented:
- http head/get with timing and GeoIP.
- tls handshake/verify/cert/alpn.
- DoT/DoH + SOCKS5 proxy support.
- discover mdns/ssdp.
- discover mdns/ssdp/llmnr/nbns.
- dns leak detection (status/watch/report).
- diag report + bundle.
- calc subcrate with subnet/contains/overlap/summarize.
- CMake/Makefile build + package + install targets.
- Basic unit tests for calc and TLS parsing.
In progress:
- none.
- dns leak: DoH heuristic classification (optional).
- dns leak: Leak-D mismatch correlation (optional).
See `docs/implementation_status.md` for a design-vs-implementation view.

View File

@@ -19,6 +19,7 @@ wtfnet-geoip = { path = "../wtfnet-geoip" }
wtfnet-platform = { path = "../wtfnet-platform" }
wtfnet-probe = { path = "../wtfnet-probe" }
wtfnet-dns = { path = "../wtfnet-dns", features = ["pcap"] }
wtfnet-dnsleak = { path = "../wtfnet-dnsleak", features = ["pcap"] }
wtfnet-http = { path = "../wtfnet-http" }
wtfnet-tls = { path = "../wtfnet-tls" }
wtfnet-discover = { path = "../wtfnet-discover" }

View File

@@ -130,6 +130,17 @@ enum DnsCommand {
Query(DnsQueryArgs),
Detect(DnsDetectArgs),
Watch(DnsWatchArgs),
Leak {
#[command(subcommand)]
command: DnsLeakCommand,
},
}
#[derive(Subcommand, Debug)]
enum DnsLeakCommand {
Status(DnsLeakStatusArgs),
Watch(DnsLeakWatchArgs),
Report(DnsLeakReportArgs),
}
#[derive(Subcommand, Debug)]
@@ -158,6 +169,8 @@ enum TlsCommand {
enum DiscoverCommand {
Mdns(DiscoverMdnsArgs),
Ssdp(DiscoverSsdpArgs),
Llmnr(DiscoverLlmnrArgs),
Nbns(DiscoverNbnsArgs),
}
#[derive(Parser, Debug, Clone)]
@@ -320,6 +333,41 @@ struct DnsWatchArgs {
filter: Option<String>,
}
#[derive(Parser, Debug, Clone)]
struct DnsLeakStatusArgs {
#[arg(long)]
profile: Option<String>,
#[arg(long)]
policy: Option<PathBuf>,
}
#[derive(Parser, Debug, Clone)]
struct DnsLeakWatchArgs {
#[arg(long, default_value = "10s")]
duration: String,
#[arg(long)]
iface: Option<String>,
#[arg(long)]
profile: Option<String>,
#[arg(long)]
policy: Option<PathBuf>,
#[arg(long, default_value = "redacted")]
privacy: String,
#[arg(long)]
out: Option<PathBuf>,
#[arg(long)]
summary_only: bool,
#[arg(long)]
iface_diag: bool,
}
#[derive(Parser, Debug, Clone)]
struct DnsLeakReportArgs {
path: PathBuf,
#[arg(long, default_value = "redacted")]
privacy: String,
}
#[derive(Parser, Debug, Clone)]
struct CalcSubnetArgs {
input: Vec<String>,
@@ -404,6 +452,20 @@ struct DiscoverSsdpArgs {
duration: String,
}
#[derive(Parser, Debug, Clone)]
struct DiscoverLlmnrArgs {
#[arg(long, default_value = "3s")]
duration: String,
#[arg(long)]
name: Option<String>,
}
#[derive(Parser, Debug, Clone)]
struct DiscoverNbnsArgs {
#[arg(long, default_value = "3s")]
duration: String,
}
#[derive(Parser, Debug, Clone)]
struct DiagArgs {
#[arg(long)]
@@ -550,6 +612,13 @@ async fn main() {
Commands::Dns {
command: DnsCommand::Watch(args),
} => handle_dns_watch(&cli, args.clone()).await,
Commands::Dns {
command: DnsCommand::Leak { command },
} => match command {
DnsLeakCommand::Status(args) => handle_dns_leak_status(&cli, args.clone()).await,
DnsLeakCommand::Watch(args) => handle_dns_leak_watch(&cli, args.clone()).await,
DnsLeakCommand::Report(args) => handle_dns_leak_report(&cli, args.clone()).await,
},
Commands::Calc {
command: CalcCommand::Subnet(args),
} => handle_calc_subnet(&cli, args.clone()).await,
@@ -586,6 +655,12 @@ async fn main() {
Commands::Discover {
command: DiscoverCommand::Ssdp(args),
} => handle_discover_ssdp(&cli, args.clone()).await,
Commands::Discover {
command: DiscoverCommand::Llmnr(args),
} => handle_discover_llmnr(&cli, args.clone()).await,
Commands::Discover {
command: DiscoverCommand::Nbns(args),
} => handle_discover_nbns(&cli, args.clone()).await,
Commands::Diag(args) => handle_diag(&cli, args.clone()).await,
};
@@ -1926,6 +2001,242 @@ async fn handle_dns_watch(cli: &Cli, args: DnsWatchArgs) -> i32 {
}
}
#[derive(Debug, Serialize)]
struct DnsLeakStatusReport {
pub policy: wtfnet_dnsleak::PolicySummary,
pub interfaces: Vec<wtfnet_platform::NetInterface>,
pub routes: Vec<wtfnet_platform::RouteEntry>,
pub dns: wtfnet_platform::DnsConfigSnapshot,
}
async fn handle_dns_leak_status(cli: &Cli, args: DnsLeakStatusArgs) -> i32 {
let platform = platform();
let interfaces = match platform.sys.interfaces().await {
Ok(value) => value,
Err(err) => return emit_platform_error(cli, err),
};
let routes = match platform.sys.routes().await {
Ok(value) => value,
Err(err) => return emit_platform_error(cli, err),
};
let dns = match platform.sys.dns_config().await {
Ok(value) => value,
Err(err) => return emit_platform_error(cli, err),
};
let policy = match resolve_leak_policy(&args.profile, args.policy.as_ref(), &interfaces) {
Ok(policy) => policy,
Err(err) => {
eprintln!("{err}");
return ExitKind::Usage.code();
}
};
let report = DnsLeakStatusReport {
policy: policy.summary(),
interfaces,
routes,
dns,
};
if cli.json {
let meta = Meta::new("wtfnet", env!("CARGO_PKG_VERSION"), false);
let mut command_args = Vec::new();
if let Some(profile) = args.profile {
command_args.push("--profile".to_string());
command_args.push(profile);
}
if let Some(policy_path) = args.policy {
command_args.push("--policy".to_string());
command_args.push(policy_path.display().to_string());
}
let command = CommandInfo::new("dns leak status", command_args);
let envelope = CommandEnvelope::new(meta, command, report);
emit_json(cli, &envelope)
} else {
println!("policy: {:?}", report.policy.profile);
if !report.policy.allowed_ifaces.is_empty() {
println!("allowed ifaces: {}", report.policy.allowed_ifaces.join(", "));
}
if !report.policy.allowed_destinations.is_empty() {
println!(
"allowed destinations: {}",
report.policy.allowed_destinations.join(", ")
);
}
if !report.policy.allowed_ports.is_empty() {
println!(
"allowed ports: {}",
report
.policy
.allowed_ports
.iter()
.map(|port| port.to_string())
.collect::<Vec<_>>()
.join(", ")
);
}
println!("interfaces: {}", report.interfaces.len());
println!("routes: {}", report.routes.len());
println!("dns servers: {}", report.dns.servers.join(", "));
ExitKind::Ok.code()
}
}
async fn handle_dns_leak_watch(cli: &Cli, args: DnsLeakWatchArgs) -> i32 {
if args.iface_diag {
return handle_dns_leak_iface_diag(cli).await;
}
let duration_ms = match parse_duration_ms(&args.duration) {
Ok(value) => value,
Err(err) => {
eprintln!("{err}");
return ExitKind::Usage.code();
}
};
let privacy = match parse_leak_privacy(&args.privacy) {
Ok(value) => value,
Err(err) => {
eprintln!("{err}");
return ExitKind::Usage.code();
}
};
let platform = platform();
let interfaces = match platform.sys.interfaces().await {
Ok(value) => value,
Err(err) => return emit_platform_error(cli, err),
};
let policy = match resolve_leak_policy(&args.profile, args.policy.as_ref(), &interfaces) {
Ok(policy) => policy,
Err(err) => {
eprintln!("{err}");
return ExitKind::Usage.code();
}
};
let options = wtfnet_dnsleak::LeakWatchOptions {
duration_ms,
iface: args.iface.clone(),
policy,
privacy,
include_events: !args.summary_only,
};
let report = match wtfnet_dnsleak::watch(options, Some(&*platform.flow_owner)).await {
Ok(report) => report,
Err(err) => {
eprintln!("dns leak watch failed: {err}");
return ExitKind::Failed.code();
}
};
if let Some(path) = args.out.as_ref() {
if let Ok(payload) = serde_json::to_string_pretty(&report) {
if let Err(err) = std::fs::write(path, payload) {
eprintln!("failed to write report: {err}");
return ExitKind::Failed.code();
}
}
}
if cli.json {
let meta = Meta::new("wtfnet", env!("CARGO_PKG_VERSION"), false);
let mut command_args = vec!["--duration".to_string(), args.duration];
if let Some(iface) = args.iface {
command_args.push("--iface".to_string());
command_args.push(iface);
}
if let Some(profile) = args.profile {
command_args.push("--profile".to_string());
command_args.push(profile);
}
if let Some(policy_path) = args.policy {
command_args.push("--policy".to_string());
command_args.push(policy_path.display().to_string());
}
if let Some(out) = args.out {
command_args.push("--out".to_string());
command_args.push(out.display().to_string());
}
if args.summary_only {
command_args.push("--summary-only".to_string());
}
command_args.push("--privacy".to_string());
command_args.push(args.privacy);
let command = CommandInfo::new("dns leak watch", command_args);
let envelope = CommandEnvelope::new(meta, command, report);
emit_json(cli, &envelope)
} else {
print_leak_summary(&report);
if !report.events.is_empty() {
for event in report.events {
println!(
"[{:?}] {:?} {}:{} via {:?}",
event.severity, event.leak_type, event.dst_ip, event.dst_port, event.route_class
);
if let Some(qname) = event.qname.as_ref() {
println!(" qname: {}", qname);
}
if let Some(process) = event.process_name.as_ref() {
println!(" process: {}", process);
}
}
}
ExitKind::Ok.code()
}
}
async fn handle_dns_leak_iface_diag(_cli: &Cli) -> i32 {
match wtfnet_dnsleak::iface_diagnostics() {
Ok(entries) => {
for entry in entries {
println!("iface: {} open: {} {}", entry.name, entry.open_ok, entry.error);
}
ExitKind::Ok.code()
}
Err(err) => {
eprintln!("iface diag failed: {err}");
ExitKind::Failed.code()
}
}
}
async fn handle_dns_leak_report(cli: &Cli, args: DnsLeakReportArgs) -> i32 {
let privacy = match parse_leak_privacy(&args.privacy) {
Ok(value) => value,
Err(err) => {
eprintln!("{err}");
return ExitKind::Usage.code();
}
};
let payload = match std::fs::read_to_string(&args.path) {
Ok(value) => value,
Err(err) => {
eprintln!("failed to read report: {err}");
return ExitKind::Failed.code();
}
};
let mut report: wtfnet_dnsleak::LeakReport = match serde_json::from_str(&payload) {
Ok(value) => value,
Err(err) => {
eprintln!("failed to parse report: {err}");
return ExitKind::Failed.code();
}
};
for event in report.events.iter_mut() {
wtfnet_dnsleak::apply_privacy(event, privacy);
}
if cli.json {
let meta = Meta::new("wtfnet", env!("CARGO_PKG_VERSION"), false);
let command = CommandInfo::new("dns leak report", vec![args.path.display().to_string()]);
let envelope = CommandEnvelope::new(meta, command, report);
emit_json(cli, &envelope)
} else {
print_leak_summary(&report);
ExitKind::Ok.code()
}
}
async fn handle_calc_subnet(cli: &Cli, args: CalcSubnetArgs) -> i32 {
let input = match normalize_subnet_input(&args.input) {
Ok(value) => value,
@@ -2335,6 +2646,81 @@ fn parse_alpn(value: Option<&str>) -> Vec<String> {
.collect()
}
fn parse_leak_privacy(value: &str) -> Result<wtfnet_dnsleak::PrivacyMode, String> {
match value.to_ascii_lowercase().as_str() {
"full" => Ok(wtfnet_dnsleak::PrivacyMode::Full),
"redacted" => Ok(wtfnet_dnsleak::PrivacyMode::Redacted),
"minimal" => Ok(wtfnet_dnsleak::PrivacyMode::Minimal),
_ => Err(format!("invalid privacy mode: {value}")),
}
}
fn parse_leak_profile(
value: Option<&str>,
) -> Result<wtfnet_dnsleak::LeakPolicyProfile, String> {
let value = value.unwrap_or("proxy-stub");
match value.to_ascii_lowercase().as_str() {
"full-tunnel" => Ok(wtfnet_dnsleak::LeakPolicyProfile::FullTunnel),
"proxy-stub" => Ok(wtfnet_dnsleak::LeakPolicyProfile::ProxyStub),
"split" => Ok(wtfnet_dnsleak::LeakPolicyProfile::Split),
_ => Err(format!("invalid profile: {value}")),
}
}
fn resolve_leak_policy(
profile: &Option<String>,
policy_path: Option<&PathBuf>,
interfaces: &[wtfnet_platform::NetInterface],
) -> Result<wtfnet_dnsleak::LeakPolicy, String> {
if let Some(path) = policy_path {
let payload = std::fs::read_to_string(path)
.map_err(|err| format!("failed to read policy: {err}"))?;
let policy: wtfnet_dnsleak::LeakPolicy = serde_json::from_str(&payload)
.map_err(|err| format!("failed to parse policy: {err}"))?;
return Ok(policy);
}
let profile = parse_leak_profile(profile.as_deref())?;
let iface_names = interfaces.iter().map(|iface| iface.name.clone()).collect::<Vec<_>>();
Ok(wtfnet_dnsleak::LeakPolicy::from_profile(
profile,
&iface_names,
))
}
fn print_leak_summary(report: &wtfnet_dnsleak::LeakReport) {
println!("leaks: {}", report.summary.total);
if !report.summary.by_type.is_empty() {
let items = report
.summary
.by_type
.iter()
.map(|entry| format!("{:?}={}", entry.leak_type, entry.count))
.collect::<Vec<_>>()
.join(", ");
println!("by type: {items}");
}
if !report.summary.top_processes.is_empty() {
let items = report
.summary
.top_processes
.iter()
.map(|entry| format!("{}={}", entry.key, entry.count))
.collect::<Vec<_>>()
.join(", ");
println!("top processes: {items}");
}
if !report.summary.top_destinations.is_empty() {
let items = report
.summary
.top_destinations
.iter()
.map(|entry| format!("{}={}", entry.key, entry.count))
.collect::<Vec<_>>()
.join(", ");
println!("top destinations: {items}");
}
}
fn emit_tls_report<T: serde::Serialize>(cli: &Cli, name: &str, report: T) -> i32 {
if cli.json {
let meta = Meta::new("wtfnet", env!("CARGO_PKG_VERSION"), false);
@@ -2447,6 +2833,86 @@ async fn handle_discover_ssdp(cli: &Cli, args: DiscoverSsdpArgs) -> i32 {
}
}
async fn handle_discover_llmnr(cli: &Cli, args: DiscoverLlmnrArgs) -> i32 {
let duration_ms = match parse_duration_ms(&args.duration) {
Ok(value) => value,
Err(err) => {
eprintln!("{err}");
return ExitKind::Usage.code();
}
};
let options = wtfnet_discover::LlmnrOptions {
duration_ms,
name: args.name.clone(),
};
match wtfnet_discover::llmnr_discover(options).await {
Ok(report) => {
if cli.json {
let meta = Meta::new("wtfnet", env!("CARGO_PKG_VERSION"), false);
let mut command_args = vec!["--duration".to_string(), args.duration];
if let Some(name) = args.name {
command_args.push("--name".to_string());
command_args.push(name);
}
let command = CommandInfo::new("discover llmnr", command_args);
let envelope = CommandEnvelope::new(meta, command, report);
emit_json(cli, &envelope)
} else {
println!("query: {}", report.name);
for answer in report.answers {
println!("from: {}", answer.from);
println!(" name: {}", answer.name);
println!(" type: {}", answer.record_type);
println!(" data: {}", answer.data);
println!(" ttl: {}", answer.ttl);
}
ExitKind::Ok.code()
}
}
Err(err) => {
eprintln!("llmnr discover failed: {err}");
ExitKind::Failed.code()
}
}
}
async fn handle_discover_nbns(cli: &Cli, args: DiscoverNbnsArgs) -> i32 {
let duration_ms = match parse_duration_ms(&args.duration) {
Ok(value) => value,
Err(err) => {
eprintln!("{err}");
return ExitKind::Usage.code();
}
};
let options = wtfnet_discover::NbnsOptions { duration_ms };
match wtfnet_discover::nbns_discover(options).await {
Ok(report) => {
if cli.json {
let meta = Meta::new("wtfnet", env!("CARGO_PKG_VERSION"), false);
let command = CommandInfo::new(
"discover nbns",
vec!["--duration".to_string(), args.duration],
);
let envelope = CommandEnvelope::new(meta, command, report);
emit_json(cli, &envelope)
} else {
for node in report.nodes {
println!("from: {}", node.from);
if node.names.is_empty() {
continue;
}
println!(" names: {}", node.names.join(", "));
}
ExitKind::Ok.code()
}
}
Err(err) => {
eprintln!("nbns discover failed: {err}");
ExitKind::Failed.code()
}
}
}
async fn handle_diag(cli: &Cli, args: DiagArgs) -> i32 {
let options = wtfnet_diag::DiagOptions {
dns_detect_domain: args.dns_detect.clone(),

View File

@@ -4,6 +4,7 @@ version = "0.1.0"
edition = "2024"
[dependencies]
hickory-proto = "0.24"
mdns-sd = "0.8"
serde = { version = "1", features = ["derive"] }
thiserror = "2"

View File

@@ -1,7 +1,9 @@
use hickory_proto::op::{Message, MessageType, Query};
use hickory_proto::rr::{Name, RData, RecordType};
use mdns_sd::{ServiceDaemon, ServiceEvent, ServiceInfo};
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, BTreeSet};
use std::net::{SocketAddr, UdpSocket};
use std::net::{IpAddr, SocketAddr, UdpSocket};
use std::time::{Duration, Instant};
use thiserror::Error;
@@ -24,6 +26,17 @@ pub struct SsdpOptions {
pub duration_ms: u64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LlmnrOptions {
pub duration_ms: u64,
pub name: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NbnsOptions {
pub duration_ms: u64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MdnsService {
pub service_type: String,
@@ -56,6 +69,34 @@ pub struct SsdpReport {
pub services: Vec<SsdpService>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LlmnrAnswer {
pub from: String,
pub name: String,
pub record_type: String,
pub data: String,
pub ttl: u32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LlmnrReport {
pub duration_ms: u64,
pub name: String,
pub answers: Vec<LlmnrAnswer>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NbnsNodeStatus {
pub from: String,
pub names: Vec<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NbnsReport {
pub duration_ms: u64,
pub nodes: Vec<NbnsNodeStatus>,
}
pub async fn mdns_discover(options: MdnsOptions) -> Result<MdnsReport, DiscoverError> {
tokio::task::spawn_blocking(move || mdns_discover_blocking(options))
.await
@@ -68,6 +109,18 @@ pub async fn ssdp_discover(options: SsdpOptions) -> Result<SsdpReport, DiscoverE
.map_err(|err| DiscoverError::Io(err.to_string()))?
}
pub async fn llmnr_discover(options: LlmnrOptions) -> Result<LlmnrReport, DiscoverError> {
tokio::task::spawn_blocking(move || llmnr_discover_blocking(options))
.await
.map_err(|err| DiscoverError::Io(err.to_string()))?
}
pub async fn nbns_discover(options: NbnsOptions) -> Result<NbnsReport, DiscoverError> {
tokio::task::spawn_blocking(move || nbns_discover_blocking(options))
.await
.map_err(|err| DiscoverError::Io(err.to_string()))?
}
fn mdns_discover_blocking(options: MdnsOptions) -> Result<MdnsReport, DiscoverError> {
let daemon = ServiceDaemon::new().map_err(|err| DiscoverError::Mdns(err.to_string()))?;
let mut service_types = BTreeSet::new();
@@ -174,6 +227,94 @@ fn ssdp_discover_blocking(options: SsdpOptions) -> Result<SsdpReport, DiscoverEr
})
}
fn llmnr_discover_blocking(options: LlmnrOptions) -> Result<LlmnrReport, DiscoverError> {
let socket = UdpSocket::bind("0.0.0.0:0").map_err(|err| DiscoverError::Io(err.to_string()))?;
socket
.set_read_timeout(Some(Duration::from_millis(200)))
.map_err(|err| DiscoverError::Io(err.to_string()))?;
let name = options
.name
.clone()
.filter(|value| !value.trim().is_empty())
.unwrap_or_else(|| "wpad".to_string());
let query = build_llmnr_query(&name)
.map_err(|err| DiscoverError::Io(format!("llmnr build query: {err}")))?;
let target = "224.0.0.252:5355";
let _ = socket.send_to(&query, target);
let mut answers = Vec::new();
let mut seen = BTreeSet::new();
let deadline = Instant::now() + Duration::from_millis(options.duration_ms);
let mut buf = [0u8; 2048];
while Instant::now() < deadline {
match socket.recv_from(&mut buf) {
Ok((len, from)) => {
if let Some(entries) = parse_llmnr_response(&buf[..len], from.ip()) {
for entry in entries {
let key = format!(
"{}|{}|{}|{}",
entry.from, entry.name, entry.record_type, entry.data
);
if seen.insert(key) {
answers.push(entry);
}
}
}
}
Err(_) => continue,
}
}
Ok(LlmnrReport {
duration_ms: options.duration_ms,
name,
answers,
})
}
fn nbns_discover_blocking(options: NbnsOptions) -> Result<NbnsReport, DiscoverError> {
let socket = UdpSocket::bind("0.0.0.0:0").map_err(|err| DiscoverError::Io(err.to_string()))?;
socket
.set_broadcast(true)
.map_err(|err| DiscoverError::Io(err.to_string()))?;
socket
.set_read_timeout(Some(Duration::from_millis(200)))
.map_err(|err| DiscoverError::Io(err.to_string()))?;
let query = build_nbns_node_status_query();
let _ = socket.send_to(&query, "255.255.255.255:137");
let mut nodes = Vec::new();
let mut seen = BTreeSet::new();
let deadline = Instant::now() + Duration::from_millis(options.duration_ms);
let mut buf = [0u8; 2048];
while Instant::now() < deadline {
match socket.recv_from(&mut buf) {
Ok((len, from)) => {
if let Some(names) = parse_nbns_node_status(&buf[..len]) {
let key = format!("{}|{}", from.ip(), names.join(","));
if seen.insert(key) {
nodes.push(NbnsNodeStatus {
from: from.ip().to_string(),
names,
});
}
}
}
Err(_) => continue,
}
}
Ok(NbnsReport {
duration_ms: options.duration_ms,
nodes,
})
}
fn parse_ssdp_response(payload: &str, from: SocketAddr) -> Option<SsdpService> {
let mut st = None;
let mut usn = None;
@@ -207,3 +348,183 @@ fn parse_ssdp_response(payload: &str, from: SocketAddr) -> Option<SsdpService> {
server,
})
}
fn build_llmnr_query(name: &str) -> Result<Vec<u8>, String> {
let name = Name::from_ascii(name).map_err(|err| format!("invalid name: {err}"))?;
let mut message = Message::new();
message
.set_id(0)
.set_message_type(MessageType::Query)
.set_recursion_desired(false)
.add_query(Query::query(name.clone(), RecordType::A))
.add_query(Query::query(name, RecordType::AAAA));
message.to_vec().map_err(|err| err.to_string())
}
fn parse_llmnr_response(payload: &[u8], from: IpAddr) -> Option<Vec<LlmnrAnswer>> {
let message = Message::from_vec(payload).ok()?;
if message.message_type() != MessageType::Response {
return None;
}
let mut answers = Vec::new();
for record in message.answers() {
let record_type = record.record_type();
let data = match record.data() {
Some(RData::A(addr)) => addr.to_string(),
Some(RData::AAAA(addr)) => addr.to_string(),
_ => continue,
};
answers.push(LlmnrAnswer {
from: from.to_string(),
name: record.name().to_string(),
record_type: record_type.to_string(),
data,
ttl: record.ttl(),
});
}
if answers.is_empty() {
None
} else {
Some(answers)
}
}
fn build_nbns_node_status_query() -> Vec<u8> {
let mut buf = Vec::with_capacity(50);
let id = nbns_query_id();
buf.extend_from_slice(&id.to_be_bytes());
buf.extend_from_slice(&0u16.to_be_bytes()); // flags
buf.extend_from_slice(&1u16.to_be_bytes()); // qdcount
buf.extend_from_slice(&0u16.to_be_bytes()); // ancount
buf.extend_from_slice(&0u16.to_be_bytes()); // nscount
buf.extend_from_slice(&0u16.to_be_bytes()); // arcount
buf.extend_from_slice(&nbns_encode_name("*", 0x00));
buf.extend_from_slice(&0x0021u16.to_be_bytes()); // NBSTAT
buf.extend_from_slice(&0x0001u16.to_be_bytes()); // IN
buf
}
fn nbns_query_id() -> u16 {
let nanos = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.subsec_nanos();
(nanos & 0xffff) as u16
}
fn nbns_encode_name(name: &str, suffix: u8) -> Vec<u8> {
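// NetBIOS "first-level" name encoding (RFC 1001): the name is upper-cased and
// space-padded to 15 bytes, the type suffix is appended as byte 16, and each
// half-octet is mapped into 'A'..='P'. The wildcard "*" with suffix 0x00 thus
// encodes as a length byte of 32, "CK" + 14 x "CA" + "AA", and a terminating 0.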
let mut raw = [b' '; 16];
let mut bytes = name.as_bytes().to_vec();
for byte in bytes.iter_mut() {
byte.make_ascii_uppercase();
}
for (idx, byte) in bytes.iter().take(15).enumerate() {
raw[idx] = *byte;
}
raw[15] = suffix;
let mut encoded = Vec::with_capacity(34);
encoded.push(32);
for byte in raw {
let high = ((byte >> 4) & 0x0f) + b'A';
let low = (byte & 0x0f) + b'A';
encoded.push(high);
encoded.push(low);
}
encoded.push(0);
encoded
}
fn parse_nbns_node_status(payload: &[u8]) -> Option<Vec<String>> {
if payload.len() < 12 {
return None;
}
let flags = u16::from_be_bytes([payload[2], payload[3]]);
if flags & 0x8000 == 0 {
return None;
}
let qdcount = u16::from_be_bytes([payload[4], payload[5]]) as usize;
let ancount = u16::from_be_bytes([payload[6], payload[7]]) as usize;
let mut offset = 12;
for _ in 0..qdcount {
offset = skip_dns_name(payload, offset)?;
if offset + 4 > payload.len() {
return None;
}
offset += 4;
}
let mut names = Vec::new();
for _ in 0..ancount {
offset = skip_dns_name(payload, offset)?;
if offset + 10 > payload.len() {
return None;
}
let rr_type = u16::from_be_bytes([payload[offset], payload[offset + 1]]);
let _rr_class = u16::from_be_bytes([payload[offset + 2], payload[offset + 3]]);
let _ttl = u32::from_be_bytes([
payload[offset + 4],
payload[offset + 5],
payload[offset + 6],
payload[offset + 7],
]);
let rdlength = u16::from_be_bytes([payload[offset + 8], payload[offset + 9]]) as usize;
offset += 10;
if offset + rdlength > payload.len() {
return None;
}
if rr_type == 0x0021 && rdlength > 0 {
if let Some(list) = parse_nbns_name_list(&payload[offset..offset + rdlength]) {
names.extend(list);
}
}
offset += rdlength;
}
if names.is_empty() {
None
} else {
Some(names)
}
}
fn parse_nbns_name_list(payload: &[u8]) -> Option<Vec<String>> {
let count = *payload.first()? as usize;
let mut offset = 1;
let mut names = Vec::new();
for _ in 0..count {
if offset + 18 > payload.len() {
return None;
}
let name_bytes = &payload[offset..offset + 15];
let suffix = payload[offset + 15];
let name = String::from_utf8_lossy(name_bytes)
.trim_end()
.to_string();
names.push(format!("{name}<{suffix:02x}>"));
offset += 18;
}
Some(names)
}
fn skip_dns_name(payload: &[u8], mut offset: usize) -> Option<usize> {
if offset >= payload.len() {
return None;
}
loop {
let len = *payload.get(offset)?;
if len & 0xc0 == 0xc0 {
if offset + 1 >= payload.len() {
return None;
}
return Some(offset + 2);
}
if len == 0 {
return Some(offset + 1);
}
offset += 1 + len as usize;
if offset >= payload.len() {
return None;
}
}
}

View File

@@ -0,0 +1,17 @@
[package]
name = "wtfnet-dnsleak"
version = "0.1.0"
edition = "2024"
[dependencies]
hickory-proto = "0.24"
ipnet = { version = "2", features = ["serde"] }
serde = { version = "1", features = ["derive"] }
thiserror = "2"
tokio = { version = "1", features = ["rt"] }
tracing = "0.1"
wtfnet-platform = { path = "../wtfnet-platform" }
pnet = { version = "0.34", optional = true }
[features]
pcap = ["dep:pnet"]

View File

@@ -0,0 +1,32 @@
use crate::report::LeakTransport;
use hickory_proto::op::{Message, MessageType};
use serde::{Deserialize, Serialize};
use std::net::IpAddr;
use wtfnet_platform::FlowProtocol;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClassifiedEvent {
pub timestamp_ms: u128,
pub proto: FlowProtocol,
pub src_ip: IpAddr,
pub src_port: u16,
pub dst_ip: IpAddr,
pub dst_port: u16,
pub iface_name: Option<String>,
pub transport: LeakTransport,
pub qname: Option<String>,
pub qtype: Option<String>,
pub rcode: Option<String>,
}
pub fn classify_dns_query(payload: &[u8]) -> Option<(String, String, String)> {
let message = Message::from_vec(payload).ok()?;
if message.message_type() != MessageType::Query {
return None;
}
let query = message.queries().first()?;
let qname = query.name().to_utf8();
let qtype = query.query_type().to_string();
let rcode = message.response_code().to_string();
Some((qname, qtype, rcode))
}

View File

@@ -0,0 +1,102 @@
mod classify;
mod policy;
mod privacy;
mod report;
mod route;
mod rules;
mod sensor;
use crate::classify::ClassifiedEvent;
use crate::sensor::capture_events;
use std::time::Instant;
use thiserror::Error;
use tracing::debug;
use wtfnet_platform::{FlowOwnerProvider, FlowTuple};
pub use crate::policy::{LeakPolicy, LeakPolicyProfile, PolicySummary};
pub use crate::privacy::{apply_privacy, PrivacyMode};
pub use crate::report::{LeakEvent, LeakReport, LeakSummary, LeakTransport, RouteClass, Severity};
pub use crate::sensor::{iface_diagnostics, IfaceDiag};
#[derive(Debug, Error)]
pub enum DnsLeakError {
#[error("not supported: {0}")]
NotSupported(String),
#[error("io error: {0}")]
Io(String),
#[error("policy error: {0}")]
Policy(String),
}
#[derive(Debug, Clone)]
pub struct LeakWatchOptions {
pub duration_ms: u64,
pub iface: Option<String>,
pub policy: LeakPolicy,
pub privacy: PrivacyMode,
pub include_events: bool,
}
pub async fn watch(
options: LeakWatchOptions,
flow_owner: Option<&dyn FlowOwnerProvider>,
) -> Result<LeakReport, DnsLeakError> {
debug!(
duration_ms = options.duration_ms,
iface = ?options.iface,
include_events = options.include_events,
"dns leak watch start"
);
let start = Instant::now();
let events = capture_events(&options).await?;
let mut leak_events = Vec::new();
for event in events {
let enriched = enrich_event(event, flow_owner).await;
if let Some(decision) = rules::evaluate(&enriched, &options.policy) {
let mut leak_event = report::LeakEvent::from_decision(enriched, decision);
privacy::apply_privacy(&mut leak_event, options.privacy);
leak_events.push(leak_event);
}
}
let summary = LeakSummary::from_events(&leak_events);
let report = LeakReport {
duration_ms: start.elapsed().as_millis() as u64,
policy: options.policy.summary(),
summary,
events: if options.include_events {
leak_events
} else {
Vec::new()
},
};
Ok(report)
}
async fn enrich_event(
event: ClassifiedEvent,
flow_owner: Option<&dyn FlowOwnerProvider>,
) -> report::EnrichedEvent {
let mut enriched = route::enrich_route(event);
if let Some(provider) = flow_owner {
let flow = FlowTuple {
proto: enriched.proto,
src_ip: enriched.src_ip,
src_port: enriched.src_port,
dst_ip: enriched.dst_ip,
dst_port: enriched.dst_port,
};
match provider.owner_of(flow).await {
Ok(result) => {
enriched.owner = result.owner;
enriched.owner_confidence = result.confidence;
enriched.owner_failure = result.failure_reason;
}
Err(err) => {
enriched.owner_failure = Some(err.message);
}
}
}
enriched
}

View File

@@ -0,0 +1,113 @@
use ipnet::IpNet;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum LeakPolicyProfile {
FullTunnel,
ProxyStub,
Split,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakPolicy {
pub profile: LeakPolicyProfile,
pub allowed_ifaces: Vec<String>,
pub tunnel_ifaces: Vec<String>,
pub loopback_ifaces: Vec<String>,
pub allowed_destinations: Vec<IpNet>,
pub allowed_ports: Vec<u16>,
pub allowed_processes: Vec<String>,
pub proxy_required_domains: Vec<String>,
pub allowlist_domains: Vec<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PolicySummary {
pub profile: LeakPolicyProfile,
pub allowed_ifaces: Vec<String>,
pub tunnel_ifaces: Vec<String>,
pub allowed_destinations: Vec<String>,
pub allowed_ports: Vec<u16>,
pub allowed_processes: Vec<String>,
}
impl LeakPolicy {
pub fn from_profile(profile: LeakPolicyProfile, ifaces: &[String]) -> Self {
let loopback_ifaces = detect_loopback_ifaces(ifaces);
let tunnel_ifaces = detect_tunnel_ifaces(ifaces);
let allowed_ifaces = match profile {
LeakPolicyProfile::FullTunnel | LeakPolicyProfile::ProxyStub => {
merge_lists(&loopback_ifaces, &tunnel_ifaces)
}
LeakPolicyProfile::Split => merge_lists(&loopback_ifaces, &tunnel_ifaces),
};
LeakPolicy {
profile,
allowed_ifaces,
tunnel_ifaces,
loopback_ifaces,
allowed_destinations: Vec::new(),
allowed_ports: Vec::new(),
allowed_processes: Vec::new(),
proxy_required_domains: Vec::new(),
allowlist_domains: Vec::new(),
}
}
pub fn summary(&self) -> PolicySummary {
PolicySummary {
profile: self.profile,
allowed_ifaces: self.allowed_ifaces.clone(),
tunnel_ifaces: self.tunnel_ifaces.clone(),
allowed_destinations: self
.allowed_destinations
.iter()
.map(|net| net.to_string())
.collect(),
allowed_ports: self.allowed_ports.clone(),
allowed_processes: self.allowed_processes.clone(),
}
}
}
fn detect_loopback_ifaces(ifaces: &[String]) -> Vec<String> {
ifaces
.iter()
.filter(|name| {
let name = name.to_ascii_lowercase();
name == "lo"
|| name == "lo0"
|| name.contains("loopback")
|| name.contains("localhost")
})
.cloned()
.collect()
}
fn detect_tunnel_ifaces(ifaces: &[String]) -> Vec<String> {
ifaces
.iter()
.filter(|name| {
let name = name.to_ascii_lowercase();
name.contains("tun")
|| name.contains("tap")
|| name.contains("wg")
|| name.contains("wireguard")
|| name.contains("vpn")
|| name.contains("ppp")
})
.cloned()
.collect()
}
fn merge_lists(a: &[String], b: &[String]) -> Vec<String> {
let mut out = Vec::new();
for value in a.iter().chain(b.iter()) {
if !out.iter().any(|entry| entry == value) {
out.push(value.clone());
}
}
out
}

View File

@@ -0,0 +1,35 @@
use crate::report::LeakEvent;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum PrivacyMode {
Full,
Redacted,
Minimal,
}
pub fn apply_privacy(event: &mut LeakEvent, mode: PrivacyMode) {
match mode {
PrivacyMode::Full => {}
PrivacyMode::Redacted => {
if let Some(value) = event.qname.as_ref() {
event.qname = Some(redact_domain(value));
}
}
PrivacyMode::Minimal => {
event.qname = None;
event.qtype = None;
event.rcode = None;
}
}
}
fn redact_domain(value: &str) -> String {
let parts: Vec<&str> = value.split('.').filter(|part| !part.is_empty()).collect();
if parts.len() >= 2 {
format!("{}.{}", parts[parts.len() - 2], parts[parts.len() - 1])
} else {
value.to_string()
}
}
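For reference, a standalone sketch (not part of this commit) of what the `redacted` mode does to a query name; `redact_domain` is private to the crate, so its logic is restated verbatim here:
```rust
// Illustrative restatement of redact_domain; keeps only the last two labels.
fn redact_domain_sketch(value: &str) -> String {
    let parts: Vec<&str> = value.split('.').filter(|part| !part.is_empty()).collect();
    if parts.len() >= 2 {
        format!("{}.{}", parts[parts.len() - 2], parts[parts.len() - 1])
    } else {
        value.to_string()
    }
}

fn main() {
    // "redacted" keeps only the registrable-looking tail of the name.
    assert_eq!(redact_domain_sketch("login.internal.example.com."), "example.com");
    // Single-label names pass through unchanged.
    assert_eq!(redact_domain_sketch("localhost"), "localhost");
}
```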

View File

@@ -0,0 +1,192 @@
use crate::policy::PolicySummary;
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, HashMap};
use std::net::IpAddr;
use wtfnet_platform::{FlowOwner, FlowOwnerConfidence, FlowProtocol};
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum LeakTransport {
Udp53,
Tcp53,
Dot,
Doh,
Unknown,
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[serde(rename_all = "lowercase")]
pub enum LeakType {
A,
B,
C,
D,
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum RouteClass {
Loopback,
Tunnel,
Physical,
Unknown,
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Severity {
P0,
P1,
P2,
P3,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnrichedEvent {
pub timestamp_ms: u128,
pub proto: FlowProtocol,
pub src_ip: IpAddr,
pub src_port: u16,
pub dst_ip: IpAddr,
pub dst_port: u16,
pub iface_name: Option<String>,
pub transport: LeakTransport,
pub qname: Option<String>,
pub qtype: Option<String>,
pub rcode: Option<String>,
pub route_class: RouteClass,
pub owner: Option<FlowOwner>,
pub owner_confidence: FlowOwnerConfidence,
pub owner_failure: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakEvent {
pub timestamp_ms: u128,
pub transport: LeakTransport,
pub qname: Option<String>,
pub qtype: Option<String>,
pub rcode: Option<String>,
pub iface_name: Option<String>,
pub route_class: RouteClass,
pub dst_ip: String,
pub dst_port: u16,
pub pid: Option<u32>,
pub ppid: Option<u32>,
pub process_name: Option<String>,
pub process_path: Option<String>,
pub attribution_confidence: FlowOwnerConfidence,
pub attribution_failure: Option<String>,
pub leak_type: LeakType,
pub severity: Severity,
pub policy_rule_id: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakTypeCount {
pub leak_type: LeakType,
pub count: usize,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SummaryItem {
pub key: String,
pub count: usize,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakSummary {
pub total: usize,
pub by_type: Vec<LeakTypeCount>,
pub top_processes: Vec<SummaryItem>,
pub top_destinations: Vec<SummaryItem>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakReport {
pub duration_ms: u64,
pub policy: PolicySummary,
pub summary: LeakSummary,
pub events: Vec<LeakEvent>,
}
impl LeakEvent {
pub fn from_decision(event: EnrichedEvent, decision: crate::rules::LeakDecision) -> Self {
let (pid, ppid, process_name, process_path) = event
.owner
.as_ref()
.map(|owner| {
(
owner.pid,
owner.ppid,
owner.process_name.clone(),
owner.process_path.clone(),
)
})
.unwrap_or((None, None, None, None));
LeakEvent {
timestamp_ms: event.timestamp_ms,
transport: event.transport,
qname: event.qname,
qtype: event.qtype,
rcode: event.rcode,
iface_name: event.iface_name,
route_class: event.route_class,
dst_ip: event.dst_ip.to_string(),
dst_port: event.dst_port,
pid,
ppid,
process_name,
process_path,
attribution_confidence: event.owner_confidence,
attribution_failure: event.owner_failure,
leak_type: decision.leak_type,
severity: decision.severity,
policy_rule_id: decision.policy_rule_id,
}
}
}
impl LeakSummary {
pub fn from_events(events: &[LeakEvent]) -> Self {
let total = events.len();
let mut by_type_map: HashMap<LeakType, usize> = HashMap::new();
let mut process_map: BTreeMap<String, usize> = BTreeMap::new();
let mut dest_map: BTreeMap<String, usize> = BTreeMap::new();
for event in events {
*by_type_map.entry(event.leak_type).or_insert(0) += 1;
if let Some(name) = event.process_name.as_ref() {
*process_map.entry(name.clone()).or_insert(0) += 1;
}
let dst_key = format!("{}:{}", event.dst_ip, event.dst_port);
*dest_map.entry(dst_key).or_insert(0) += 1;
}
let mut by_type = by_type_map
.into_iter()
.map(|(leak_type, count)| LeakTypeCount { leak_type, count })
.collect::<Vec<_>>();
by_type.sort_by(|a, b| a.leak_type.cmp(&b.leak_type));
let top_processes = top_items(process_map, 5);
let top_destinations = top_items(dest_map, 5);
LeakSummary {
total,
by_type,
top_processes,
top_destinations,
}
}
}
fn top_items(map: BTreeMap<String, usize>, limit: usize) -> Vec<SummaryItem> {
let mut items = map
.into_iter()
.map(|(key, count)| SummaryItem { key, count })
.collect::<Vec<_>>();
items.sort_by(|a, b| b.count.cmp(&a.count).then_with(|| a.key.cmp(&b.key)));
items.truncate(limit);
items
}

View File

@@ -0,0 +1,48 @@
use crate::classify::ClassifiedEvent;
use crate::report::{EnrichedEvent, RouteClass};
use wtfnet_platform::FlowOwnerConfidence;
pub fn enrich_route(event: ClassifiedEvent) -> EnrichedEvent {
let route_class = if event.src_ip.is_loopback() || event.dst_ip.is_loopback() {
RouteClass::Loopback
} else if event
.iface_name
.as_ref()
.map(|name| is_tunnel_iface(name))
.unwrap_or(false)
{
RouteClass::Tunnel
} else if event.iface_name.is_some() {
RouteClass::Physical
} else {
RouteClass::Unknown
};
EnrichedEvent {
timestamp_ms: event.timestamp_ms,
proto: event.proto,
src_ip: event.src_ip,
src_port: event.src_port,
dst_ip: event.dst_ip,
dst_port: event.dst_port,
iface_name: event.iface_name,
transport: event.transport,
qname: event.qname,
qtype: event.qtype,
rcode: event.rcode,
route_class,
owner: None,
owner_confidence: FlowOwnerConfidence::None,
owner_failure: None,
}
}
fn is_tunnel_iface(name: &str) -> bool {
let name = name.to_ascii_lowercase();
name.contains("tun")
|| name.contains("tap")
|| name.contains("wg")
|| name.contains("wireguard")
|| name.contains("vpn")
|| name.contains("ppp")
}

View File

@@ -0,0 +1,116 @@
use crate::policy::LeakPolicy;
use crate::report::{EnrichedEvent, LeakTransport, LeakType, Severity};
#[derive(Debug, Clone)]
pub struct LeakDecision {
pub leak_type: LeakType,
pub severity: Severity,
pub policy_rule_id: String,
}
pub fn evaluate(event: &EnrichedEvent, policy: &LeakPolicy) -> Option<LeakDecision> {
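// Leak-A: plaintext DNS (udp/53 or tcp/53) that no policy rule allows (P0).
// Leak-B: a proxy-required domain resolved over plaintext DNS (P1).
// Leak-C: encrypted DNS (DoT/DoH) to a resolver the policy does not allow (P1).
// Leak-D (mismatch correlation) is not evaluated here; see the README "In progress" list.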
match event.transport {
LeakTransport::Udp53 | LeakTransport::Tcp53 => {
if is_proxy_required(event, policy) && !is_allowed(event, policy) {
return Some(LeakDecision {
leak_type: LeakType::B,
severity: Severity::P1,
policy_rule_id: "LEAK_B_PROXY_REQUIRED".to_string(),
});
}
if !is_allowed(event, policy) {
return Some(LeakDecision {
leak_type: LeakType::A,
severity: Severity::P0,
policy_rule_id: "LEAK_A_PLAINTEXT".to_string(),
});
}
}
LeakTransport::Dot | LeakTransport::Doh => {
if !is_allowed(event, policy) {
return Some(LeakDecision {
leak_type: LeakType::C,
severity: Severity::P1,
policy_rule_id: "LEAK_C_ENCRYPTED".to_string(),
});
}
}
LeakTransport::Unknown => {}
}
None
}
fn is_allowed(event: &EnrichedEvent, policy: &LeakPolicy) -> bool {
let has_rules = !policy.allowed_ifaces.is_empty()
|| !policy.allowed_destinations.is_empty()
|| !policy.allowed_ports.is_empty()
|| !policy.allowed_processes.is_empty();
if !has_rules {
return false;
}
if let Some(iface) = event.iface_name.as_ref() {
if policy
.allowed_ifaces
.iter()
.any(|allowed| allowed.eq_ignore_ascii_case(iface))
{
return true;
}
}
if policy
.allowed_ports
.iter()
.any(|port| *port == event.dst_port)
{
return true;
}
if policy
.allowed_destinations
.iter()
.any(|net| net.contains(&event.dst_ip))
{
return true;
}
if let Some(name) = event
.owner
.as_ref()
.and_then(|owner| owner.process_name.as_ref())
{
if policy
.allowed_processes
.iter()
.any(|value| value.eq_ignore_ascii_case(name))
{
return true;
}
}
false
}
fn is_proxy_required(event: &EnrichedEvent, policy: &LeakPolicy) -> bool {
let Some(qname) = event.qname.as_ref() else {
return false;
};
let qname = qname.to_ascii_lowercase();
if policy.proxy_required_domains.iter().any(|domain| {
let domain = domain.to_ascii_lowercase();
qname == domain || qname.ends_with(&format!(".{domain}"))
}) {
return true;
}
if !policy.allowlist_domains.is_empty() {
let allowed = policy.allowlist_domains.iter().any(|domain| {
let domain = domain.to_ascii_lowercase();
qname == domain || qname.ends_with(&format!(".{domain}"))
});
return !allowed;
}
false
}

View File

@@ -0,0 +1,380 @@
use crate::classify::{classify_dns_query, ClassifiedEvent};
use crate::report::LeakTransport;
use crate::DnsLeakError;
use std::collections::HashSet;
use std::net::IpAddr;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use tracing::debug;
use wtfnet_platform::FlowProtocol;
use crate::LeakWatchOptions;
#[cfg(feature = "pcap")]
use pnet::datalink::{self, Channel, Config as DatalinkConfig};
#[cfg(feature = "pcap")]
use std::sync::mpsc;
#[cfg(not(feature = "pcap"))]
pub async fn capture_events(_options: &LeakWatchOptions) -> Result<Vec<ClassifiedEvent>, DnsLeakError> {
Err(DnsLeakError::NotSupported(
"dns leak watch requires pcap feature".to_string(),
))
}
#[cfg(feature = "pcap")]
pub async fn capture_events(options: &LeakWatchOptions) -> Result<Vec<ClassifiedEvent>, DnsLeakError> {
let options = options.clone();
let candidates = format_iface_list(&datalink::interfaces());
let timeout_ms = options.duration_ms.saturating_add(2000);
let handle = tokio::task::spawn_blocking(move || capture_events_blocking(options));
match tokio::time::timeout(Duration::from_millis(timeout_ms), handle).await {
Ok(joined) => joined.map_err(|err| DnsLeakError::Io(err.to_string()))?,
Err(_) => {
return Err(DnsLeakError::Io(
format!(
"capture timed out waiting for interface; candidates: {candidates}"
),
))
}
}
}
#[derive(Debug, Clone)]
pub struct IfaceDiag {
pub name: String,
pub open_ok: bool,
pub error: String,
}
#[cfg(not(feature = "pcap"))]
pub fn iface_diagnostics() -> Result<Vec<IfaceDiag>, DnsLeakError> {
Err(DnsLeakError::NotSupported(
"dns leak watch requires pcap feature".to_string(),
))
}
#[cfg(feature = "pcap")]
pub fn iface_diagnostics() -> Result<Vec<IfaceDiag>, DnsLeakError> {
let interfaces = datalink::interfaces();
let mut config = DatalinkConfig::default();
config.read_timeout = Some(Duration::from_millis(500));
let mut out = Vec::new();
for iface in interfaces {
let result = match open_channel_with_timeout(iface.clone(), &config) {
Ok((_iface, _rx)) => IfaceDiag {
name: iface.name,
open_ok: true,
error: "-".to_string(),
},
Err(err) => IfaceDiag {
name: iface.name,
open_ok: false,
error: err,
},
};
out.push(result);
}
Ok(out)
}
#[cfg(feature = "pcap")]
fn capture_events_blocking(options: LeakWatchOptions) -> Result<Vec<ClassifiedEvent>, DnsLeakError> {
use pnet::packet::ethernet::{EtherTypes, EthernetPacket};
use pnet::packet::Packet;
let mut config = DatalinkConfig::default();
config.read_timeout = Some(Duration::from_millis(500));
let (iface, mut rx) = select_interface(options.iface.as_deref(), &config)?;
let local_ips = iface.ips.iter().map(|ip| ip.ip()).collect::<Vec<_>>();
let iface_name = iface.name.clone();
let deadline = Instant::now() + Duration::from_millis(options.duration_ms);
let mut events = Vec::new();
let mut seen = HashSet::new();
while Instant::now() < deadline {
let frame = match rx.next() {
Ok(frame) => frame,
Err(_) => continue,
};
let ethernet = match EthernetPacket::new(frame) {
Some(packet) => packet,
None => continue,
};
let event = match ethernet.get_ethertype() {
EtherTypes::Ipv4 => parse_ipv4(
ethernet.payload(),
&local_ips,
&iface_name,
),
EtherTypes::Ipv6 => parse_ipv6(
ethernet.payload(),
&local_ips,
&iface_name,
),
_ => None,
};
if let Some(event) = event {
let key = format!(
"{:?}|{}|{}|{}|{}",
event.transport, event.src_ip, event.src_port, event.dst_ip, event.dst_port
);
if seen.insert(key) {
debug!(
transport = ?event.transport,
src_ip = %event.src_ip,
src_port = event.src_port,
dst_ip = %event.dst_ip,
dst_port = event.dst_port,
"dns leak event"
);
events.push(event);
}
}
}
Ok(events)
}
#[cfg(feature = "pcap")]
fn parse_ipv4(
payload: &[u8],
local_ips: &[IpAddr],
iface_name: &str,
) -> Option<ClassifiedEvent> {
use pnet::packet::ip::IpNextHeaderProtocols;
use pnet::packet::ipv4::Ipv4Packet;
use pnet::packet::Packet;
let ipv4 = Ipv4Packet::new(payload)?;
let src = IpAddr::V4(ipv4.get_source());
if !local_ips.contains(&src) {
return None;
}
match ipv4.get_next_level_protocol() {
IpNextHeaderProtocols::Udp => parse_udp(
src,
IpAddr::V4(ipv4.get_destination()),
ipv4.payload(),
iface_name,
),
IpNextHeaderProtocols::Tcp => parse_tcp(
src,
IpAddr::V4(ipv4.get_destination()),
ipv4.payload(),
iface_name,
),
_ => None,
}
}
#[cfg(feature = "pcap")]
fn parse_ipv6(
payload: &[u8],
local_ips: &[IpAddr],
iface_name: &str,
) -> Option<ClassifiedEvent> {
use pnet::packet::ip::IpNextHeaderProtocols;
use pnet::packet::ipv6::Ipv6Packet;
use pnet::packet::Packet;
let ipv6 = Ipv6Packet::new(payload)?;
let src = IpAddr::V6(ipv6.get_source());
if !local_ips.contains(&src) {
return None;
}
match ipv6.get_next_header() {
IpNextHeaderProtocols::Udp => parse_udp(
src,
IpAddr::V6(ipv6.get_destination()),
ipv6.payload(),
iface_name,
),
IpNextHeaderProtocols::Tcp => parse_tcp(
src,
IpAddr::V6(ipv6.get_destination()),
ipv6.payload(),
iface_name,
),
_ => None,
}
}
#[cfg(feature = "pcap")]
fn parse_udp(
src_ip: IpAddr,
dst_ip: IpAddr,
payload: &[u8],
iface_name: &str,
) -> Option<ClassifiedEvent> {
use pnet::packet::udp::UdpPacket;
use pnet::packet::Packet;
let udp = UdpPacket::new(payload)?;
let dst_port = udp.get_destination();
if dst_port != 53 {
return None;
}
let (qname, qtype, rcode) = classify_dns_query(udp.payload())?;
Some(ClassifiedEvent {
timestamp_ms: now_ms(),
proto: FlowProtocol::Udp,
src_ip,
src_port: udp.get_source(),
dst_ip,
dst_port,
iface_name: Some(iface_name.to_string()),
transport: LeakTransport::Udp53,
qname: Some(qname),
qtype: Some(qtype),
rcode: Some(rcode),
})
}
#[cfg(feature = "pcap")]
fn parse_tcp(
src_ip: IpAddr,
dst_ip: IpAddr,
payload: &[u8],
iface_name: &str,
) -> Option<ClassifiedEvent> {
use pnet::packet::tcp::TcpPacket;
let tcp = TcpPacket::new(payload)?;
let dst_port = tcp.get_destination();
let transport = match dst_port {
53 => LeakTransport::Tcp53,
853 => LeakTransport::Dot,
_ => return None,
};
Some(ClassifiedEvent {
timestamp_ms: now_ms(),
proto: FlowProtocol::Tcp,
src_ip,
src_port: tcp.get_source(),
dst_ip,
dst_port,
iface_name: Some(iface_name.to_string()),
transport,
qname: None,
qtype: None,
rcode: None,
})
}
#[cfg(feature = "pcap")]
fn select_interface(
name: Option<&str>,
config: &DatalinkConfig,
) -> Result<(datalink::NetworkInterface, Box<dyn datalink::DataLinkReceiver>), DnsLeakError> {
let interfaces = datalink::interfaces();
if let Some(name) = name {
let iface = interfaces
.iter()
.find(|iface| iface.name == name)
.cloned()
.ok_or_else(|| {
DnsLeakError::Io(format!(
"interface '{name}' not found; candidates: {}",
format_iface_list(&interfaces)
))
})?;
return open_channel_with_timeout(iface, config).map_err(|err| {
DnsLeakError::Io(format!(
"failed to open capture on interface ({err}); candidates: {}",
format_iface_list(&interfaces)
))
});
}
if let Some(iface) = pick_stable_iface(&interfaces) {
if let Ok(channel) = open_channel_with_timeout(iface, config) {
return Ok(channel);
}
}
for iface in interfaces.iter() {
if let Ok(channel) = open_channel_with_timeout(iface.clone(), config) {
return Ok(channel);
}
}
Err(DnsLeakError::Io(format!(
"no suitable interface found; candidates: {}",
format_iface_list(&interfaces)
)))
}
#[cfg(feature = "pcap")]
fn open_channel_with_timeout(
iface: datalink::NetworkInterface,
config: &DatalinkConfig,
) -> Result<(datalink::NetworkInterface, Box<dyn datalink::DataLinkReceiver>), String> {
let (tx, rx) = mpsc::channel();
let config = config.clone();
std::thread::spawn(move || {
let result = match datalink::channel(&iface, config) {
Ok(Channel::Ethernet(_, rx)) => Ok(rx),
Ok(_) => Err("unsupported channel".to_string()),
Err(err) => Err(err.to_string()),
};
let _ = tx.send((iface, result));
});
let timeout = Duration::from_millis(700);
match rx.recv_timeout(timeout) {
Ok((iface, Ok(rx))) => Ok((iface, rx)),
Ok((_iface, Err(err))) => Err(err),
Err(_) => Err("timeout opening capture".to_string()),
}
}
#[cfg(feature = "pcap")]
fn is_named_fallback(name: &str) -> bool {
let name = name.to_ascii_lowercase();
name.contains("wlan")
|| name.contains("wifi")
|| name.contains("wi-fi")
|| name.contains("ethernet")
|| name.contains("eth")
|| name.contains("lan")
}
#[cfg(feature = "pcap")]
fn pick_stable_iface(
interfaces: &[datalink::NetworkInterface],
) -> Option<datalink::NetworkInterface> {
let mut preferred = interfaces
.iter()
.filter(|iface| {
iface.is_up()
&& !iface.is_loopback()
&& (is_named_fallback(&iface.name) || !iface.ips.is_empty())
})
.cloned()
.collect::<Vec<_>>();
if preferred.is_empty() {
preferred = interfaces
.iter()
.filter(|iface| !iface.is_loopback())
.cloned()
.collect();
}
preferred.into_iter().next()
}
#[cfg(feature = "pcap")]
fn format_iface_list(interfaces: &[datalink::NetworkInterface]) -> String {
if interfaces.is_empty() {
return "-".to_string();
}
interfaces
.iter()
.map(|iface| iface.name.as_str())
.collect::<Vec<_>>()
.join(", ")
}
#[cfg(feature = "pcap")]
fn now_ms() -> u128 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_millis()
}

View File

@@ -148,7 +148,11 @@ pub async fn request(url: &str, opts: HttpRequestOptions) -> Result<HttpReport,
});
}
Err(err) => {
warnings.push(format!("http3 failed: {err}"));
let err_string = err.to_string();
let category = classify_http3_error(&err_string);
warnings.push(format!(
"http3 failed (category={category}): {err_string}"
));
if opts.http3_only {
return Err(err);
}
@@ -410,6 +414,37 @@ fn build_tls_connector() -> Result<TlsConnector, HttpError> {
Ok(TlsConnector::from(Arc::new(config)))
}
#[cfg(feature = "http3")]
fn classify_http3_error(message: &str) -> &'static str {
let message = message.to_ascii_lowercase();
if message.contains("timeout") || message.contains("timed out") {
return "timeout";
}
if message.contains("no resolved ips") || message.contains("no addresses resolved") {
return "resolve";
}
if message.contains("udp") && message.contains("blocked") {
return "udp_blocked";
}
if message.contains("quic") || message.contains("connection refused") {
return "connect";
}
if message.contains("alpn") || message.contains("application protocol") {
return "alpn";
}
if message.contains("tls")
|| message.contains("certificate")
|| message.contains("crypto")
|| message.contains("handshake")
{
return "tls";
}
if message.contains("permission denied") || message.contains("access is denied") {
return "permission";
}
"unknown"
}
#[cfg(feature = "http3")]
async fn http3_request(
url: &str,

View File

@@ -2,10 +2,12 @@ use async_trait::async_trait;
use network_interface::{Addr, NetworkInterface, NetworkInterfaceConfig};
use sha2::Digest;
use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use wtfnet_core::ErrorCode;
use wtfnet_platform::{
CertProvider, ConnSocket, DnsConfigSnapshot, ListenSocket, NeighborEntry, NeighProvider,
CertProvider, ConnSocket, DnsConfigSnapshot, FlowOwner, FlowOwnerConfidence, FlowOwnerProvider,
FlowOwnerResult, FlowProtocol, FlowTuple, ListenSocket, NeighborEntry, NeighProvider,
NetInterface, Platform, PlatformError, PortsProvider, RootCert, RouteEntry, SysProvider,
};
use x509_parser::oid_registry::{
@@ -19,6 +21,7 @@ pub fn platform() -> Platform {
ports: Arc::new(LinuxPortsProvider),
cert: Arc::new(LinuxCertProvider),
neigh: Arc::new(LinuxNeighProvider),
flow_owner: Arc::new(LinuxFlowOwnerProvider),
}
}
@@ -26,6 +29,7 @@ struct LinuxSysProvider;
struct LinuxPortsProvider;
struct LinuxCertProvider;
struct LinuxNeighProvider;
struct LinuxFlowOwnerProvider;
#[async_trait]
impl SysProvider for LinuxSysProvider {
@@ -375,6 +379,20 @@ fn parse_proc_socket_addr(value: &str, is_v6: bool) -> Option<String> {
}
}
fn parse_proc_socket_addr_value(value: &str, is_v6: bool) -> Option<SocketAddr> {
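// /proc/net/{tcp,udp} rows encode each endpoint as "HEXADDR:HEXPORT"
// (e.g. "0100007F:0035" is 127.0.0.1:53 on little-endian hosts); the hex
// address itself is decoded by the pre-existing parse_ipv4_hex /
// parse_ipv6_hex helpers.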
let mut parts = value.split(':');
let addr_hex = parts.next()?;
let port_hex = parts.next()?;
let port = u16::from_str_radix(port_hex, 16).ok()?;
if is_v6 {
let addr = parse_ipv6_hex(addr_hex)?;
Some(SocketAddr::new(IpAddr::V6(addr), port))
} else {
let addr = parse_ipv4_hex(addr_hex)?;
Some(SocketAddr::new(IpAddr::V4(addr), port))
}
}
fn parse_linux_arp(contents: &str) -> Vec<NeighborEntry> {
let mut neighbors = Vec::new();
for (idx, line) in contents.lines().enumerate() {
@@ -482,6 +500,138 @@ fn read_ppid(pid: u32) -> Option<u32> {
Some(ppid)
}
#[derive(Clone)]
struct ProcSocketEntry {
local: SocketAddr,
remote: SocketAddr,
inode: String,
}
fn parse_proc_socket_entries(
path: &str,
is_v6: bool,
) -> Result<Vec<ProcSocketEntry>, PlatformError> {
let contents = std::fs::read_to_string(path)
.map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
let mut entries = Vec::new();
for (idx, line) in contents.lines().enumerate() {
if idx == 0 {
continue;
}
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 10 {
continue;
}
let local = parts[1];
let remote = parts[2];
let inode = match parts.get(9) {
Some(value) => (*value).to_string(),
None => continue,
};
let local_addr = match parse_proc_socket_addr_value(local, is_v6) {
Some(addr) => addr,
None => continue,
};
let remote_addr = match parse_proc_socket_addr_value(remote, is_v6) {
Some(addr) => addr,
None => continue,
};
entries.push(ProcSocketEntry {
local: local_addr,
remote: remote_addr,
inode,
});
}
Ok(entries)
}
fn match_flow_entry<'a>(
flow: &FlowTuple,
entries: &'a [ProcSocketEntry],
match_remote: bool,
) -> Option<(&'a ProcSocketEntry, FlowOwnerConfidence)> {
for entry in entries {
let local_match = entry.local.port() == flow.src_port
&& (entry.local.ip() == flow.src_ip
|| entry.local.ip().is_unspecified()
|| entry.local.ip().is_loopback() && flow.src_ip.is_loopback());
if !local_match {
continue;
}
if match_remote {
let remote_match = entry.remote.port() == flow.dst_port
&& (entry.remote.ip() == flow.dst_ip
|| entry.remote.ip().is_unspecified());
if remote_match {
return Some((entry, FlowOwnerConfidence::High));
}
} else {
return Some((entry, FlowOwnerConfidence::Medium));
}
}
None
}
fn resolve_flow_owner(
flow: &FlowTuple,
) -> Result<FlowOwnerResult, PlatformError> {
let inode_map = build_inode_map();
let entries = match flow.proto {
FlowProtocol::Tcp => {
let mut out = parse_proc_socket_entries("/proc/net/tcp", false)?;
out.extend(parse_proc_socket_entries("/proc/net/tcp6", true)?);
out
}
FlowProtocol::Udp => {
let mut out = parse_proc_socket_entries("/proc/net/udp", false)?;
out.extend(parse_proc_socket_entries("/proc/net/udp6", true)?);
out
}
};
// Try an exact local/remote match first; unconnected UDP sockets rarely record
// a remote peer, so UDP falls back to a local-only match below.
let match_remote = true;
let matched = match_flow_entry(flow, &entries, match_remote)
.or_else(|| {
if matches!(flow.proto, FlowProtocol::Udp) {
match_flow_entry(flow, &entries, false)
} else {
None
}
});
let (entry, confidence) = match matched {
Some(value) => value,
None => {
return Ok(FlowOwnerResult {
owner: None,
confidence: FlowOwnerConfidence::None,
failure_reason: Some("no socket match".to_string()),
})
}
};
let owner = inode_map.get(&entry.inode).map(|info| FlowOwner {
pid: Some(info.pid),
ppid: info.ppid,
process_name: info.name.clone(),
process_path: info.path.clone(),
});
if owner.is_none() {
return Ok(FlowOwnerResult {
owner: None,
confidence: FlowOwnerConfidence::Low,
failure_reason: Some("socket owner not found".to_string()),
});
}
Ok(FlowOwnerResult {
owner,
confidence,
failure_reason: None,
})
}
fn load_native_roots(store: &str) -> Result<Vec<RootCert>, PlatformError> {
let certs = rustls_native_certs::load_native_certs()
.map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
@@ -626,3 +776,10 @@ impl NeighProvider for LinuxNeighProvider {
Ok(parse_linux_arp(&contents))
}
}
#[async_trait]
impl FlowOwnerProvider for LinuxFlowOwnerProvider {
async fn owner_of(&self, flow: FlowTuple) -> Result<FlowOwnerResult, PlatformError> {
resolve_flow_owner(&flow)
}
}

View File

@@ -2,6 +2,7 @@ use async_trait::async_trait;
use network_interface::{Addr, NetworkInterface, NetworkInterfaceConfig};
use regex::Regex;
use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};
use sha2::Digest;
use x509_parser::oid_registry::{
OID_KEY_TYPE_DSA, OID_KEY_TYPE_EC_PUBLIC_KEY, OID_KEY_TYPE_GOST_R3410_2012_256,
@@ -10,7 +11,8 @@ use x509_parser::oid_registry::{
use std::sync::Arc;
use wtfnet_core::ErrorCode;
use wtfnet_platform::{
CertProvider, ConnSocket, DnsConfigSnapshot, ListenSocket, NeighborEntry, NeighProvider,
CertProvider, ConnSocket, DnsConfigSnapshot, FlowOwner, FlowOwnerConfidence, FlowOwnerProvider,
FlowOwnerResult, FlowProtocol, FlowTuple, ListenSocket, NeighborEntry, NeighProvider,
NetInterface, Platform, PlatformError, PortsProvider, RootCert, RouteEntry, SysProvider,
};
@@ -20,6 +22,7 @@ pub fn platform() -> Platform {
ports: Arc::new(WindowsPortsProvider),
cert: Arc::new(WindowsCertProvider),
neigh: Arc::new(WindowsNeighProvider),
flow_owner: Arc::new(WindowsFlowOwnerProvider),
}
}
@@ -27,6 +30,7 @@ struct WindowsSysProvider;
struct WindowsPortsProvider;
struct WindowsCertProvider;
struct WindowsNeighProvider;
struct WindowsFlowOwnerProvider;
#[async_trait]
impl SysProvider for WindowsSysProvider {
@@ -579,6 +583,155 @@ fn parse_csv_line(line: &str) -> Vec<String> {
out
}
#[derive(Clone)]
struct FlowEntry {
proto: FlowProtocol,
local: SocketAddr,
remote: Option<SocketAddr>,
pid: u32,
}
fn parse_netstat_flow_entries() -> Result<Vec<FlowEntry>, PlatformError> {
let output = std::process::Command::new("netstat")
.arg("-ano")
.output()
.map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
if !output.status.success() {
return Err(PlatformError::new(ErrorCode::IoError, "netstat -ano failed"));
}
let text = String::from_utf8_lossy(&output.stdout);
let mut entries = Vec::new();
for line in text.lines() {
let trimmed = line.trim();
if trimmed.starts_with("TCP") {
let parts: Vec<&str> = trimmed.split_whitespace().collect();
if parts.len() < 5 {
continue;
}
let state = parts[3];
if state == "LISTENING" {
continue;
}
let local = match parse_netstat_addr(parts[1]) {
Some(addr) => addr,
None => continue,
};
let remote = match parse_netstat_addr(parts[2]) {
Some(addr) => addr,
None => continue,
};
let pid = match parts[4].parse::<u32>() {
Ok(pid) => pid,
Err(_) => continue,
};
entries.push(FlowEntry {
proto: FlowProtocol::Tcp,
local,
remote: Some(remote),
pid,
});
} else if trimmed.starts_with("UDP") {
let parts: Vec<&str> = trimmed.split_whitespace().collect();
if parts.len() < 4 {
continue;
}
let local = match parse_netstat_addr(parts[1]) {
Some(addr) => addr,
None => continue,
};
let pid = match parts[3].parse::<u32>() {
Ok(pid) => pid,
Err(_) => continue,
};
entries.push(FlowEntry {
proto: FlowProtocol::Udp,
local,
remote: None,
pid,
});
}
}
Ok(entries)
}
fn parse_netstat_addr(value: &str) -> Option<SocketAddr> {
let value = value.trim();
if value == "*:*" {
return None;
}
if let Some(rest) = value.strip_prefix('[') {
let end = rest.find(']')?;
let host = &rest[..end];
let port = rest[end + 2..].parse::<u16>().ok()?;
let host = host.split('%').next().unwrap_or(host);
let ip: IpAddr = host.parse().ok()?;
return Some(SocketAddr::new(ip, port));
}
let pos = value.rfind(':')?;
let host = &value[..pos];
let port = value[pos + 1..].parse::<u16>().ok()?;
let ip: IpAddr = host.parse().ok()?;
Some(SocketAddr::new(ip, port))
}
fn resolve_flow_owner(flow: &FlowTuple) -> Result<FlowOwnerResult, PlatformError> {
let entries = parse_netstat_flow_entries()?;
let proc_map = load_windows_process_map();
let mut matched: Option<(u32, FlowOwnerConfidence)> = None;
for entry in entries {
if entry.proto != flow.proto {
continue;
}
let local_match = entry.local.ip() == flow.src_ip && entry.local.port() == flow.src_port;
if !local_match {
continue;
}
match flow.proto {
FlowProtocol::Tcp => {
if let Some(remote) = entry.remote {
if remote.ip() == flow.dst_ip && remote.port() == flow.dst_port {
matched = Some((entry.pid, FlowOwnerConfidence::High));
break;
}
}
}
FlowProtocol::Udp => {
matched = Some((entry.pid, FlowOwnerConfidence::Medium));
break;
}
}
}
let (pid, confidence) = match matched {
Some(value) => value,
None => {
return Ok(FlowOwnerResult {
owner: None,
confidence: FlowOwnerConfidence::None,
failure_reason: Some("no socket match".to_string()),
})
}
};
let info = proc_map.get(&pid);
let owner = Some(FlowOwner {
pid: Some(pid),
ppid: None,
process_name: info.and_then(|value| value.name.clone()),
process_path: info.and_then(|value| value.path.clone()),
});
Ok(FlowOwnerResult {
owner,
confidence,
failure_reason: None,
})
}
fn load_native_roots(store: &str) -> Result<Vec<RootCert>, PlatformError> {
let certs = rustls_native_certs::load_native_certs()
.map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
@@ -696,3 +849,10 @@ impl NeighProvider for WindowsNeighProvider {
Ok(parse_arp_output(&text))
}
}
#[async_trait]
impl FlowOwnerProvider for WindowsFlowOwnerProvider {
async fn owner_of(&self, flow: FlowTuple) -> Result<FlowOwnerResult, PlatformError> {
resolve_flow_owner(&flow)
}
}

View File

@@ -1,5 +1,6 @@
use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use std::net::IpAddr;
use std::sync::Arc;
use wtfnet_core::ErrorCode;
@@ -80,6 +81,46 @@ pub struct NeighborEntry {
pub state: Option<String>,
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum FlowProtocol {
Udp,
Tcp,
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum FlowOwnerConfidence {
High,
Medium,
Low,
None,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlowOwner {
pub pid: Option<u32>,
pub ppid: Option<u32>,
pub process_name: Option<String>,
pub process_path: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlowOwnerResult {
pub owner: Option<FlowOwner>,
pub confidence: FlowOwnerConfidence,
pub failure_reason: Option<String>,
}
#[derive(Debug, Clone)]
pub struct FlowTuple {
pub proto: FlowProtocol,
pub src_ip: IpAddr,
pub src_port: u16,
pub dst_ip: IpAddr,
pub dst_port: u16,
}
#[derive(Debug, Clone)]
pub struct PlatformError {
pub code: ErrorCode,
@@ -123,9 +164,15 @@ pub trait NeighProvider: Send + Sync {
async fn neighbors(&self) -> Result<Vec<NeighborEntry>, PlatformError>;
}
#[async_trait]
pub trait FlowOwnerProvider: Send + Sync {
async fn owner_of(&self, flow: FlowTuple) -> Result<FlowOwnerResult, PlatformError>;
}
pub struct Platform {
pub sys: Arc<dyn SysProvider>,
pub ports: Arc<dyn PortsProvider>,
pub cert: Arc<dyn CertProvider>,
pub neigh: Arc<dyn NeighProvider>,
pub flow_owner: Arc<dyn FlowOwnerProvider>,
}

69
docs/COMMANDS.md Normal file
View File

@@ -0,0 +1,69 @@
# WTFnet Commands
This document lists CLI commands and supported flags. Output defaults to text; use `--json` for structured output.
## Global flags
- `--json` / `--pretty`
- `--no-color` / `--quiet`
- `-v` / `-vv` / `--verbose`
- `--log-level <error|warn|info|debug|trace>`
- `--log-format <text|json>`
- `--log-file <path>`
- `NETTOOL_LOG_FILTER` or `RUST_LOG` can override log filters (ex: `maxminddb::decoder=debug`)
## sys
- `sys ifaces`
- `sys ip` flags: `--all`, `--iface <name>`
- `sys route` flags: `--ipv4`, `--ipv6`, `--to <ip>`
- `sys dns`
## ports
- `ports listen` flags: `--tcp`, `--udp`, `--port <n>`
- `ports who <port>`
- `ports conns` flags: `--top <n>`, `--by-process`
## neigh
- `neigh list` flags: `--ipv4`, `--ipv6`, `--iface <name>`
## cert
- `cert roots`
- `cert baseline <path>`
- `cert diff <path>`
## geoip
- `geoip lookup <ip>`
- `geoip status`
## probe
- `probe ping <host>` flags: `--count <n>`, `--timeout-ms <n>`, `--interval-ms <n>`, `--no-geoip`
- `probe tcping <host:port>` flags: `--count <n>`, `--timeout-ms <n>`, `--socks5 <url>`, `--prefer-ipv4`, `--no-geoip`
- `probe trace <host>` flags: `--max-hops <n>`, `--per-hop <n>`, `--timeout-ms <n>`, `--udp`, `--port <n>`, `--rdns`, `--no-geoip`
## dns
- `dns query <domain> <type>` flags: `--server <ip[:port]>`, `--transport <udp|tcp|dot|doh>`, `--tls-name <name>`, `--socks5 <url>`, `--prefer-ipv4`, `--timeout-ms <n>`
- `dns detect <domain>` flags: `--servers <csv>`, `--transport <udp|tcp|dot|doh>`, `--tls-name <name>`, `--socks5 <url>`, `--prefer-ipv4`, `--repeat <n>`, `--timeout-ms <n>`
- `dns watch` flags: `--duration <Ns|Nms>`, `--iface <name>`, `--filter <pattern>`
- `dns leak status` flags: `--profile <full-tunnel|proxy-stub|split>`, `--policy <path>`
- `dns leak watch` flags: `--duration <Ns|Nms>`, `--iface <name>`, `--profile <full-tunnel|proxy-stub|split>`, `--policy <path>`, `--privacy <full|redacted|minimal>`, `--out <path>`, `--summary-only`, `--iface-diag`
- `dns leak report` flags: `<path>`, `--privacy <full|redacted|minimal>`
## http
- `http head|get <url>` flags: `--timeout-ms <n>`, `--follow-redirects <n>`, `--show-headers`, `--show-body`, `--max-body-bytes <n>`, `--http1-only`, `--http2-only`, `--http3` (feature `http3`), `--http3-only` (feature `http3`), `--geoip`, `--socks5 <url>`
## tls
- `tls handshake|cert|verify|alpn <host:port>` flags: `--sni <name>`, `--alpn <csv>`, `--timeout-ms <n>`, `--insecure`, `--socks5 <url>`, `--prefer-ipv4`, `--show-extensions`, `--ocsp`
## discover
- `discover mdns` flags: `--duration <Ns|Nms>`, `--service <type>`
- `discover ssdp` flags: `--duration <Ns|Nms>`
- `discover llmnr` flags: `--duration <Ns|Nms>`, `--name <host>`
- `discover nbns` flags: `--duration <Ns|Nms>`
## diag
- `diag` flags: `--out <path>`, `--bundle <path>`, `--dns-detect <domain>`, `--dns-timeout-ms <n>`, `--dns-repeat <n>`
## calc
- `calc subnet <cidr>|<ip> <mask>`
- `calc contains <a> <b>`
- `calc overlap <a> <b>`
- `calc summarize <cidr...>`

View File

@@ -0,0 +1,176 @@
# DNS Leak Detector - Implementation Guide (v0.4)
This document explains how to implement the DNS leak detector as a new subcrate in WTFnet.
## 1) New crate: `wtfnet-dnsleak`
### 1.1 Module layout
crates/wtfnet-dnsleak/src/
- lib.rs
- policy.rs # safe path constraints + presets
- sensor.rs # passive capture -> normalized TrafficEvent stream
- classify.rs # transport classification + confidence
- route.rs # interface/route classification (tunnel/physical/loopback)
- rules.rs # Leak-A/B/C/D evaluation
- report.rs # LeakEvent + SummaryReport builders
- privacy.rs # full/redacted/minimal redaction logic
## 2) Core data types
### 2.1 TrafficEvent (raw from sensor)
Fields:
- ts: timestamp
- proto: udp/tcp
- src_ip, src_port
- dst_ip, dst_port
- iface_name (capture interface if known)
- payload: optional bytes (only for plaintext DNS parsing)
### 2.2 ClassifiedEvent
Adds:
- transport: udp53/tcp53/dot/doh/unknown
- doh_confidence: HIGH/MEDIUM/LOW (only if doh)
- qname/qtype: nullable
### 2.3 EnrichedEvent
Adds:
- route_class: loopback/tunnel/physical/unknown
- process info: pid/ppid/name (nullable)
- attribution_confidence: HIGH/MEDIUM/LOW/NONE
- attrib_failure_reason: optional string
### 2.4 LeakEvent (final output)
Adds:
- leak_type: A/B/C/D
- severity: P0..P3
- policy_rule_id
- evidence: minimal structured evidence
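As a rough Rust sketch of how these stages could be carried through the pipeline (field names follow the lists above; the EnrichedEvent fields are folded into LeakEvent for brevity, and the actual types in `wtfnet-dnsleak` may differ):

```rust
use serde::{Deserialize, Serialize};
use std::net::IpAddr;
use wtfnet_platform::FlowOwner;

// Sketch only: mirrors the field lists above, not the crate's real definitions.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Transport {
    Udp53,
    Tcp53,
    Dot,
    Doh,
    Unknown,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClassifiedEvent {
    pub ts_ms: u64,
    pub transport: Transport,
    pub doh_confidence: Option<String>, // HIGH/MEDIUM/LOW, only when transport is Doh
    pub qname: Option<String>,
    pub qtype: Option<String>,
    pub src_ip: IpAddr,
    pub src_port: u16,
    pub dst_ip: IpAddr,
    pub dst_port: u16,
    pub iface_name: Option<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakEvent {
    pub event: ClassifiedEvent,
    pub route_class: String,            // loopback/tunnel/physical/unknown
    pub owner: Option<FlowOwner>,       // best-effort, from wtfnet-platform
    pub attribution_confidence: String, // HIGH/MEDIUM/LOW/NONE
    pub attrib_failure_reason: Option<String>,
    pub leak_type: char,                // 'A' | 'B' | 'C' | 'D'
    pub severity: String,               // P0..P3
    pub policy_rule_id: String,
}
```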
## 3) Platform integration: Process Attribution Engine (PAE)
### 3.1 Trait addition (wtfnet-platform)
Add:
trait FlowOwnerProvider {
fn owner_of(
&self,
proto: Proto,
src_ip: IpAddr,
src_port: u16,
dst_ip: IpAddr,
dst_port: u16,
) -> FlowOwnerResult;
}
FlowOwnerResult:
- pid, ppid, process_name (optional)
- confidence: HIGH/MEDIUM/LOW/NONE
- failure_reason: optional string
Design rule: attribution is best-effort and never blocks leak detection.
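For reference, the trait as it actually landed in `wtfnet-platform` in this commit is async and takes a `FlowTuple` struct rather than loose parameters (see the platform diff above):

```rust
use async_trait::async_trait;
use std::net::IpAddr;

#[derive(Debug, Clone)]
pub struct FlowTuple {
    pub proto: FlowProtocol, // Udp | Tcp
    pub src_ip: IpAddr,
    pub src_port: u16,
    pub dst_ip: IpAddr,
    pub dst_port: u16,
}

#[async_trait]
pub trait FlowOwnerProvider: Send + Sync {
    async fn owner_of(&self, flow: FlowTuple) -> Result<FlowOwnerResult, PlatformError>;
}
```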
## 4) Transport classification logic
### 4.1 Plain DNS
Match:
- UDP dst port 53 OR TCP dst port 53
Parse QNAME/QTYPE from payload.
### 4.2 DoT
Match:
- TCP dst port 853
### 4.3 DoH (heuristic)
Match candidates:
- TCP dst port 443 AND (one of):
- dst IP in configured DoH resolver list
- dst SNI matches known DoH provider list (if available)
- frequent small HTTPS bursts pattern (weak)
Attach confidence:
- MEDIUM: known endpoint match
- LOW: traffic-shape heuristic only
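A condensed sketch of this classification step; the known-resolver set used for the DoH heuristic is assumed to come from policy/configuration, and the SNI/traffic-shape checks are omitted:

```rust
use std::collections::HashSet;
use std::net::IpAddr;

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Proto { Udp, Tcp }

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum Transport { Udp53, Tcp53, Dot, DohMedium, Unknown }

// Sketch: classify an outbound flow by destination port, with a known-endpoint
// DoH check on 443. Real code would also consider SNI and traffic shape (LOW).
fn classify(proto: Proto, dst_ip: IpAddr, dst_port: u16, known_doh: &HashSet<IpAddr>) -> Transport {
    match (proto, dst_port) {
        (Proto::Udp, 53) => Transport::Udp53,
        (Proto::Tcp, 53) => Transport::Tcp53,
        (Proto::Tcp, 853) => Transport::Dot,
        (Proto::Tcp, 443) if known_doh.contains(&dst_ip) => Transport::DohMedium,
        _ => Transport::Unknown,
    }
}
```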
## 5) Policy model
Policy defines "safe DNS path" constraints:
- allowed interfaces
- allowed destinations (IP/CIDR)
- allowed processes
- allowed ports
A DNS event is a leak if it violates safe-path constraints.
### 5.1 Built-in profiles
full-tunnel:
- allow DNS only via tunnel iface or loopback stub
- any UDP/TCP 53 on physical iface => Leak-A
proxy-stub (default):
- allow DNS only to loopback stub
- allow stub upstream only to proxy destinations
- flag direct DoH/DoT outside proxy path => Leak-C
split:
- allow plaintext DNS only for allowlist
- enforce unknown => proxy resolve (Leak-B)
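A hypothetical `--policy` file expressing these constraints for the proxy-stub profile; every field name and value below is an illustrative assumption, since the concrete schema is owned by `wtfnet-dnsleak`:

```json
{
  "profile": "proxy-stub",
  "allowed_interfaces": ["lo"],
  "allowed_destinations": ["127.0.0.0/8", "198.51.100.10/32"],
  "allowed_ports": [53, 443],
  "allowed_processes": ["local-stub", "proxy-client"],
  "proxy_required_domains": ["*"],
  "allowlist_domains": []
}
```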
## 6) Leak rules (A/B/C/D)
Leak-A (plaintext escape):
- transport udp53/tcp53
- route_class != allowed
- dst not in allowed destination set
Leak-B (split policy intent leak):
- qname matches proxy-required set or "unknown"
- query observed going to ISP/domestic resolver or non-tunnel iface
Leak-C (encrypted bypass):
- DoT or DoH flow exists
- not via approved egress path (iface/destination)
Leak-D (mismatch indicator):
- correlate qname to later TCP/TLS flows (optional v0.4 NICE)
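Leak-A and Leak-C reduce to a simple check once an event has been classified and its interface/destination tested against the policy. A sketch, reusing the `Transport` enum from the classification sketch above (Leak-B is left out because it also needs the domain sets):

```rust
// Sketch: `on_allowed_iface` / `to_allowed_dst` are the policy's safe-path tests.
// An event on neither an allowed interface nor an allowed destination is a leak.
fn evaluate(transport: Transport, on_allowed_iface: bool, to_allowed_dst: bool) -> Option<char> {
    let safe_path = on_allowed_iface || to_allowed_dst;
    match transport {
        Transport::Udp53 | Transport::Tcp53 if !safe_path => Some('A'),
        Transport::Dot | Transport::DohMedium if !safe_path => Some('C'),
        _ => None,
    }
}
```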
## 7) Privacy modes
Because domains and cmdlines are sensitive, support:
- Full: store full qname and cmdline
- Redacted (default): hash qname or keep eTLD+1 only; truncate cmdline
- Minimal: no domains/cmdline; keep leak counts + resolver IPs + process name
Privacy mode applies in report builder, not in sensor.
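A sketch of the redacted-mode qname handling, using `sha2` (already a workspace dependency); the "keep eTLD+1" part is approximated here with the last two labels rather than a real public-suffix lookup:

```rust
use sha2::{Digest, Sha256};

// Sketch: replace a query name with a naive eTLD+1 approximation plus a short hash.
fn redact_qname(qname: &str) -> String {
    let digest = Sha256::digest(qname.as_bytes());
    let hash: String = digest.iter().take(6).map(|b| format!("{b:02x}")).collect();
    let mut labels: Vec<&str> = qname.trim_end_matches('.').rsplit('.').take(2).collect();
    labels.reverse();
    format!("{}#{}", labels.join("."), hash)
}
```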
## 8) CLI integration
Add under `dns` command group:
- `dns leak status`
- `dns leak watch`
- `dns leak report`
`watch` returns:
- summary report (human) by default
- `--json` returns structured report with events list
## 9) Recommended incremental build plan
Phase 1 (core passive detection):
- sensor: udp/tcp capture
- classify: udp53/tcp53/dot
- parse plaintext qname/qtype
- policy: allowlist + allowed interfaces/dests
- leak rules: Leak-A + Leak-C (DoT)
- report: events + summary
Phase 2 (process attribution + DoH heuristics):
- platform FlowOwnerProvider impls
- DoH heuristic classification + confidence
- privacy modes
Phase 3 (optional correlation / Leak-D):
- flow tracker correlating DNS -> TCP/TLS connect events
- mismatch indicator output

154
docs/RELEASE_v0.4.0.md Normal file
View File

@@ -0,0 +1,154 @@
# WTFnet v0.4.0 - DNS Leak Detection
v0.4.0 introduces a client-side DNS leak detector aimed at censorship-resistance threat models:
detect when DNS behavior escapes the intended safe path. The detector focuses on evidence:
transport, interface, destination, and (best-effort) process attribution.
This release does NOT include HTTP/3 or OS-native TLS verification.
## 0) Summary
New major capability: `dns leak` command group.
Core idea:
Passive monitor captures outbound DNS-like traffic -> classify (Plain DNS / DoT / DoH) ->
enrich with interface/route/process metadata -> evaluate leak definitions (A/B/C/D) ->
emit events + summary report.
Leak definitions are explicit:
- Leak-A: plaintext DNS outside safe path
- Leak-B: split-policy intent leak (proxy-required domains resolved via ISP/local path)
- Leak-C: encrypted DNS escape/bypass (DoH/DoT outside approved egress)
- Leak-D: mismatch risk indicator (DNS egress differs from TCP/TLS egress)
## 1) Goals
### G1. Detect DNS leaks without needing special test domains
Passive detection must work continuously and produce evidence.
### G2. Support censorship-resistance leak definitions
Include both classic VPN-bypass leaks and split-policy intent leaks.
### G3. Best-effort process attribution
Attach PID/PPID/process name when OS allows; degrade gracefully with confidence.
### G4. Privacy-aware by default
Support privacy modes: Full / Redacted / Minimal.
## 2) Non-goals (v0.4.0)
- No "doctor" / smart one-shot diagnosis command
- No shell completions / man pages
- No HTTP/3 support
- No OS-native TLS verifier integration
- No firewall modification / kill switch management (detection only)
## 3) New crates / architecture changes
### 3.1 New subcrate: `wtfnet-dnsleak`
Responsibilities:
- passive sensor (pcap/pnet feature-gated)
- DNS parser (plaintext only)
- transport classifier: udp53/tcp53/dot/doh (confidence)
- flow tracker + metadata enrichment
- process attribution integration
- leak rules engine (A/B/C/D)
- structured event + summary report builder
### 3.2 `wtfnet-platform` extension: flow ownership lookup
Add a new trait:
- FlowOwnerProvider: map observed traffic 5-tuple -> process info (best-effort)
Return process attribution confidence:
HIGH/MEDIUM/LOW/NONE plus failure reason.
## 4) CLI scope
### 4.1 Commands
New command group:
#### `wtfn dns leak watch`
Start passive monitoring for a bounded duration (default 10s):
- classify transports (udp53/tcp53/dot/doh)
- apply leak rules and emit events + summary
#### `wtfn dns leak status`
Print baseline snapshot:
- interfaces + routes
- system DNS configuration
- active policy summary
#### `wtfn dns leak report`
Parse a saved events file and produce a human summary.
### 4.2 Flags (proposed)
Common:
- `--duration <Ns|Nms>` (default 10s)
- `--iface <name>` (optional capture interface)
- `--policy <path>` (JSON policy file)
- `--profile <full-tunnel|proxy-stub|split>` (built-in presets)
- `--privacy <full|redacted|minimal>` (default redacted)
- `--out <path>` (write JSON report/events)
## 5) Policy model (v0.4.0)
Safe DNS path constraints can be defined by:
- allowed interfaces: loopback/tunnel
- allowed destination set: proxy IPs, internal resolvers
- allowed processes: only local stub/proxy can resolve upstream
- allowed ports: e.g. only 443 to proxy server
A DNS event is a leak if it violates safe-path constraints.
Built-in profiles:
1) full-tunnel VPN style
2) proxy + local stub (default, censorship model)
3) split policy
## 6) Outputs
### 6.1 Leak events (structured)
Each LeakEvent includes:
- timestamp
- transport: udp53/tcp53/dot/doh/unknown
- qname/qtype (nullable)
- interface + route_class
- dst ip:port
- process info (nullable) + attribution confidence
- leak_type: A/B/C/D
- severity: P0..P3
- evidence fields + optional geoip
### 6.2 Summary report
- leak counts by type
- top leaking processes (if available)
- top resolver destinations
- timeline/burst hints
## 7) Deliverables checklist
MUST:
- new `wtfnet-dnsleak` crate integrated into workspace + CLI
- passive capture for UDP/TCP 53 and TCP 853
- DoH heuristic classification (confidence-based)
- policy engine + Leak-A/B/C/D rules
- structured events + human summary
- privacy modes full/redacted/minimal
- best-effort process attribution with confidence and failure reason
SHOULD:
- saved report file support (`--out report.json`)
- route_class inference with policy hints + heuristics
NICE:
- correlation_id (DNS -> subsequent TCP/TLS connection) for Leak-D mismatch indicator
## 8) Definition of Done
- v0.4.0 builds on Linux (Debian/Ubuntu) and Windows
- `wtfn dns leak watch` detects:
- plaintext DNS leaving physical interface (Leak-A)
- DoT traffic leaving outside approved egress (Leak-C)
- DoH-ish encrypted resolver traffic outside policy (Leak-C)
- events include interface + dst + (best-effort) PID/process info
- output remains stable and additive; no breaking change to v0.3 commands

View File

@@ -29,17 +29,17 @@ This is a practical checklist to execute v0.3.0.
- [x] diff categories: add/remove/expired/changed
## 6) optional LLMNR/NBNS
- [ ] implement `discover llmnr`
- [ ] implement `discover nbns`
- [ ] bounded collection, low-noise
- [x] implement `discover llmnr`
- [x] implement `discover nbns`
- [x] bounded collection, low-noise
## 7) docs updates
- [x] update README roadmap
- [ ] update COMMANDS.md with new flags/commands
- [ ] add RELEASE_v0.3.0.md
- [x] update COMMANDS.md with new flags/commands
- [x] add RELEASE_v0.3.0.md
## 8) optional HTTP/3 (last)
- [x] add `http3` cargo feature + deps
- [x] implement `--http3` / `--http3-only`
- [ ] define error classification for QUIC failures
- [ ] keep feature disabled in default builds until stabilized
- [x] define error classification for QUIC failures
- [x] keep feature disabled in default builds until stabilized

33
docs/WORK_ITEMS_v0.4.0.md Normal file
View File

@@ -0,0 +1,33 @@
# WTFnet v0.4.0 - Work Items
This is a practical checklist to execute v0.4.0.
## 1) platform flow ownership
- [x] add FlowOwnerProvider trait + data types
- [x] implement Linux best-effort lookup
- [x] implement Windows best-effort lookup
## 2) new wtfnet-dnsleak crate
- [x] crate scaffold + pcap feature
- [x] UDP/TCP 53 capture + classify
- [x] DoT detection (TCP 853)
- [x] policy model + profiles
- [x] leak rules A/B/C (partial)
- [x] privacy modes
- [x] report + summary builder
## 3) CLI wiring
- [x] add `dns leak status`
- [x] add `dns leak watch`
- [x] add `dns leak report`
## 4) docs updates
- [x] add `docs/RELEASE_v0.4.0.md`
- [x] add `docs/DNS_LEAK_DETECTOR_IMPLEMENTATION.md`
- [x] update README roadmap + flags
- [x] update COMMANDS.md
- [x] update status/implementation docs
## 5) follow-ups
- [ ] add DoH heuristic classification (optional)
- [ ] add Leak-D mismatch correlation (optional)

View File

@@ -0,0 +1,723 @@
Below is a **high-level (language-agnostic)** design for a **client-side DNS leak detector** aimed at *censorship-resistance threat models*, i.e.:
> “Censor/ISP can observe/log DNS intent or infer proxy usage; we want to detect when DNS behavior escapes the intended protection path.”
I'll cover: **definitions**, **detection standards**, **workflow**, **modules**, **passive+active detection**, **outputs**, and **test methodology**.
---
# 1) Scope and goals
## Goals
Your detector should answer, with evidence:
1. **Did any DNS query leave the device outside the intended safe path?**
2. **Which domains leaked?** (when visible)
3. **Which transport leaked?** (UDP/53, TCP/53, DoT/853, DoH)
4. **Which interface leaked?** (Wi-Fi/Ethernet vs tunnel)
5. **Which process/app triggered it?** (if your OS allows attribution)
And in your censorship model, it should also detect:
6. **Split-policy intent leakage**: “unknown/sensitive domains were resolved using domestic/ISP-facing DNS.”
## Non-goals (be explicit)
* Not a censorship circumvention tool itself
* Not a full firewall manager (can suggest fixes, but detection is the core)
* Not perfect attribution on every OS (process mapping may be partial)
---
# 2) Define “DNS leak” precisely (your program's standard)
You need a **formal definition** because “DNS leak” is overloaded.
## Standard definition A (classic VPN / tunnel bypass)
A leak occurs if:
> **An unencrypted DNS query is sent outside the secure tunnel path**
> This is essentially how popular leak test sites define it (“unencrypted DNS query sent OUTSIDE the established VPN tunnel”). ([IP Leak][1])
Your detector should implement it in a machine-checkable way:
**Leak-A condition**
* DNS over **UDP/53 or TCP/53**
* Destination is **not** a “trusted resolver path” (e.g., not the tunnel interface, not loopback stub, not proxy channel)
* Interface is **not** the intended egress
✅ Strong for censorship: plaintext DNS exposes intent.
---
## Standard definition B (split-policy intent leak)
A leak occurs if:
> **A domain that should be “proxied / remote-resolved” was queried via local/ISP-facing DNS.**
This is the “proxy split rules still leak intent” case.
**Leak-B condition**
* Query name matches either:
* a “proxy-required set” (sensitive list, non-allowlist, unknown), or
* a policy rule (“everything except allowlist must resolve via proxy DNS”)
* And the query was observed going to:
* ISP resolver(s) / domestic resolver(s) / non-tunnel interface
✅ This is the leak most users in censorship settings care about.
---
## Standard definition C (encrypted DNS escape / bypass)
A leak occurs if:
> DNS was encrypted, but escaped the intended channel (e.g., app uses its own DoH directly to the Internet).
This matters because DoH hides the QNAME but still creates **observable behavior** and breaks your “DNS must follow proxy” invariant.
**Leak-C condition**
* DoH (RFC 8484) ([IETF Datatracker][2]) or DoT (RFC 7858) ([IETF Datatracker][3]) flow exists
* And it does **not** go through your approved egress path (tunnel/proxy)
✅ Detects “Firefox/Chrome built-in DoH bypass” style cases.
---
## Standard definition D (mismatch risk indicator)
Not a “leak” by itself, but a **proxy inference amplifier**:
> DNS egress region/path differs from traffic egress region/path.
This is a *censorship-resistance hygiene metric*, not a binary leak.
**Mismatch condition**
* Same domain produces:
* DNS resolution via path X
* TCP/TLS connection via path Y
* Where X ≠ Y (interface, ASN region, etc.)
✅ Helps catch “DNS direct, traffic proxy” or “DNS proxy, traffic direct” weirdness.
---
# 3) High-level architecture
## Core components
1. **Policy & Configuration**
* What counts as “safe DNS path”
* Which interfaces are “protected” (tunnel) vs “physical”
* Allowlist / proxy-required sets (optional)
* Known resolver lists (optional)
* Severity thresholds
2. **Traffic Sensor (Passive Monitor)**
* Captures outbound traffic metadata (and optionally payload for DNS parsing)
* Must cover:
* UDP/53, TCP/53
* TCP/853 (DoT)
* HTTPS flows that look like DoH (see below)
* Emits normalized events into a pipeline
3. **Classifier**
* Recognize DNS protocol types:
* Plain DNS
* DoT
* DoH
* Attach confidence scores (especially for DoH)
4. **DNS Parser (for plaintext DNS only)**
* Extract: QNAME, QTYPE, transaction IDs, response codes (optional)
* Store minimally (privacy-aware)
5. **Flow Tracker**
* Correlate packets into “flows”
* Map flow → interface → destination → process (if possible)
* Track timing correlation: DNS → connection attempts
6. **Leak Detector (Rules Engine)**
* Apply Leak-A/B/C/D definitions
* Produce leak events + severity + evidence chain
7. **Active Prober**
* Generates controlled DNS lookups to test behavior
* Can test fail-closed, bypasses, multi-interface behavior, etc.
8. **Report Generator**
* Human-readable summary
* Machine-readable logs (JSON)
* Recommendations (non-invasive)
---
# 4) Workflow (end-to-end)
## Workflow 0: Setup & baseline
1. Enumerate interfaces and routes
* Identify physical NICs
* Identify tunnel / proxy interface (or “expected egress destinations”)
2. Identify system DNS configuration
* Default resolvers per interface
* Local stub presence (127.0.0.1, etc.)
3. Load policy profile
* Full-tunnel, split-tunnel, or proxy-based
4. Start passive monitor
**Output:** “Current state snapshot” (useful even before testing).
---
## Workflow 1: Passive detection loop (always-on)
Continuously:
1. Capture outbound packets/flows
2. Classify as DNS-like (plain DNS / DoT / DoH / unknown)
3. If plaintext DNS → parse QNAME/QTYPE
4. Assign metadata:
* interface
* dst IP/port
* process (if possible)
* timestamp
5. Evaluate leak rules:
* Leak-A/B/C/D
6. Write event log + optional real-time alert
**Key design point:** passive mode should be able to detect leaks **without requiring any special test domain**.
---
## Workflow 2: Active test suite (on-demand)
Active tests exist because some leaks are intermittent or only happen under stress.
### Active Test A: “No plaintext DNS escape”
* Trigger a set of DNS queries (unique random domains)
* Verify **zero UDP/53 & TCP/53** leaves physical interfaces
### Active Test B: “Fail-closed test”
* Temporarily disrupt the “protected path” (e.g., tunnel down)
* Trigger lookups again
* Expected: DNS fails (no fallback to ISP DNS)
### Active Test C: “App bypass test”
* Launch test scenarios that mimic real apps
* Confirm no direct DoH/DoT flows go to public Internet outside the proxy path
### Active Test D: “Split-policy correctness”
* Query domains that should be:
* direct-allowed
* proxy-required
* unknown
* Confirm resolution path matches policy
---
# 5) How to recognize DNS transports (detection mechanics)
## Plain DNS (strongest signal)
**Match conditions**
* UDP dst port 53 OR TCP dst port 53
* Parse DNS header
* Extract QNAME/QTYPE
**Evidence strength:** high
**Intent visibility:** yes (domain visible)
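`wtfnet-dnsleak` already depends on `hickory-proto`, so real parsing can be delegated to it; purely as an illustration of what “parse QNAME/QTYPE” means for the question section, a hand-rolled extraction of the first QNAME looks roughly like this (no compression-pointer handling, which is normally unnecessary for the question of a query):

```rust
// Sketch: read the first question's QNAME from a raw DNS message.
// Layout: 12-byte header (QDCOUNT at bytes 4-5), then length-prefixed labels.
fn first_qname(payload: &[u8]) -> Option<String> {
    let qdcount = u16::from_be_bytes([*payload.get(4)?, *payload.get(5)?]);
    if qdcount == 0 {
        return None;
    }
    let mut pos = 12;
    let mut labels = Vec::new();
    loop {
        let len = *payload.get(pos)? as usize;
        if len == 0 {
            break;
        }
        if len & 0xC0 != 0 {
            return None; // compression pointer: out of scope for this sketch
        }
        let label = payload.get(pos + 1..pos + 1 + len)?;
        labels.push(String::from_utf8_lossy(label).into_owned());
        pos += 1 + len;
    }
    Some(labels.join("."))
}
```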
---
## DoT (port-based, easy)
DoT is defined over TLS, typically port **853**. ([IETF Datatracker][3])
**Match conditions**
* TCP dst port 853
* Optionally confirm TLS handshake exists
**Evidence strength:** high
**Intent visibility:** no (domain hidden)
---
## DoH (harder; heuristic + optional allowlists)
DoH is DNS over HTTPS (RFC 8484). ([IETF Datatracker][2])
**Recognizers (from strongest to weakest):**
1. HTTP request with `Content-Type: application/dns-message`
2. Path/pattern common to DoH endpoints (optional list)
3. SNI matches known DoH providers (optional list)
4. Traffic resembles frequent small HTTPS POST/GET bursts typical of DoH (weak)
**Evidence strength:** medium
**Intent visibility:** no (domain hidden)
**Important for your use-case:** you may not need to *prove* it's DoH; you mostly need to detect “DNS-like encrypted resolver traffic bypassing the proxy channel.”
---
# 6) Policy model: define “safe DNS path”
You need a simple abstraction users can configure:
### Safe DNS path can be defined by one or more of:
* **Allowed interfaces**
* loopback (local stub)
* tunnel interface
* **Allowed destination set**
* proxy server IP(s)
* internal resolver IP(s)
* **Allowed process**
* only your local stub + proxy allowed to resolve externally
* **Allowed port set**
* maybe only permit 443 to proxy server (if DNS rides inside it)
Then implement:
**A DNS event is a “leak” if it violates safe-path constraints.**
---
# 7) Leak severity model (useful for real-world debugging)
### Severity P0 (critical)
* Plaintext DNS (UDP/TCP 53) on physical interface to ISP/public resolver
* Especially if QNAME matches proxy-required/sensitive list
### Severity P1 (high)
* DoH/DoT bypassing proxy channel directly to public Internet
### Severity P2 (medium)
* Policy mismatch: domain resolved locally but connection later proxied (or vice versa)
### Severity P3 (low / info)
* Authoritative-side “resolver egress exposure” (less relevant for client-side leak detector)
* CDN performance mismatch indicators
---
# 8) Outputs and reporting
## Real-time console output (for debugging)
* “DNS leak detected: Plain DNS”
* domain (if visible)
* destination resolver IP
* interface
* process name (if available)
* policy rule violated
* suggested fix category (e.g., “force stub + block port 53”)
## Forensics log (machine-readable)
A single **LeakEvent** record could include:
* timestamp
* leak_type (A/B/C/D)
* transport (UDP53, TCP53, DoT, DoH)
* qname/qtype (nullable)
* src_iface / dst_ip / dst_port
* process_id/process_name (nullable)
* correlation_id (link DNS → subsequent connection attempt)
* confidence score (esp. DoH)
* raw evidence pointers (pcap offsets / event IDs)
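An illustrative record with those fields might look like the following; all names and values here are examples, not a fixed schema:

```json
{
  "event_id": "evt-0001",
  "timestamp": "2026-01-17T10:00:00Z",
  "leak_type": "A",
  "severity": "P0",
  "transport": "udp53",
  "qname": "example-sensitive.com",
  "qtype": "A",
  "src_iface": "wlan0",
  "dst_ip": "203.0.113.53",
  "dst_port": 53,
  "process_name": "browser",
  "pid": 4321,
  "attribution_confidence": "HIGH",
  "correlation_id": null
}
```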
## Summary report
* Leak counts by type
* Top leaking processes
* Top leaking resolver destinations
* Timeline view (bursts often indicate OS fallback behavior)
* “Pass/Fail” per policy definition
---
# 9) Validation strategy (“how do I know my detector is correct?”)
## Ground truth tests
1. **Known-leak scenario**
* intentionally set OS DNS to ISP DNS, no tunnel
* detector must catch plaintext DNS
2. **Known-safe scenario**
* local stub only + blocked outbound 53/853
* detector should show zero leaks
3. **Bypass scenario**
* enable browser built-in DoH directly
* detector should catch encrypted resolver bypass (Leak-C)
4. **Split-policy scenario**
* allowlist CN direct, everything else proxy-resolve
* detector should show:
* allowlist resolved direct
* unknown resolved via proxy path
---
# 10) Recommended “profiles” (makes tool usable)
Provide built-in presets:
### Profile 1: Full-tunnel VPN
* allow DNS only via tunnel interface or loopback stub
* any UDP/TCP 53 on physical NIC = leak
### Profile 2: Proxy + local stub (your case)
* allow DNS only to loopback stub
* allow stub upstream only via proxy server destinations
* flag any direct DoH/DoT to public endpoints
### Profile 3: Split tunnel (geoip + allowlist)
* allow plaintext DNS **only** for allowlisted domains (if user accepts risk)
* enforce “unknown → proxy-resolve”
* emphasize Leak-B correctness
---
Below is an updated **high-level design** (still language-agnostic) that integrates **process attribution** cleanly, including how it fits into the workflow and what to log.
---
# 1) New component: Process Attribution Engine (PAE)
## Purpose
When a DNS-like event is observed, the PAE tries to attach:
* **PID**
* **PPID**
* **process name**
* *(optional but extremely useful)* full command line, executable path, user, container/app package, etc.
This lets your logs answer:
> “Which program generated the leaked DNS request?”
> “Was it a browser, OS service, updater, antivirus, proxy itself, or some library?”
## Position in the pipeline
It sits between **Traffic Sensor** and **Leak Detector** as an “event enricher”:
**Traffic Event → (Classifier) → (Process Attribution) → Enriched Event → Leak Rules → Report**
---
# 2) Updated architecture (with process attribution)
### Existing modules (from earlier design)
1. Policy & Configuration
2. Traffic Sensor (packet/flow monitor)
3. Classifier (Plain DNS / DoT / DoH / Unknown)
4. DNS Parser (plaintext only)
5. Flow Tracker
6. Leak Detector (rules engine)
7. Active Prober
8. Report Generator
### New module
9. **Process Attribution Engine (PAE)**
* resolves “who owns this flow / packet”
* emits PID/PPID/name
* handles platform-specific differences and fallbacks
---
# 3) Workflow changes (what happens when a potential leak is seen)
## Passive detection loop (updated)
1. Capture outbound traffic event
2. Classify transport type:
* UDP/53, TCP/53 → plaintext DNS
* TCP/853 → DoT
* HTTPS patterns → DoH (heuristic)
3. Extract the **5-tuple**
* src IP:port, dst IP:port, protocol
4. **PAE lookup**
* resolve the owner process for this traffic
* attach PID/PPID/name (+ optional metadata)
5. Apply leak rules (A/B/C/D)
6. Emit:
* realtime log line (human readable)
* structured record (JSON/event log)
---
# 4) Process attribution: what to detect and how (high-level)
Process attribution always works on one core concept:
> **Map observed traffic (socket/flow) → owning process**
### Inputs PAE needs
* protocol (UDP/TCP)
* local src port
* local address
* timestamp
* optionally: connection state / flow ID
### Output from PAE
* `pid`, `ppid`, `process_name`
* optional enrichment:
* `exe_path`
* `cmdline`
* `user`
* “process tree chain” (for debugging: parent → child → …)
---
# 5) Platform support strategy (without implementation detail)
Process attribution is **OS-specific**, so structure it as:
## “Attribution Provider” interface
* Provider A: “kernel-level flow owner”
* Provider B: “socket table owner lookup”
* Provider C: “event tracing feed”
* Provider D: fallback “unknown / not supported”
Your main design goal is:
### Design rule
**Attribution must be best-effort + gracefully degrading**, never blocking detection.
So you always log the leak even if PID is unavailable:
* `pid=null, attribution_confidence=LOW`
---
# 6) Attribution confidence + race handling (important!)
Attribution can be tricky because:
* a process may exit quickly (“short-lived resolver helper”)
* ports can be reused
* NAT or local proxies may obscure the real origin
So log **confidence**:
* **HIGH**: direct mapping from kernel/socket owner at time of event
* **MEDIUM**: mapping by lookup shortly after event (possible race)
* **LOW**: inferred / uncertain
* **NONE**: not resolved
Also record *why* attribution failed:
* “permission denied”
* “flow already gone”
* “unsupported transport”
* “ambiguous mapping”
This makes debugging much easier.
---
# 7) What PID/PPID adds to your leak definitions
### Leak-A (plaintext DNS outside safe path)
Now you can say:
> “`svchost.exe (PID 1234)` sent UDP/53 to ISP resolver on Wi-Fi interface”
### Leak-B (split-policy intent leak)
You can catch:
* “game launcher looked up blocked domain”
* “system service triggered a sensitive name unexpectedly”
* “your proxy itself isn't actually resolving via its own channel”
### Leak-C (encrypted DNS bypass)
This becomes *very actionable*:
> “`firefox.exe` started direct DoH to resolver outside tunnel”
### Leak-D (mismatch indicator)
You can also correlate:
* DNS resolved by one process
* connection made by another process
(e.g., local stub vs app)
---
# 8) Reporting / realtime logging format (updated)
## Realtime log line (human readable)
Example (conceptual):
* **[P0][Leak-A] Plain DNS leaked**
* Domain: `example-sensitive.com` (A)
* From: `Wi-Fi` → To: `1.2.3.4:53`
* Process: `browser.exe` **PID=4321 PPID=1200**
* Policy violated: “No UDP/53 on physical NIC”
## Structured event (JSON-style fields)
Minimum recommended fields:
### Event identity
* `event_id`
* `timestamp`
### DNS identity
* `transport` (udp53/tcp53/dot/doh/unknown)
* `qname` (nullable)
* `qtype` (nullable)
### Network path
* `interface_name`
* `src_ip`, `src_port`
* `dst_ip`, `dst_port`
* `route_class` (tunnel / physical / loopback)
### Process identity (your requested additions)
* `pid`
* `ppid`
* `process_name`
* optional:
* `exe_path`
* `cmdline`
* `user`
### Detection result
* `leak_type` (A/B/C/D)
* `severity` (P0..P3)
* `policy_rule_id`
* `attribution_confidence`
---
# 9) Privacy and safety notes (important in a DNS tool)
Because you're logging **domains** and **process command lines**, this becomes sensitive.
Add a “privacy mode” policy:
* **Full**: store full domain + cmdline
* **Redacted**: hash domain; keep TLD only; truncate cmdline
* **Minimal**: only keep leak counts + resolver IPs + process name
Also allow “capture window” (rotate logs, avoid giant histories).
---
# 10) UX feature: “Show me the process tree”
When a leak happens, a good debugger view is:
* `PID: foo (pid 1000)`
* `PPID: bar (pid 900)`
* `PPID: systemd/svchost/etc`
This is extremely useful to identify:
* browsers spawning helpers
* OS DNS services
* containerized processes
* update agents / telemetry daemons
So your report generator should support:
**Process chain rendering** (where possible)
---
# 11) Practical edge cases you should detect (with PID helping)
1. **Local stub is fine, upstream isn't**
* Your local resolver process leaks upstream plaintext DNS
2. **Browser uses its own DoH**
* process attribution immediately reveals it
3. **Multiple interfaces**
* a leak only happens on Wi-Fi but not Ethernet
4. **Kill-switch failure**
* when tunnel drops, PID shows which app starts leaking first
---

View File

@@ -0,0 +1,42 @@
# DNS Leak Detection - Implementation Status
This document tracks the current DNS leak detector implementation against the design in
`docs/dns_leak_detection_design.md` and `docs/requirement_docs_v0.4.md`.
## Implemented
- New `wtfnet-dnsleak` crate with passive capture (pcap feature).
- Transport classification:
- Plain DNS (UDP/53, TCP/53) with qname/qtype parsing.
- DoT (TCP/853) detection.
- DoH detection is not implemented (skipped for now).
- Leak rules:
- Leak-A (plaintext DNS outside safe path).
- Leak-B (split-policy intent leak based on proxy-required/allowlist domains).
- Leak-C (encrypted DNS bypass for DoT).
- Policy profiles: `full-tunnel`, `proxy-stub`, `split`.
- Privacy modes: full/redacted/minimal (redacts qname).
- Process attribution:
- Best-effort `FlowOwnerProvider` with Linux `/proc` and Windows `netstat` lookups.
- Confidence levels and failure reasons exposed in events.
- CLI commands:
- `dns leak status`
- `dns leak watch`
- `dns leak report`
- `dns leak watch --iface-diag` (diagnostics for capture-capable interfaces).
- Interface selection:
- per-interface open timeout to avoid capture hangs
- stable default pick (up, non-loopback, named ethernet/wlan) before fallback scan
## Partially implemented
- Route/interface classification: heuristic only (loopback/tunnel/physical by iface name; see the sketch after this list).
- Safe path matching: allowed ifaces/dests/ports/processes; no route-based policy.
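A sketch of that name-based heuristic; the prefix lists are illustrative guesses rather than the crate's actual tables:

```rust
// Sketch: classify an interface as loopback/tunnel/physical by name prefix.
fn route_class(iface: &str) -> &'static str {
    let name = iface.to_ascii_lowercase();
    if name == "lo" || name.starts_with("loopback") {
        "loopback"
    } else if ["tun", "tap", "wg", "utun", "wintun"].iter().any(|p| name.starts_with(p)) {
        "tunnel"
    } else if ["eth", "en", "wlan", "wl"].iter().any(|p| name.starts_with(p)) {
        "physical"
    } else {
        "unknown"
    }
}
```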
## Not implemented (v0.4 backlog)
- DoH heuristic detection (SNI/endpoint list/traffic shape).
- Leak-D mismatch correlation (DNS -> TCP/TLS flows).
- GeoIP enrichment of leak events.
- Process tree reporting (PPID chain).
## Known limitations
- On Windows, pcap capture may require selecting a specific NPF interface; use
`dns leak watch --iface-diag` to list interfaces that can be opened.
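For the NPF case, an interface-diagnostic pass can be sketched with `pnet` (a dependency of the crate); this is only an outline of the idea, not the actual `--iface-diag` implementation:

```rust
use pnet::datalink;

// Sketch: list up, non-loopback devices as capture candidates. On Windows the
// `name` is an NPF device string and `description` is the adapter's friendly name.
fn list_capture_candidates() {
    for iface in datalink::interfaces() {
        if iface.is_up() && !iface.is_loopback() {
            println!("{} ({})", iface.name, iface.description);
        }
    }
}
```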

View File

@@ -9,9 +9,10 @@ This document tracks current implementation status against the original design i
- GeoIP: local GeoLite2 Country + ASN support.
- Probe: ping/tcping/trace with GeoIP enrichment.
- DNS: Hickory-based query/detect with best-effort heuristics.
- DNS leak detection: new passive monitor with policy and privacy modes (best-effort).
- HTTP: head/get via reqwest with best-effort timing breakdown and optional HTTP/3 (feature-gated).
- TLS: rustls-based handshake/verify/cert/alpn.
- Discover: mDNS/SSDP bounded collection.
- Discover: mDNS/SSDP bounded collection plus LLMNR/NBNS (best-effort).
- Diag: bundle export in zip.
## Deviations or refinements
@@ -23,7 +24,7 @@ This document tracks current implementation status against the original design i
## Gaps vs design (as of now)
- HTTP/3 is feature-gated and incomplete; not enabled in default builds.
- TLS verification is rustls-based (no OS-native verifier).
- Discover does not include LLMNR/NBNS.
- DNS leak DoH detection is heuristic and currently optional.
## Current stage summary
- v0.1 scope is complete.

View File

@@ -0,0 +1,484 @@
Yep — your DNS leak detector design **fits your current architecture cleanly**, and **making it a dedicated subcrate is the right move**.
Your existing workspace already follows “thin CLI + feature crates + platform traits + OS dispatch”, so the detector drops in naturally as **one new feature crate** plus a small extension to `wtfnet-platform` for **best-effort process attribution**. This matches the design's pipeline idea: **Traffic Sensor → Classifier → (Process Attribution) → Leak Rules → Report**
Below I'll do two things:
1. **Evaluate fit + integration plan** (what needs changing / what can be reused)
2. Provide **repo-ready v0.4.0 documents**, focused on implementing this detector.
---
# 1) Can this design fit your current architecture?
## ✅ Yes — it matches your crate boundaries almost 1:1
Your detector design already breaks down into modules that map directly into a new subcrate:
* Policy & Configuration
* Passive Traffic Sensor (UDP53/TCP53/853/“DoH-ish”)
* Classifier (DNS / DoT / DoH + confidence)
* DNS Parser (plaintext only)
* Flow Tracker (interface/route/process correlation)
* Leak Detector rules A/B/C/D
* Report generator + structured events
So: **new crate = `wtfnet-dnsleak`**, and keep `wtfnet-dns` for **active query/detect/watch** (already exists).
## ✅ What you can reuse immediately
### Reuse from your current codebase
* `wtfnet-dns watch` capture plumbing (you already have passive-ish capture)
* existing DNS parsing logic (for UDP/TCP 53)
* existing GeoIP enrichment pipeline (optional)
* `wtfnet-platform` interface snapshot + routing info (for “which interface leaked?”)
* your JSON output envelope + logging style
### Reuse from the detector design directly
* Leak definitions A/B/C/D (this is already precise and CLI-tool friendly)
* DoH recognition levels + confidence model (strong → weak)
* “safe DNS path” abstraction (interfaces/dests/process/ports)
* process attribution confidence levels and failure reasons
* privacy modes (Full/Redacted/Minimal)
## ✅ What you'll need to add (small + contained)
### 1) New platform trait for flow → process owner (best effort)
Your design explicitly wants PID/PPID/process name to show “who leaked DNS”, and it recommends an OS-specific provider interface.
So extend `wtfnet-platform` with something like:
* `FlowOwnerProvider::owner_of(tuple, timestamp) -> ProcessInfo + confidence`
This stays consistent with your existing “platform traits + OS dispatch” architecture.
### 2) Route/interface classification (“tunnel vs physical vs loopback”)
Your event schema wants `route_class` (tunnel/physical/loopback).
Implement this via **policy-configured interface labels** + a small heuristic fallback.
### 3) DoH detection will be heuristic (and that's OK)
Your design already plans for this: DoH classification is “hard”, confidence-based.
In v0.4, keep it simple:
* DoT = TCP/853
* Plain DNS = UDP/TCP 53 (+ parse QNAME)
* “DoH-ish” = TCP/443 to known resolver host/IP OR “small HTTPS bursts” pattern
…and attach confidence.
---
# 2) v0.4.0 documents (repo-ready)
Below are two docs you can drop into `docs/`.
---
## `docs/RELEASE_v0.4.0.md`
```markdown
# WTFnet v0.4.0 — DNS Leak Detection
v0.4.0 introduces a client-side DNS leak detector aimed at censorship-resistance threat models:
detect when DNS behavior escapes the intended safe path. The detector focuses on evidence:
transport, interface, destination, and (best-effort) process attribution.
This release does NOT include HTTP/3 or OS-native TLS verification.
---
## 0) Summary
New major capability: `dns leak` command group.
Core idea:
Passive monitor captures outbound DNS-like traffic → classify (Plain DNS / DoT / DoH) →
enrich with interface/route/process metadata → evaluate leak definitions (A/B/C/D) →
emit events + summary report.
Leak definitions are explicit:
- Leak-A: plaintext DNS outside safe path
- Leak-B: split-policy intent leak (proxy-required domains resolved via ISP/local path)
- Leak-C: encrypted DNS escape/bypass (DoH/DoT outside approved egress)
- Leak-D: mismatch risk indicator (DNS egress differs from TCP/TLS egress)
---
## 1) Goals
### G1. Detect DNS leaks without needing special test domains
Passive detection must work continuously and produce evidence.
### G2. Support censorship-resistance leak definitions
Include both classic VPN-bypass leaks and split-policy intent leaks.
### G3. Best-effort process attribution
Attach PID/PPID/process name when OS allows; degrade gracefully with confidence.
### G4. Privacy-aware by default
Support privacy modes: Full / Redacted / Minimal.
---
## 2) Non-goals (v0.4.0)
- No "doctor" / smart one-shot diagnosis command
- No shell completions / man pages
- No HTTP/3 support
- No OS-native TLS verifier integration
- No firewall modification / "kill switch" management (detection only)
---
## 3) New crates / architecture changes
### 3.1 New subcrate: `wtfnet-dnsleak`
Responsibilities:
- passive sensor (pcap/pnet feature-gated)
- DNS parser (plaintext only)
- transport classifier: udp53/tcp53/dot/doh (confidence)
- flow tracker + metadata enrichment
- process attribution integration
- leak rules engine (A/B/C/D)
- structured event + summary report builder
### 3.2 `wtfnet-platform` extension: flow ownership lookup
Add a new trait:
- FlowOwnerProvider: map observed traffic 5-tuple → process info (best-effort)
Return process attribution confidence:
HIGH/MEDIUM/LOW/NONE plus failure reason.
---
## 4) CLI scope
### 4.1 Commands
New command group:
#### `wtfn dns leak watch`
Start passive monitoring for a bounded duration (default 10s):
- classify transports (udp53/tcp53/dot/doh)
- apply leak rules and emit events + summary
#### `wtfn dns leak status`
Print baseline snapshot:
- interfaces + routes
- system DNS configuration
- active policy summary
#### `wtfn dns leak report`
Parse a saved events file and produce a human summary.
### 4.2 Flags (proposed)
Common:
- `--duration <Ns|Nms>` (default 10s)
- `--iface <name>` (optional capture interface)
- `--policy <path>` (JSON policy file)
- `--profile <full-tunnel|proxy-stub|split>` (built-in presets)
- `--privacy <full|redacted|minimal>` (default redacted)
- `--geoip` (include GeoIP in event outputs)
- `--out <path>` (write JSON report/events)
---
## 5) Policy model (v0.4.0)
Safe DNS path constraints can be defined by:
- allowed interfaces: loopback/tunnel
- allowed destination set: proxy IPs, internal resolvers
- allowed processes: only local stub/proxy can resolve upstream
- allowed ports: e.g. only 443 to proxy server
A DNS event is a leak if it violates safe-path constraints.
Built-in profiles:
1) full-tunnel VPN style
2) proxy + local stub (default, censorship model)
3) split policy
---
## 6) Outputs
### 6.1 Leak events (structured)
Each LeakEvent includes:
- timestamp
- transport: udp53/tcp53/dot/doh/unknown
- qname/qtype (nullable)
- interface + route_class
- dst ip:port
- process info (nullable) + attribution confidence
- leak_type: A/B/C/D
- severity: P0..P3
- evidence fields + optional geoip
### 6.2 Summary report
- leak counts by type
- top leaking processes (if available)
- top resolver destinations
- timeline/burst hints
---
## 7) Deliverables checklist
MUST:
- new `wtfnet-dnsleak` crate integrated into workspace + CLI
- passive capture for UDP/TCP 53 and TCP 853
- DoH heuristic classification (confidence-based)
- policy engine + Leak-A/B/C/D rules
- structured events + human summary
- privacy modes full/redacted/minimal
- best-effort process attribution with confidence and failure reason
SHOULD:
- saved report file support (`--out report.json`)
- route_class inference with policy hints + heuristics
NICE:
- correlation_id (DNS → subsequent TCP/TLS connection) for Leak-D mismatch indicator
---
## 8) Definition of Done
- v0.4.0 builds on Linux (Debian/Ubuntu) and Windows
- `wtfn dns leak watch` detects:
- plaintext DNS leaving physical interface (Leak-A)
- DoT traffic leaving outside approved egress (Leak-C)
- DoH-ish encrypted resolver traffic outside policy (Leak-C)
- events include interface + dst + (best-effort) PID/process info
- output remains stable and additive; no breaking change to v0.3 commands
```
---
## `docs/DNS_LEAK_DETECTOR_IMPLEMENTATION.md`
```markdown
# DNS Leak Detector — Implementation Guide (v0.4)
This document explains how to implement the DNS leak detector as a new subcrate in WTFnet.
---
## 1) New crate: `wtfnet-dnsleak`
### 1.1 Module layout
crates/wtfnet-dnsleak/src/
- lib.rs
- policy.rs # safe path constraints + presets
- sensor.rs # passive capture -> normalized TrafficEvent stream
- classify.rs # transport classification + confidence
- parse_dns.rs # plaintext DNS parser: qname/qtype
- attrib.rs # process attribution integration (platform provider)
- route.rs # interface/route classification (tunnel/physical/loopback)
- rules.rs # Leak-A/B/C/D evaluation
- report.rs # LeakEvent + SummaryReport builders
- privacy.rs # full/redacted/minimal redaction logic
---
## 2) Core data types
### 2.1 TrafficEvent (raw from sensor)
Fields:
- ts: timestamp
- proto: udp/tcp
- src_ip, src_port
- dst_ip, dst_port
- iface_name (capture interface if known)
- payload: optional bytes (only for plaintext DNS parsing)
### 2.2 ClassifiedEvent
Adds:
- transport: udp53/tcp53/dot/doh/unknown
- doh_confidence: HIGH/MEDIUM/LOW (only if doh)
- qname/qtype: nullable
### 2.3 EnrichedEvent
Adds:
- route_class: loopback/tunnel/physical/unknown
- process info: pid/ppid/name (nullable)
- attribution_confidence: HIGH/MEDIUM/LOW/NONE
- attrib_failure_reason: optional string
- geoip: optional
### 2.4 LeakEvent (final output)
Adds:
- leak_type: A/B/C/D
- severity: P0..P3
- policy_rule_id
- evidence: minimal structured evidence
---
## 3) Platform integration: Process Attribution Engine (PAE)
### 3.1 Trait addition (wtfnet-platform)
Add:
trait FlowOwnerProvider {
fn owner_of(
&self,
proto: Proto,
src_ip: IpAddr,
src_port: u16,
dst_ip: IpAddr,
dst_port: u16,
ts: SystemTime,
) -> FlowOwnerResult;
}
FlowOwnerResult:
- pid, ppid, process_name (optional)
- confidence: HIGH/MEDIUM/LOW/NONE
- failure_reason: optional string
Design rule: attribution is best-effort and never blocks leak detection.
---
## 4) Transport classification logic
### 4.1 Plain DNS
Match:
- UDP dst port 53 OR TCP dst port 53
Parse QNAME/QTYPE from payload.
### 4.2 DoT
Match:
- TCP dst port 853
### 4.3 DoH (heuristic)
Match candidates:
- TCP dst port 443 AND (one of):
- dst IP in configured DoH resolver list
- dst SNI matches known DoH provider list (if available)
- frequent small HTTPS bursts pattern (weak)
Attach confidence:
- MEDIUM: known endpoint match
- LOW: traffic-shape heuristic only
Important: you mostly need to detect encrypted resolver traffic bypassing the proxy channel,
not to fully prove DoH with payload inspection.
---
## 5) Policy model
Policy defines "safe DNS path" constraints:
- allowed interfaces
- allowed destinations (IP/CIDR)
- allowed processes
- allowed ports
A DNS event is a leak if it violates safe-path constraints.
### 5.1 Built-in profiles
full-tunnel:
- allow DNS only via tunnel iface or loopback stub
- any UDP/TCP 53 on physical iface => Leak-A
proxy-stub (default):
- allow DNS only to loopback stub
- allow stub upstream only to proxy destinations
- flag direct DoH/DoT outside proxy path => Leak-C
split:
- allow plaintext DNS only for allowlist
- enforce unknown => proxy resolve (Leak-B)
---
## 6) Leak rules (A/B/C/D)
Leak-A (plaintext escape):
- transport udp53/tcp53
- route_class != allowed
- dst not in allowed destination set
Leak-B (split policy intent leak):
- qname matches proxy-required set or "unknown"
- query observed going to ISP/domestic resolver or non-tunnel iface
Leak-C (encrypted bypass):
- DoT or DoH flow exists
- not via approved egress path (iface/destination)
Leak-D (mismatch indicator):
- correlate qname to later TCP/TLS flows (optional v0.4 NICE)
---
## 7) Privacy modes
Because domains and cmdlines are sensitive, support:
- Full: store full qname and cmdline
- Redacted (default): hash qname or keep eTLD+1 only; truncate cmdline
- Minimal: no domains/cmdline; keep leak counts + resolver IPs + process name
Privacy mode applies in report builder, not in sensor.
---
## 8) CLI integration
Add under `dns` command group:
- `dns leak status`
- `dns leak watch`
- `dns leak report`
`watch` returns:
- summary report (human) by default
- `--json` returns structured report with events list
---
## 9) Recommended incremental build plan
Phase 1 (core passive detection):
- sensor: udp/tcp capture
- classify: udp53/tcp53/dot
- parse plaintext qname/qtype
- policy: allowlist + allowed interfaces/dests
- leak rules: Leak-A + Leak-C (DoT)
- report: events + summary
Phase 2 (process attribution + DoH heuristics):
- platform FlowOwnerProvider impls
- DoH heuristic classification + confidence
- privacy modes
Phase 3 (optional correlation / Leak-D):
- flow tracker correlating DNS -> TCP/TLS connect events
- mismatch indicator output
```
---
# Small note about *where* it lives in your architecture
This design slots in with **minimal churn**:
* ✅ Add `crates/wtfnet-dnsleak`
* ✅ Add `dns leak ...` subcommands in CLI
* ✅ Add a **single** new platform trait for process attribution (best-effort)
* ✅ Reuse your existing `dns watch` capture approach as the sensor
…which is exactly what your design describes: passive monitoring + classification + rules + evidence output, with the PAE sitting in the pipeline as an “event enricher”.
**If it's too hard to detect DoH traffic, skip it.**
---

View File

@@ -29,6 +29,11 @@ This document tracks the planned roadmap alongside the current implementation st
- optional LLMNR/NBNS discovery
- optional HTTP/3 (feature-gated; experimental, incomplete)
### v0.4 (dns leak detection)
- dns leak detector (passive watch + report)
- process attribution (best-effort)
- policy profiles + privacy modes
## Current stage
### Implemented
@@ -69,13 +74,20 @@ This document tracks the planned roadmap alongside the current implementation st
- v0.3: TLS extras (OCSP flag + richer cert parsing).
- v0.3: cert baseline/diff improvements.
- v0.3: HTTP/3 request path (feature-gated; experimental, incomplete).
- v0.3: HTTP/3 error classification (feature-gated).
- v0.4: platform flow-owner lookup (best-effort).
- v0.4: dns leak detector crate + CLI commands (status/watch/report).
- Discover crate with mdns/ssdp commands.
- Discover llmnr/nbns (best-effort).
- Diag crate with report and bundle export.
- Basic unit tests for calc and TLS parsing.
### In progress
- v0.4: DoH heuristic classification (optional).
- v0.4: Leak-D mismatch correlation (optional).
- v0.3: optional HTTP/3 (feature-gated; keep disabled until stabilized).
### Next
- Complete v0.3 trace upgrades and update CLI output.
- Update docs/README/COMMANDS for v0.4.
- Add v0.2 tests (dns detect, basic http/tls smoke).
- Track DNS leak design status in `docs/dns_leak_implementation_status.md`.