Compare commits

...

2 Commits

Author     SHA1        Message                       Date
DaZuo0122  cfa96bde08  Add: dns leak detection       2026-01-17 18:45:24 +08:00
DaZuo0122  ccd4a31d21  Add: H3 support - incomplete  2026-01-17 13:47:37 +08:00
33 changed files with 5520 additions and 81 deletions

Cargo.lock generated
View File

@@ -217,12 +217,24 @@ dependencies = [
"shlex",
]
[[package]]
name = "cesu8"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c"
[[package]]
name = "cfg-if"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
[[package]]
name = "cfg_aliases"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
[[package]]
name = "cipher"
version = "0.4.4"
@@ -279,6 +291,16 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
[[package]]
name = "combine"
version = "4.6.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd"
dependencies = [
"bytes",
"memchr",
]
[[package]]
name = "concurrent-queue"
version = "2.5.0"
@@ -304,6 +326,16 @@ dependencies = [
"libc",
]
[[package]]
name = "core-foundation"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6"
dependencies = [
"core-foundation-sys",
"libc",
]
[[package]]
name = "core-foundation-sys"
version = "0.8.7"
@@ -447,6 +479,18 @@ dependencies = [
"windows-sys 0.61.2",
]
[[package]]
name = "fastbloom"
version = "0.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4e7f34442dbe69c60fe8eaf58a8cafff81a1f278816d8ab4db255b3bef4ac3c4"
dependencies = [
"getrandom 0.3.4",
"libm",
"rand 0.9.2",
"siphasher",
]
[[package]]
name = "fastrand"
version = "2.3.0"
@@ -511,6 +555,21 @@ dependencies = [
"percent-encoding",
]
[[package]]
name = "futures"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
dependencies = [
"futures-channel",
"futures-core",
"futures-executor",
"futures-io",
"futures-sink",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-channel"
version = "0.3.31"
@@ -518,6 +577,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
dependencies = [
"futures-core",
"futures-sink",
]
[[package]]
@@ -526,12 +586,34 @@ version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
[[package]]
name = "futures-executor"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f"
dependencies = [
"futures-core",
"futures-task",
"futures-util",
]
[[package]]
name = "futures-io"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6"
[[package]]
name = "futures-macro"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]]
name = "futures-sink"
version = "0.3.31"
@@ -550,8 +632,13 @@ version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
dependencies = [
"futures-channel",
"futures-core",
"futures-io",
"futures-macro",
"futures-sink",
"futures-task",
"memchr",
"pin-project-lite",
"pin-utils",
"slab",
@@ -574,8 +661,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0"
dependencies = [
"cfg-if",
"js-sys",
"libc",
"wasi",
"wasm-bindgen",
]
[[package]]
@@ -585,9 +674,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd"
dependencies = [
"cfg-if",
"js-sys",
"libc",
"r-efi",
"wasip2",
"wasm-bindgen",
]
[[package]]
@@ -607,7 +698,7 @@ dependencies = [
"futures-core",
"futures-sink",
"futures-util",
"http",
"http 0.2.12",
"indexmap",
"slab",
"tokio",
@@ -615,6 +706,34 @@ dependencies = [
"tracing",
]
[[package]]
name = "h3"
version = "0.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10872b55cfb02a821b69dc7cf8dc6a71d6af25eb9a79662bec4a9d016056b3be"
dependencies = [
"bytes",
"fastrand",
"futures-util",
"http 1.4.0",
"pin-project-lite",
"tokio",
]
[[package]]
name = "h3-quinn"
version = "0.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b2e732c8d91a74731663ac8479ab505042fbf547b9a207213ab7fbcbfc4f8b4"
dependencies = [
"bytes",
"futures",
"h3",
"quinn",
"tokio",
"tokio-util",
]
[[package]]
name = "hashbrown"
version = "0.16.1"
@@ -648,12 +767,12 @@ dependencies = [
"futures-io",
"futures-util",
"h2",
"http",
"http 0.2.12",
"idna",
"ipnet",
"once_cell",
"rand 0.8.5",
"rustls",
"rustls 0.21.12",
"rustls-native-certs 0.6.3",
"rustls-pemfile 1.0.4",
"thiserror 1.0.69",
@@ -679,7 +798,7 @@ dependencies = [
"parking_lot",
"rand 0.8.5",
"resolv-conf",
"rustls",
"rustls 0.21.12",
"rustls-native-certs 0.6.3",
"smallvec",
"thiserror 1.0.69",
@@ -708,6 +827,16 @@ dependencies = [
"itoa",
]
[[package]]
name = "http"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a"
dependencies = [
"bytes",
"itoa",
]
[[package]]
name = "http-body"
version = "0.4.6"
@@ -715,7 +844,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
dependencies = [
"bytes",
"http",
"http 0.2.12",
"pin-project-lite",
]
@@ -742,7 +871,7 @@ dependencies = [
"futures-core",
"futures-util",
"h2",
"http",
"http 0.2.12",
"http-body",
"httparse",
"httpdate",
@@ -762,9 +891,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590"
dependencies = [
"futures-util",
"http",
"http 0.2.12",
"hyper",
"rustls",
"rustls 0.21.12",
"tokio",
"tokio-rustls",
]
@@ -930,6 +1059,9 @@ name = "ipnet"
version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130"
dependencies = [
"serde",
]
[[package]]
name = "ipnetwork"
@@ -952,6 +1084,28 @@ version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2"
[[package]]
name = "jni"
version = "0.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97"
dependencies = [
"cesu8",
"cfg-if",
"combine",
"jni-sys",
"log",
"thiserror 1.0.69",
"walkdir",
"windows-sys 0.45.0",
]
[[package]]
name = "jni-sys"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130"
[[package]]
name = "jobserver"
version = "0.1.34"
@@ -984,6 +1138,12 @@ version = "0.2.180"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc"
[[package]]
name = "libm"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de"
[[package]]
name = "linked-hash-map"
version = "0.5.6"
@@ -1026,6 +1186,12 @@ dependencies = [
"linked-hash-map",
]
[[package]]
name = "lru-slab"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154"
[[package]]
name = "matchers"
version = "0.2.0"
@@ -1108,10 +1274,10 @@ dependencies = [
"libc",
"log",
"openssl",
"openssl-probe",
"openssl-probe 0.1.6",
"openssl-sys",
"schannel",
"security-framework",
"security-framework 2.11.1",
"security-framework-sys",
"tempfile",
]
@@ -1240,6 +1406,12 @@ version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
[[package]]
name = "openssl-probe"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9f50d9b3dabb09ecd771ad0aa242ca6894994c130308ca3d7684634df8037391"
[[package]]
name = "openssl-sys"
version = "0.9.111"
@@ -1482,6 +1654,64 @@ dependencies = [
"unicode-ident",
]
[[package]]
name = "quinn"
version = "0.11.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20"
dependencies = [
"bytes",
"cfg_aliases",
"futures-io",
"pin-project-lite",
"quinn-proto",
"quinn-udp",
"rustc-hash",
"rustls 0.23.36",
"socket2 0.6.1",
"thiserror 2.0.17",
"tokio",
"tracing",
"web-time",
]
[[package]]
name = "quinn-proto"
version = "0.11.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31"
dependencies = [
"bytes",
"fastbloom",
"getrandom 0.3.4",
"lru-slab",
"rand 0.9.2",
"ring",
"rustc-hash",
"rustls 0.23.36",
"rustls-pki-types",
"rustls-platform-verifier",
"slab",
"thiserror 2.0.17",
"tinyvec",
"tracing",
"web-time",
]
[[package]]
name = "quinn-udp"
version = "0.5.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd"
dependencies = [
"cfg_aliases",
"libc",
"once_cell",
"socket2 0.6.1",
"tracing",
"windows-sys 0.60.2",
]
[[package]]
name = "quote"
version = "1.0.43"
@@ -1606,7 +1836,7 @@ dependencies = [
"futures-core",
"futures-util",
"h2",
"http",
"http 0.2.12",
"http-body",
"hyper",
"hyper-rustls",
@@ -1619,7 +1849,7 @@ dependencies = [
"once_cell",
"percent-encoding",
"pin-project-lite",
"rustls",
"rustls 0.21.12",
"rustls-pemfile 1.0.4",
"serde",
"serde_json",
@@ -1635,7 +1865,7 @@ dependencies = [
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
"webpki-roots",
"webpki-roots 0.25.4",
"winreg",
]
@@ -1659,6 +1889,12 @@ dependencies = [
"windows-sys 0.52.0",
]
[[package]]
name = "rustc-hash"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
[[package]]
name = "rusticata-macros"
version = "4.1.0"
@@ -1689,20 +1925,34 @@ checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e"
dependencies = [
"log",
"ring",
"rustls-webpki",
"rustls-webpki 0.101.7",
"sct",
]
[[package]]
name = "rustls"
version = "0.23.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b"
dependencies = [
"once_cell",
"ring",
"rustls-pki-types",
"rustls-webpki 0.103.9",
"subtle",
"zeroize",
]
[[package]]
name = "rustls-native-certs"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00"
dependencies = [
"openssl-probe",
"openssl-probe 0.1.6",
"rustls-pemfile 1.0.4",
"schannel",
"security-framework",
"security-framework 2.11.1",
]
[[package]]
@@ -1711,11 +1961,23 @@ version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5"
dependencies = [
"openssl-probe",
"openssl-probe 0.1.6",
"rustls-pemfile 2.2.0",
"rustls-pki-types",
"schannel",
"security-framework",
"security-framework 2.11.1",
]
[[package]]
name = "rustls-native-certs"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63"
dependencies = [
"openssl-probe 0.2.0",
"rustls-pki-types",
"schannel",
"security-framework 3.5.1",
]
[[package]]
@@ -1742,9 +2004,37 @@ version = "1.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282"
dependencies = [
"web-time",
"zeroize",
]
[[package]]
name = "rustls-platform-verifier"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784"
dependencies = [
"core-foundation 0.10.1",
"core-foundation-sys",
"jni",
"log",
"once_cell",
"rustls 0.23.36",
"rustls-native-certs 0.8.3",
"rustls-platform-verifier-android",
"rustls-webpki 0.103.9",
"security-framework 3.5.1",
"security-framework-sys",
"webpki-root-certs",
"windows-sys 0.61.2",
]
[[package]]
name = "rustls-platform-verifier-android"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f"
[[package]]
name = "rustls-webpki"
version = "0.101.7"
@@ -1755,6 +2045,17 @@ dependencies = [
"untrusted",
]
[[package]]
name = "rustls-webpki"
version = "0.103.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53"
dependencies = [
"ring",
"rustls-pki-types",
"untrusted",
]
[[package]]
name = "rustversion"
version = "1.0.22"
@@ -1767,6 +2068,15 @@ version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]]
name = "schannel"
version = "0.1.28"
@@ -1799,7 +2109,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02"
dependencies = [
"bitflags 2.10.0",
"core-foundation",
"core-foundation 0.9.4",
"core-foundation-sys",
"libc",
"security-framework-sys",
]
[[package]]
name = "security-framework"
version = "3.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef"
dependencies = [
"bitflags 2.10.0",
"core-foundation 0.10.1",
"core-foundation-sys",
"libc",
"security-framework-sys",
@@ -1913,6 +2236,12 @@ version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2"
[[package]]
name = "siphasher"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d"
[[package]]
name = "slab"
version = "0.4.11"
@@ -2033,7 +2362,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7"
dependencies = [
"bitflags 1.3.2",
"core-foundation",
"core-foundation 0.9.4",
"system-configuration-sys",
]
@@ -2207,7 +2536,7 @@ version = "0.24.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081"
dependencies = [
"rustls",
"rustls 0.21.12",
"tokio",
]
@@ -2248,6 +2577,7 @@ version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100"
dependencies = [
"log",
"pin-project-lite",
"tracing-attributes",
"tracing-core",
@@ -2394,6 +2724,16 @@ version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
"same-file",
"winapi-util",
]
[[package]]
name = "want"
version = "0.3.1"
@@ -2487,12 +2827,40 @@ dependencies = [
"wasm-bindgen",
]
[[package]]
name = "web-time"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "webpki-root-certs"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36a29fc0408b113f68cf32637857ab740edfafdf460c326cd2afaa2d84cc05dc"
dependencies = [
"rustls-pki-types",
]
[[package]]
name = "webpki-roots"
version = "0.25.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1"
[[package]]
name = "webpki-roots"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12bed680863276c63889429bfd6cab3b99943659923822de1c8a39c49e4d722c"
dependencies = [
"rustls-pki-types",
]
[[package]]
name = "widestring"
version = "1.2.1"
@@ -2515,6 +2883,15 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22"
dependencies = [
"windows-sys 0.61.2",
]
[[package]]
name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0"
@@ -2527,6 +2904,15 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
[[package]]
name = "windows-sys"
version = "0.45.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
dependencies = [
"windows-targets 0.42.2",
]
[[package]]
name = "windows-sys"
version = "0.48.0"
@@ -2563,6 +2949,21 @@ dependencies = [
"windows-link",
]
[[package]]
name = "windows-targets"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071"
dependencies = [
"windows_aarch64_gnullvm 0.42.2",
"windows_aarch64_msvc 0.42.2",
"windows_i686_gnu 0.42.2",
"windows_i686_msvc 0.42.2",
"windows_x86_64_gnu 0.42.2",
"windows_x86_64_gnullvm 0.42.2",
"windows_x86_64_msvc 0.42.2",
]
[[package]]
name = "windows-targets"
version = "0.48.5"
@@ -2611,6 +3012,12 @@ dependencies = [
"windows_x86_64_msvc 0.53.1",
]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8"
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.48.5"
@@ -2629,6 +3036,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53"
[[package]]
name = "windows_aarch64_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43"
[[package]]
name = "windows_aarch64_msvc"
version = "0.48.5"
@@ -2647,6 +3060,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006"
[[package]]
name = "windows_i686_gnu"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f"
[[package]]
name = "windows_i686_gnu"
version = "0.48.5"
@@ -2677,6 +3096,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c"
[[package]]
name = "windows_i686_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060"
[[package]]
name = "windows_i686_msvc"
version = "0.48.5"
@@ -2695,6 +3120,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2"
[[package]]
name = "windows_x86_64_gnu"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36"
[[package]]
name = "windows_x86_64_gnu"
version = "0.48.5"
@@ -2713,6 +3144,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.48.5"
@@ -2731,6 +3168,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1"
[[package]]
name = "windows_x86_64_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0"
[[package]]
name = "windows_x86_64_msvc"
version = "0.48.5"
@@ -2787,12 +3230,14 @@ dependencies = [
"clap",
"serde",
"serde_json",
"time",
"tokio",
"wtfnet-calc",
"wtfnet-core",
"wtfnet-diag",
"wtfnet-discover",
"wtfnet-dns",
"wtfnet-dnsleak",
"wtfnet-geoip",
"wtfnet-http",
"wtfnet-platform",
@@ -2830,6 +3275,7 @@ dependencies = [
name = "wtfnet-discover"
version = "0.1.0"
dependencies = [
"hickory-proto",
"mdns-sd",
"serde",
"thiserror 2.0.17",
@@ -2844,7 +3290,7 @@ dependencies = [
"hickory-resolver",
"pnet",
"reqwest",
"rustls",
"rustls 0.21.12",
"rustls-native-certs 0.6.3",
"serde",
"thiserror 2.0.17",
@@ -2855,6 +3301,20 @@ dependencies = [
"url",
]
[[package]]
name = "wtfnet-dnsleak"
version = "0.1.0"
dependencies = [
"hickory-proto",
"ipnet",
"pnet",
"serde",
"thiserror 2.0.17",
"tokio",
"tracing",
"wtfnet-platform",
]
[[package]]
name = "wtfnet-geoip"
version = "0.1.0"
@@ -2868,12 +3328,22 @@ dependencies = [
name = "wtfnet-http"
version = "0.1.0"
dependencies = [
"bytes",
"h3",
"h3-quinn",
"http 1.4.0",
"quinn",
"reqwest",
"rustls 0.21.12",
"rustls-native-certs 0.6.3",
"serde",
"thiserror 2.0.17",
"tokio",
"tokio-rustls",
"tokio-socks",
"tracing",
"url",
"webpki-roots 1.0.5",
]
[[package]]
@@ -2939,7 +3409,7 @@ dependencies = [
name = "wtfnet-tls"
version = "0.1.0"
dependencies = [
"rustls",
"rustls 0.21.12",
"rustls-native-certs 0.6.3",
"serde",
"thiserror 2.0.17",

View File

@@ -10,6 +10,7 @@ members = [
"crates/wtfnet-geoip",
"crates/wtfnet-probe",
"crates/wtfnet-dns",
"crates/wtfnet-dnsleak",
"crates/wtfnet-http",
"crates/wtfnet-tls",
"crates/wtfnet-discover",

View File

@@ -7,8 +7,10 @@ WTFnet is a pure CLI toolbox for diagnosing network problems on Linux and Window
- Ports, neighbors, and trusted root certificates.
- Probing: ping, tcping, traceroute (best-effort).
- DNS: query/detect/watch with GeoIP, DoT/DoH, and SOCKS5 support.
- DNS leak detection with policy profiles and privacy modes (best-effort).
- GeoIP offline lookup via GeoLite2 Country/ASN.
- Subnet calculator: subnet/contains/overlap/summarize.
- Discover: mDNS/SSDP plus LLMNR/NBNS.
## Quickstart
```bash
@@ -44,6 +46,9 @@ wtfn dns query example.com A --transport doh --server 1.1.1.1 --tls-name cloudfl
wtfn dns query example.com A --transport dot --server 1.1.1.1 --tls-name cloudflare-dns.com --socks5 socks5://127.0.0.1:9909
wtfn dns detect example.com --transport doh --servers 1.1.1.1 --tls-name cloudflare-dns.com
wtfn dns watch --duration 10s --filter example.com
wtfn dns leak status
wtfn dns leak watch --duration 10s --profile proxy-stub
wtfn dns leak report report.json
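# Sketch combining the flags documented further down (leak.json is an illustrative path):
# persist a watch run to disk, then re-render it with a different privacy mode.
wtfn dns leak watch --duration 10s --profile proxy-stub --out leak.json --summary-only
wtfn dns leak report leak.json --privacy minimal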
# TLS
wtfn tls handshake example.com:443
@@ -55,6 +60,8 @@ wtfn tls alpn example.com:443 --alpn h2,http/1.1
# Discover
wtfn discover mdns --duration 3s
wtfn discover ssdp --duration 3s
wtfn discover llmnr --duration 3s
wtfn discover nbns --duration 3s
# Diag
wtfn diag --out report.json --json
@@ -81,16 +88,25 @@ Command flags (implemented):
- `sys route`: `--ipv4`, `--ipv6`, `--to <ip>`
- `ports listen`: `--tcp`, `--udp`, `--port <n>`
- `neigh list`: `--ipv4`, `--ipv6`, `--iface <name>`
- `ports conns`: `--top <n>`, `--by-process`
- `cert baseline`: `<path>`
- `cert diff`: `<path>`
- `probe ping`: `--count <n>`, `--timeout-ms <n>`, `--interval-ms <n>`, `--no-geoip`
- `probe tcping`: `--count <n>`, `--timeout-ms <n>`, `--socks5 <url>`, `--prefer-ipv4`, `--no-geoip`
- `probe trace`: `--max-hops <n>`, `--per-hop <n>`, `--timeout-ms <n>`, `--udp`, `--port <n>`, `--rdns`, `--no-geoip`
- `dns query`: `--server <ip[:port]>`, `--transport <udp|tcp|dot|doh>`, `--tls-name <name>`, `--socks5 <url>`, `--prefer-ipv4`, `--timeout-ms <n>`
- `dns detect`: `--servers <csv>`, `--transport <udp|tcp|dot|doh>`, `--tls-name <name>`, `--socks5 <url>`, `--prefer-ipv4`, `--repeat <n>`, `--timeout-ms <n>`
- `dns watch`: `--duration <Ns|Nms>`, `--iface <name>`, `--filter <pattern>`
- `http head|get`: `--timeout-ms <n>`, `--follow-redirects <n>`, `--show-headers`, `--show-body`, `--max-body-bytes <n>`, `--http1-only`, `--http2-only`, `--geoip`, `--socks5 <url>`
- `tls handshake|cert|verify|alpn`: `--sni <name>`, `--alpn <csv>`, `--timeout-ms <n>`, `--insecure`, `--socks5 <url>`, `--prefer-ipv4`
- `dns leak status`: `--profile <full-tunnel|proxy-stub|split>`, `--policy <path>`
- `dns leak watch`: `--duration <Ns|Nms>`, `--iface <name>`, `--profile <full-tunnel|proxy-stub|split>`, `--policy <path>`, `--privacy <full|redacted|minimal>`, `--out <path>`, `--summary-only`
- `dns leak watch`: `--iface-diag` (prints capture-capable interfaces)
- `dns leak report`: `<path>`, `--privacy <full|redacted|minimal>`
- `http head|get`: `--timeout-ms <n>`, `--follow-redirects <n>`, `--show-headers`, `--show-body`, `--max-body-bytes <n>`, `--http1-only`, `--http2-only`, `--http3` (feature `http3`), `--http3-only` (feature `http3`), `--geoip`, `--socks5 <url>`
- `tls handshake|cert|verify|alpn`: `--sni <name>`, `--alpn <csv>`, `--timeout-ms <n>`, `--insecure`, `--socks5 <url>`, `--prefer-ipv4`, `--show-extensions`, `--ocsp`
- `discover mdns`: `--duration <Ns|Nms>`, `--service <type>`
- `discover ssdp`: `--duration <Ns|Nms>`
- `discover llmnr`: `--duration <Ns|Nms>`, `--name <host>`
- `discover nbns`: `--duration <Ns|Nms>`
- `diag`: `--out <path>`, `--bundle <path>`, `--dns-detect <domain>`, `--dns-timeout-ms <n>`, `--dns-repeat <n>`
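A minimal round trip for the new `cert baseline` / `cert diff` pair (a sketch only; `roots-baseline.json` is an illustrative path):
```bash
# capture the current trusted root store into a baseline file
wtfn cert baseline roots-baseline.json
# later, compare the live store against that baseline (reports added/removed/changed/newly expired roots)
wtfn cert diff roots-baseline.json
```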
## GeoIP data files
@@ -112,6 +128,14 @@ Install:
cmake --build build --target install
```
## HTTP/3 (experimental)
HTTP/3 support is feature-gated and incomplete. Do not enable it in production builds yet.
To enable locally for testing:
```bash
cargo run -p wtfnet-cli --features wtfnet-http/http3 -- http head https://cloudflare-quic.com --http3
```
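The command flags list above also documents `--http3-only` (behind the same `http3` feature), which presumably restricts the request to HTTP/3 instead of falling back; a sketch against the same test endpoint:
```bash
cargo run -p wtfnet-cli --features wtfnet-http/http3 -- http head https://cloudflare-quic.com --http3-only
```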
## Roadmap
### v0.1 (MVP)
- sys: ifaces/ip/route/dns
@@ -135,8 +159,13 @@ cmake --build build --target install
- TLS extras: OCSP stapling indicator, richer cert parsing
- ports conns improvements (top talkers / summary)
- better baseline/diff for system roots
- optional HTTP/3 (feature-gated)
- optional LLMNR/NBNS discovery
- optional HTTP/3 (feature-gated; experimental, incomplete)
### v0.4 (current requirements)
- dns leak detection (passive watch + report)
- process attribution (best-effort)
- policy profiles + privacy modes
## Current stage
Implemented:
@@ -146,14 +175,16 @@ Implemented:
- http head/get with timing and GeoIP.
- tls handshake/verify/cert/alpn.
- DoT/DoH + SOCKS5 proxy support.
- discover mdns/ssdp.
- discover mdns/ssdp/llmnr/nbns.
- dns leak detection (status/watch/report).
- diag report + bundle.
- calc subcrate with subnet/contains/overlap/summarize.
- CMake/Makefile build + package + install targets.
- Basic unit tests for calc and TLS parsing.
In progress:
- none.
- dns leak: DoH heuristic classification (optional).
- dns leak: Leak-D mismatch correlation (optional).
See `docs/implementation_status.md` for a design-vs-implementation view.

View File

@@ -11,6 +11,7 @@ path = "src/main.rs"
clap = { version = "4", features = ["derive"] }
serde = { version = "1", features = ["derive"] }
serde_json = "1"
time = { version = "0.3", features = ["formatting", "parsing"] }
tokio = { version = "1", features = ["macros", "rt-multi-thread"] }
wtfnet-core = { path = "../wtfnet-core" }
wtfnet-calc = { path = "../wtfnet-calc" }
@@ -18,6 +19,7 @@ wtfnet-geoip = { path = "../wtfnet-geoip" }
wtfnet-platform = { path = "../wtfnet-platform" }
wtfnet-probe = { path = "../wtfnet-probe" }
wtfnet-dns = { path = "../wtfnet-dns", features = ["pcap"] }
wtfnet-dnsleak = { path = "../wtfnet-dnsleak", features = ["pcap"] }
wtfnet-http = { path = "../wtfnet-http" }
wtfnet-tls = { path = "../wtfnet-tls" }
wtfnet-discover = { path = "../wtfnet-discover" }

View File

@@ -1,5 +1,5 @@
use clap::{Parser, Subcommand};
use serde::Serialize;
use serde::{Deserialize, Serialize};
use std::net::ToSocketAddrs;
use std::path::PathBuf;
use wtfnet_core::{
@@ -97,6 +97,7 @@ enum SysCommand {
enum PortsCommand {
Listen(PortsListenArgs),
Who(PortsWhoArgs),
Conns(PortsConnsArgs),
}
#[derive(Subcommand, Debug)]
@@ -107,6 +108,8 @@ enum NeighCommand {
#[derive(Subcommand, Debug)]
enum CertCommand {
Roots,
Baseline(CertBaselineArgs),
Diff(CertDiffArgs),
}
#[derive(Subcommand, Debug)]
@@ -127,6 +130,17 @@ enum DnsCommand {
Query(DnsQueryArgs),
Detect(DnsDetectArgs),
Watch(DnsWatchArgs),
Leak {
#[command(subcommand)]
command: DnsLeakCommand,
},
}
#[derive(Subcommand, Debug)]
enum DnsLeakCommand {
Status(DnsLeakStatusArgs),
Watch(DnsLeakWatchArgs),
Report(DnsLeakReportArgs),
}
#[derive(Subcommand, Debug)]
@@ -155,6 +169,8 @@ enum TlsCommand {
enum DiscoverCommand {
Mdns(DiscoverMdnsArgs),
Ssdp(DiscoverSsdpArgs),
Llmnr(DiscoverLlmnrArgs),
Nbns(DiscoverNbnsArgs),
}
#[derive(Parser, Debug, Clone)]
@@ -190,6 +206,14 @@ struct PortsWhoArgs {
target: String,
}
#[derive(Parser, Debug, Clone)]
struct PortsConnsArgs {
#[arg(long)]
top: Option<usize>,
#[arg(long)]
by_process: bool,
}
#[derive(Parser, Debug, Clone)]
struct NeighListArgs {
#[arg(long)]
@@ -205,6 +229,16 @@ struct GeoIpLookupArgs {
target: String,
}
#[derive(Parser, Debug, Clone)]
struct CertBaselineArgs {
path: PathBuf,
}
#[derive(Parser, Debug, Clone)]
struct CertDiffArgs {
path: PathBuf,
}
#[derive(Parser, Debug, Clone)]
struct ProbePingArgs {
target: String,
@@ -299,6 +333,41 @@ struct DnsWatchArgs {
filter: Option<String>,
}
#[derive(Parser, Debug, Clone)]
struct DnsLeakStatusArgs {
#[arg(long)]
profile: Option<String>,
#[arg(long)]
policy: Option<PathBuf>,
}
#[derive(Parser, Debug, Clone)]
struct DnsLeakWatchArgs {
#[arg(long, default_value = "10s")]
duration: String,
#[arg(long)]
iface: Option<String>,
#[arg(long)]
profile: Option<String>,
#[arg(long)]
policy: Option<PathBuf>,
#[arg(long, default_value = "redacted")]
privacy: String,
#[arg(long)]
out: Option<PathBuf>,
#[arg(long)]
summary_only: bool,
#[arg(long)]
iface_diag: bool,
}
#[derive(Parser, Debug, Clone)]
struct DnsLeakReportArgs {
path: PathBuf,
#[arg(long, default_value = "redacted")]
privacy: String,
}
#[derive(Parser, Debug, Clone)]
struct CalcSubnetArgs {
input: Vec<String>,
@@ -339,6 +408,10 @@ struct HttpRequestArgs {
#[arg(long)]
http2_only: bool,
#[arg(long)]
http3: bool,
#[arg(long)]
http3_only: bool,
#[arg(long)]
geoip: bool,
#[arg(long)]
socks5: Option<String>,
@@ -359,6 +432,10 @@ struct TlsArgs {
socks5: Option<String>,
#[arg(long)]
prefer_ipv4: bool,
#[arg(long)]
show_extensions: bool,
#[arg(long)]
ocsp: bool,
}
#[derive(Parser, Debug, Clone)]
@@ -375,6 +452,20 @@ struct DiscoverSsdpArgs {
duration: String,
}
#[derive(Parser, Debug, Clone)]
struct DiscoverLlmnrArgs {
#[arg(long, default_value = "3s")]
duration: String,
#[arg(long)]
name: Option<String>,
}
#[derive(Parser, Debug, Clone)]
struct DiscoverNbnsArgs {
#[arg(long, default_value = "3s")]
duration: String,
}
#[derive(Parser, Debug, Clone)]
struct DiagArgs {
#[arg(long)]
@@ -450,6 +541,7 @@ struct HttpReportGeoIp {
pub geoip: Vec<wtfnet_geoip::GeoIpRecord>,
pub headers: Vec<(String, String)>,
pub body: Option<String>,
pub warnings: Vec<String>,
pub timing: wtfnet_http::HttpTiming,
}
@@ -481,12 +573,21 @@ async fn main() {
Commands::Ports {
command: PortsCommand::Who(args),
} => handle_ports_who(&cli, args.clone()).await,
Commands::Ports {
command: PortsCommand::Conns(args),
} => handle_ports_conns(&cli, args.clone()).await,
Commands::Neigh {
command: NeighCommand::List(args),
} => handle_neigh_list(&cli, args.clone()).await,
Commands::Cert {
command: CertCommand::Roots,
} => handle_cert_roots(&cli).await,
Commands::Cert {
command: CertCommand::Baseline(args),
} => handle_cert_baseline(&cli, args.clone()).await,
Commands::Cert {
command: CertCommand::Diff(args),
} => handle_cert_diff(&cli, args.clone()).await,
Commands::Geoip {
command: GeoIpCommand::Lookup(args),
} => handle_geoip_lookup(&cli, args.clone()).await,
@@ -511,6 +612,13 @@ async fn main() {
Commands::Dns {
command: DnsCommand::Watch(args),
} => handle_dns_watch(&cli, args.clone()).await,
Commands::Dns {
command: DnsCommand::Leak { command },
} => match command {
DnsLeakCommand::Status(args) => handle_dns_leak_status(&cli, args.clone()).await,
DnsLeakCommand::Watch(args) => handle_dns_leak_watch(&cli, args.clone()).await,
DnsLeakCommand::Report(args) => handle_dns_leak_report(&cli, args.clone()).await,
},
Commands::Calc {
command: CalcCommand::Subnet(args),
} => handle_calc_subnet(&cli, args.clone()).await,
@@ -547,6 +655,12 @@ async fn main() {
Commands::Discover {
command: DiscoverCommand::Ssdp(args),
} => handle_discover_ssdp(&cli, args.clone()).await,
Commands::Discover {
command: DiscoverCommand::Llmnr(args),
} => handle_discover_llmnr(&cli, args.clone()).await,
Commands::Discover {
command: DiscoverCommand::Nbns(args),
} => handle_discover_nbns(&cli, args.clone()).await,
Commands::Diag(args) => handle_diag(&cli, args.clone()).await,
};
@@ -820,6 +934,99 @@ async fn handle_ports_who(cli: &Cli, args: PortsWhoArgs) -> i32 {
}
}
async fn handle_ports_conns(cli: &Cli, args: PortsConnsArgs) -> i32 {
let result = platform().ports.connections().await;
match result {
Ok(conns) => {
if cli.json {
let meta = Meta::new("wtfnet", env!("CARGO_PKG_VERSION"), false);
let mut command_args = Vec::new();
if let Some(top) = args.top {
command_args.push("--top".to_string());
command_args.push(top.to_string());
}
if args.by_process {
command_args.push("--by-process".to_string());
}
let command = CommandInfo::new("ports conns", command_args);
let envelope = CommandEnvelope::new(meta, command, conns);
emit_json(cli, &envelope)
} else if args.by_process {
let summary = summarize_by_process(&conns);
for (name, count) in summary {
println!("{name} {count}");
}
ExitKind::Ok.code()
} else if let Some(top) = args.top {
let summary = summarize_top_remote(&conns, top);
for (addr, count) in summary {
println!("{addr} {count}");
}
ExitKind::Ok.code()
} else {
for conn in conns {
let state = conn.state.unwrap_or_else(|| "-".to_string());
let pid = conn
.pid
.map(|value| value.to_string())
.unwrap_or_else(|| "-".to_string());
let proc = conn
.process_name
.unwrap_or_else(|| "-".to_string());
println!(
"{} {} -> {} {} pid={} proc={}",
conn.proto, conn.local_addr, conn.remote_addr, state, pid, proc
);
}
ExitKind::Ok.code()
}
}
Err(err) => emit_platform_error(cli, err),
}
}
fn summarize_top_remote(
conns: &[wtfnet_platform::ConnSocket],
top: usize,
) -> Vec<(String, usize)> {
let mut counts = std::collections::HashMap::new();
for conn in conns {
let host = parse_host_from_socket(&conn.remote_addr);
*counts.entry(host).or_insert(0usize) += 1;
}
let mut items = counts.into_iter().collect::<Vec<_>>();
items.sort_by(|a, b| b.1.cmp(&a.1).then_with(|| a.0.cmp(&b.0)));
items.truncate(top);
items
}
fn summarize_by_process(conns: &[wtfnet_platform::ConnSocket]) -> Vec<(String, usize)> {
let mut counts = std::collections::HashMap::new();
for conn in conns {
let name = conn
.process_name
.clone()
.or_else(|| conn.pid.map(|value| format!("pid:{value}")))
.unwrap_or_else(|| "-".to_string());
*counts.entry(name).or_insert(0usize) += 1;
}
let mut items = counts.into_iter().collect::<Vec<_>>();
items.sort_by(|a, b| b.1.cmp(&a.1).then_with(|| a.0.cmp(&b.0)));
items
}
fn parse_host_from_socket(value: &str) -> String {
if let Some(stripped) = value.strip_prefix('[') {
if let Some(end) = stripped.find(']') {
return stripped[..end].to_string();
}
}
if let Some((host, _port)) = value.rsplit_once(':') {
return host.to_string();
}
value.to_string()
}
async fn handle_neigh_list(cli: &Cli, args: NeighListArgs) -> i32 {
let result = platform().neigh.neighbors().await;
match result {
@@ -882,6 +1089,212 @@ async fn handle_cert_roots(cli: &Cli) -> i32 {
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
struct CertBaseline {
schema_version: u32,
created_at: String,
roots: Vec<wtfnet_platform::RootCert>,
}
#[derive(Debug, Clone, Serialize)]
struct CertChange {
sha256: String,
field: String,
baseline: String,
current: String,
}
#[derive(Debug, Clone, Serialize)]
struct CertDiffReport {
baseline_path: String,
baseline_count: usize,
current_count: usize,
added: Vec<wtfnet_platform::RootCert>,
removed: Vec<wtfnet_platform::RootCert>,
changed: Vec<CertChange>,
newly_expired: Vec<wtfnet_platform::RootCert>,
schema_version: u32,
}
async fn handle_cert_baseline(cli: &Cli, args: CertBaselineArgs) -> i32 {
let result = platform().cert.trusted_roots().await;
match result {
Ok(roots) => {
let baseline = CertBaseline {
schema_version: 1,
created_at: now_rfc3339(),
roots,
};
match serde_json::to_string_pretty(&baseline) {
Ok(payload) => match std::fs::write(&args.path, payload) {
Ok(()) => ExitKind::Ok.code(),
Err(err) => {
eprintln!("failed to write baseline: {err}");
ExitKind::Failed.code()
}
},
Err(err) => {
eprintln!("failed to serialize baseline: {err}");
ExitKind::Failed.code()
}
}
}
Err(err) => emit_platform_error(cli, err),
}
}
async fn handle_cert_diff(cli: &Cli, args: CertDiffArgs) -> i32 {
let baseline = match std::fs::read_to_string(&args.path) {
Ok(contents) => match serde_json::from_str::<CertBaseline>(&contents) {
Ok(value) => value,
Err(err) => {
eprintln!("failed to parse baseline: {err}");
return ExitKind::Failed.code();
}
},
Err(err) => {
eprintln!("failed to read baseline: {err}");
return ExitKind::Failed.code();
}
};
let current = match platform().cert.trusted_roots().await {
Ok(value) => value,
Err(err) => return emit_platform_error(cli, err),
};
let baseline_map = baseline
.roots
.iter()
.map(|cert| (cert.sha256.clone(), cert))
.collect::<std::collections::HashMap<_, _>>();
let current_map = current
.iter()
.map(|cert| (cert.sha256.clone(), cert))
.collect::<std::collections::HashMap<_, _>>();
let mut added = Vec::new();
let mut removed = Vec::new();
let mut changed = Vec::new();
let mut newly_expired = Vec::new();
for cert in &current {
if !baseline_map.contains_key(&cert.sha256) {
added.push(cert.clone());
}
}
for cert in &baseline.roots {
if !current_map.contains_key(&cert.sha256) {
removed.push(cert.clone());
}
}
for (sha, base) in &baseline_map {
if let Some(curr) = current_map.get(sha) {
if base.subject != curr.subject {
changed.push(CertChange {
sha256: sha.clone(),
field: "subject".to_string(),
baseline: base.subject.clone(),
current: curr.subject.clone(),
});
}
if base.issuer != curr.issuer {
changed.push(CertChange {
sha256: sha.clone(),
field: "issuer".to_string(),
baseline: base.issuer.clone(),
current: curr.issuer.clone(),
});
}
if base.not_after != curr.not_after {
changed.push(CertChange {
sha256: sha.clone(),
field: "not_after".to_string(),
baseline: base.not_after.clone(),
current: curr.not_after.clone(),
});
}
if base.not_before != curr.not_before {
changed.push(CertChange {
sha256: sha.clone(),
field: "not_before".to_string(),
baseline: base.not_before.clone(),
current: curr.not_before.clone(),
});
}
if let (Some(created), Some(expiry)) = (
parse_cert_time(&baseline.created_at),
parse_cert_time(&curr.not_after),
) {
let now = time::OffsetDateTime::now_utc();
if created < expiry && now >= expiry {
newly_expired.push((*curr).clone());
}
}
}
}
let report = CertDiffReport {
baseline_path: args.path.to_string_lossy().to_string(),
baseline_count: baseline.roots.len(),
current_count: current.len(),
added,
removed,
changed,
newly_expired,
schema_version: baseline.schema_version,
};
if cli.json {
let meta = Meta::new("wtfnet", env!("CARGO_PKG_VERSION"), false);
let command = CommandInfo::new("cert diff", vec![report.baseline_path.clone()]);
let envelope = CommandEnvelope::new(meta, command, report);
emit_json(cli, &envelope)
} else {
println!(
"baseline_count={} current_count={} added={} removed={} changed={} newly_expired={}",
report.baseline_count,
report.current_count,
report.added.len(),
report.removed.len(),
report.changed.len(),
report.newly_expired.len()
);
for cert in report.added {
println!("added {} {}", cert.sha256, cert.subject);
}
for cert in report.removed {
println!("removed {} {}", cert.sha256, cert.subject);
}
for change in report.changed {
println!(
"changed {} {} {} -> {}",
change.sha256, change.field, change.baseline, change.current
);
}
for cert in report.newly_expired {
println!("expired {} {}", cert.sha256, cert.subject);
}
ExitKind::Ok.code()
}
}
fn parse_cert_time(value: &str) -> Option<time::OffsetDateTime> {
if let Ok(dt) = time::OffsetDateTime::parse(value, &time::format_description::well_known::Rfc3339) {
return Some(dt);
}
let format = time::format_description::parse("[year]-[month]-[day] [hour]:[minute]:[second] UTC").ok()?;
time::OffsetDateTime::parse(value, &format).ok()
}
fn now_rfc3339() -> String {
time::OffsetDateTime::now_utc()
.format(&time::format_description::well_known::Rfc3339)
.unwrap_or_else(|_| "1970-01-01T00:00:00Z".to_string())
}
async fn handle_geoip_lookup(cli: &Cli, args: GeoIpLookupArgs) -> i32 {
let ip = match args.target.parse::<std::net::IpAddr>() {
Ok(ip) => ip,
@@ -1588,6 +2001,242 @@ async fn handle_dns_watch(cli: &Cli, args: DnsWatchArgs) -> i32 {
}
}
#[derive(Debug, Serialize)]
struct DnsLeakStatusReport {
pub policy: wtfnet_dnsleak::PolicySummary,
pub interfaces: Vec<wtfnet_platform::NetInterface>,
pub routes: Vec<wtfnet_platform::RouteEntry>,
pub dns: wtfnet_platform::DnsConfigSnapshot,
}
async fn handle_dns_leak_status(cli: &Cli, args: DnsLeakStatusArgs) -> i32 {
let platform = platform();
let interfaces = match platform.sys.interfaces().await {
Ok(value) => value,
Err(err) => return emit_platform_error(cli, err),
};
let routes = match platform.sys.routes().await {
Ok(value) => value,
Err(err) => return emit_platform_error(cli, err),
};
let dns = match platform.sys.dns_config().await {
Ok(value) => value,
Err(err) => return emit_platform_error(cli, err),
};
let policy = match resolve_leak_policy(&args.profile, args.policy.as_ref(), &interfaces) {
Ok(policy) => policy,
Err(err) => {
eprintln!("{err}");
return ExitKind::Usage.code();
}
};
let report = DnsLeakStatusReport {
policy: policy.summary(),
interfaces,
routes,
dns,
};
if cli.json {
let meta = Meta::new("wtfnet", env!("CARGO_PKG_VERSION"), false);
let mut command_args = Vec::new();
if let Some(profile) = args.profile {
command_args.push("--profile".to_string());
command_args.push(profile);
}
if let Some(policy_path) = args.policy {
command_args.push("--policy".to_string());
command_args.push(policy_path.display().to_string());
}
let command = CommandInfo::new("dns leak status", command_args);
let envelope = CommandEnvelope::new(meta, command, report);
emit_json(cli, &envelope)
} else {
println!("policy: {:?}", report.policy.profile);
if !report.policy.allowed_ifaces.is_empty() {
println!("allowed ifaces: {}", report.policy.allowed_ifaces.join(", "));
}
if !report.policy.allowed_destinations.is_empty() {
println!(
"allowed destinations: {}",
report.policy.allowed_destinations.join(", ")
);
}
if !report.policy.allowed_ports.is_empty() {
println!(
"allowed ports: {}",
report
.policy
.allowed_ports
.iter()
.map(|port| port.to_string())
.collect::<Vec<_>>()
.join(", ")
);
}
println!("interfaces: {}", report.interfaces.len());
println!("routes: {}", report.routes.len());
println!("dns servers: {}", report.dns.servers.join(", "));
ExitKind::Ok.code()
}
}
async fn handle_dns_leak_watch(cli: &Cli, args: DnsLeakWatchArgs) -> i32 {
if args.iface_diag {
return handle_dns_leak_iface_diag(cli).await;
}
let duration_ms = match parse_duration_ms(&args.duration) {
Ok(value) => value,
Err(err) => {
eprintln!("{err}");
return ExitKind::Usage.code();
}
};
let privacy = match parse_leak_privacy(&args.privacy) {
Ok(value) => value,
Err(err) => {
eprintln!("{err}");
return ExitKind::Usage.code();
}
};
let platform = platform();
let interfaces = match platform.sys.interfaces().await {
Ok(value) => value,
Err(err) => return emit_platform_error(cli, err),
};
let policy = match resolve_leak_policy(&args.profile, args.policy.as_ref(), &interfaces) {
Ok(policy) => policy,
Err(err) => {
eprintln!("{err}");
return ExitKind::Usage.code();
}
};
let options = wtfnet_dnsleak::LeakWatchOptions {
duration_ms,
iface: args.iface.clone(),
policy,
privacy,
include_events: !args.summary_only,
};
let report = match wtfnet_dnsleak::watch(options, Some(&*platform.flow_owner)).await {
Ok(report) => report,
Err(err) => {
eprintln!("dns leak watch failed: {err}");
return ExitKind::Failed.code();
}
};
if let Some(path) = args.out.as_ref() {
if let Ok(payload) = serde_json::to_string_pretty(&report) {
if let Err(err) = std::fs::write(path, payload) {
eprintln!("failed to write report: {err}");
return ExitKind::Failed.code();
}
}
}
if cli.json {
let meta = Meta::new("wtfnet", env!("CARGO_PKG_VERSION"), false);
let mut command_args = vec!["--duration".to_string(), args.duration];
if let Some(iface) = args.iface {
command_args.push("--iface".to_string());
command_args.push(iface);
}
if let Some(profile) = args.profile {
command_args.push("--profile".to_string());
command_args.push(profile);
}
if let Some(policy_path) = args.policy {
command_args.push("--policy".to_string());
command_args.push(policy_path.display().to_string());
}
if let Some(out) = args.out {
command_args.push("--out".to_string());
command_args.push(out.display().to_string());
}
if args.summary_only {
command_args.push("--summary-only".to_string());
}
command_args.push("--privacy".to_string());
command_args.push(args.privacy);
let command = CommandInfo::new("dns leak watch", command_args);
let envelope = CommandEnvelope::new(meta, command, report);
emit_json(cli, &envelope)
} else {
print_leak_summary(&report);
if !report.events.is_empty() {
for event in report.events {
println!(
"[{:?}] {:?} {}:{} via {:?}",
event.severity, event.leak_type, event.dst_ip, event.dst_port, event.route_class
);
if let Some(qname) = event.qname.as_ref() {
println!(" qname: {}", qname);
}
if let Some(process) = event.process_name.as_ref() {
println!(" process: {}", process);
}
}
}
ExitKind::Ok.code()
}
}
async fn handle_dns_leak_iface_diag(_cli: &Cli) -> i32 {
match wtfnet_dnsleak::iface_diagnostics() {
Ok(entries) => {
for entry in entries {
println!("iface: {} open: {} {}", entry.name, entry.open_ok, entry.error);
}
ExitKind::Ok.code()
}
Err(err) => {
eprintln!("iface diag failed: {err}");
ExitKind::Failed.code()
}
}
}
async fn handle_dns_leak_report(cli: &Cli, args: DnsLeakReportArgs) -> i32 {
let privacy = match parse_leak_privacy(&args.privacy) {
Ok(value) => value,
Err(err) => {
eprintln!("{err}");
return ExitKind::Usage.code();
}
};
let payload = match std::fs::read_to_string(&args.path) {
Ok(value) => value,
Err(err) => {
eprintln!("failed to read report: {err}");
return ExitKind::Failed.code();
}
};
let mut report: wtfnet_dnsleak::LeakReport = match serde_json::from_str(&payload) {
Ok(value) => value,
Err(err) => {
eprintln!("failed to parse report: {err}");
return ExitKind::Failed.code();
}
};
for event in report.events.iter_mut() {
wtfnet_dnsleak::apply_privacy(event, privacy);
}
if cli.json {
let meta = Meta::new("wtfnet", env!("CARGO_PKG_VERSION"), false);
let command = CommandInfo::new("dns leak report", vec![args.path.display().to_string()]);
let envelope = CommandEnvelope::new(meta, command, report);
emit_json(cli, &envelope)
} else {
print_leak_summary(&report);
ExitKind::Ok.code()
}
}
async fn handle_calc_subnet(cli: &Cli, args: CalcSubnetArgs) -> i32 {
let input = match normalize_subnet_input(&args.input) {
Ok(value) => value,
@@ -1821,6 +2470,8 @@ async fn handle_http_request(
show_body: args.show_body,
http1_only: args.http1_only,
http2_only: args.http2_only,
http3: args.http3,
http3_only: args.http3_only,
proxy: args.socks5.clone(),
};
@@ -1844,6 +2495,7 @@ async fn handle_http_request(
geoip,
headers: report.headers.clone(),
body: report.body.clone(),
warnings: report.warnings.clone(),
timing: report.timing.clone(),
}
} else {
@@ -1857,6 +2509,7 @@ async fn handle_http_request(
geoip: Vec::new(),
headers: report.headers.clone(),
body: report.body.clone(),
warnings: report.warnings.clone(),
timing: report.timing.clone(),
}
};
@@ -1883,6 +2536,11 @@ async fn handle_http_request(
if !report.resolved_ips.is_empty() {
println!("resolved: {}", report.resolved_ips.join(", "));
}
if !report.warnings.is_empty() {
for warning in &report.warnings {
println!("warning: {warning}");
}
}
println!("total_ms: {}", report.timing.total_ms);
if let Some(ms) = report.timing.dns_ms {
println!("dns_ms: {ms}");
@@ -1973,6 +2631,8 @@ fn build_tls_options(args: &TlsArgs) -> wtfnet_tls::TlsOptions {
insecure: args.insecure,
socks5: args.socks5.clone(),
prefer_ipv4: args.prefer_ipv4,
show_extensions: args.show_extensions,
ocsp: args.ocsp,
}
}
@@ -1986,6 +2646,81 @@ fn parse_alpn(value: Option<&str>) -> Vec<String> {
.collect()
}
fn parse_leak_privacy(value: &str) -> Result<wtfnet_dnsleak::PrivacyMode, String> {
match value.to_ascii_lowercase().as_str() {
"full" => Ok(wtfnet_dnsleak::PrivacyMode::Full),
"redacted" => Ok(wtfnet_dnsleak::PrivacyMode::Redacted),
"minimal" => Ok(wtfnet_dnsleak::PrivacyMode::Minimal),
_ => Err(format!("invalid privacy mode: {value}")),
}
}
fn parse_leak_profile(
value: Option<&str>,
) -> Result<wtfnet_dnsleak::LeakPolicyProfile, String> {
let value = value.unwrap_or("proxy-stub");
match value.to_ascii_lowercase().as_str() {
"full-tunnel" => Ok(wtfnet_dnsleak::LeakPolicyProfile::FullTunnel),
"proxy-stub" => Ok(wtfnet_dnsleak::LeakPolicyProfile::ProxyStub),
"split" => Ok(wtfnet_dnsleak::LeakPolicyProfile::Split),
_ => Err(format!("invalid profile: {value}")),
}
}
fn resolve_leak_policy(
profile: &Option<String>,
policy_path: Option<&PathBuf>,
interfaces: &[wtfnet_platform::NetInterface],
) -> Result<wtfnet_dnsleak::LeakPolicy, String> {
if let Some(path) = policy_path {
let payload = std::fs::read_to_string(path)
.map_err(|err| format!("failed to read policy: {err}"))?;
let policy: wtfnet_dnsleak::LeakPolicy = serde_json::from_str(&payload)
.map_err(|err| format!("failed to parse policy: {err}"))?;
return Ok(policy);
}
let profile = parse_leak_profile(profile.as_deref())?;
let iface_names = interfaces.iter().map(|iface| iface.name.clone()).collect::<Vec<_>>();
Ok(wtfnet_dnsleak::LeakPolicy::from_profile(
profile,
&iface_names,
))
}
fn print_leak_summary(report: &wtfnet_dnsleak::LeakReport) {
println!("leaks: {}", report.summary.total);
if !report.summary.by_type.is_empty() {
let items = report
.summary
.by_type
.iter()
.map(|entry| format!("{:?}={}", entry.leak_type, entry.count))
.collect::<Vec<_>>()
.join(", ");
println!("by type: {items}");
}
if !report.summary.top_processes.is_empty() {
let items = report
.summary
.top_processes
.iter()
.map(|entry| format!("{}={}", entry.key, entry.count))
.collect::<Vec<_>>()
.join(", ");
println!("top processes: {items}");
}
if !report.summary.top_destinations.is_empty() {
let items = report
.summary
.top_destinations
.iter()
.map(|entry| format!("{}={}", entry.key, entry.count))
.collect::<Vec<_>>()
.join(", ");
println!("top destinations: {items}");
}
}
fn emit_tls_report<T: serde::Serialize>(cli: &Cli, name: &str, report: T) -> i32 {
if cli.json {
let meta = Meta::new("wtfnet", env!("CARGO_PKG_VERSION"), false);
@@ -2098,6 +2833,86 @@ async fn handle_discover_ssdp(cli: &Cli, args: DiscoverSsdpArgs) -> i32 {
}
}
async fn handle_discover_llmnr(cli: &Cli, args: DiscoverLlmnrArgs) -> i32 {
let duration_ms = match parse_duration_ms(&args.duration) {
Ok(value) => value,
Err(err) => {
eprintln!("{err}");
return ExitKind::Usage.code();
}
};
let options = wtfnet_discover::LlmnrOptions {
duration_ms,
name: args.name.clone(),
};
match wtfnet_discover::llmnr_discover(options).await {
Ok(report) => {
if cli.json {
let meta = Meta::new("wtfnet", env!("CARGO_PKG_VERSION"), false);
let mut command_args = vec!["--duration".to_string(), args.duration];
if let Some(name) = args.name {
command_args.push("--name".to_string());
command_args.push(name);
}
let command = CommandInfo::new("discover llmnr", command_args);
let envelope = CommandEnvelope::new(meta, command, report);
emit_json(cli, &envelope)
} else {
println!("query: {}", report.name);
for answer in report.answers {
println!("from: {}", answer.from);
println!(" name: {}", answer.name);
println!(" type: {}", answer.record_type);
println!(" data: {}", answer.data);
println!(" ttl: {}", answer.ttl);
}
ExitKind::Ok.code()
}
}
Err(err) => {
eprintln!("llmnr discover failed: {err}");
ExitKind::Failed.code()
}
}
}
async fn handle_discover_nbns(cli: &Cli, args: DiscoverNbnsArgs) -> i32 {
let duration_ms = match parse_duration_ms(&args.duration) {
Ok(value) => value,
Err(err) => {
eprintln!("{err}");
return ExitKind::Usage.code();
}
};
let options = wtfnet_discover::NbnsOptions { duration_ms };
match wtfnet_discover::nbns_discover(options).await {
Ok(report) => {
if cli.json {
let meta = Meta::new("wtfnet", env!("CARGO_PKG_VERSION"), false);
let command = CommandInfo::new(
"discover nbns",
vec!["--duration".to_string(), args.duration],
);
let envelope = CommandEnvelope::new(meta, command, report);
emit_json(cli, &envelope)
} else {
for node in report.nodes {
println!("from: {}", node.from);
if node.names.is_empty() {
continue;
}
println!(" names: {}", node.names.join(", "));
}
ExitKind::Ok.code()
}
}
Err(err) => {
eprintln!("nbns discover failed: {err}");
ExitKind::Failed.code()
}
}
}
async fn handle_diag(cli: &Cli, args: DiagArgs) -> i32 {
let options = wtfnet_diag::DiagOptions {
dns_detect_domain: args.dns_detect.clone(),

View File

@@ -4,6 +4,7 @@ version = "0.1.0"
edition = "2024"
[dependencies]
hickory-proto = "0.24"
mdns-sd = "0.8"
serde = { version = "1", features = ["derive"] }
thiserror = "2"

View File

@@ -1,7 +1,9 @@
use hickory_proto::op::{Message, MessageType, Query};
use hickory_proto::rr::{Name, RData, RecordType};
use mdns_sd::{ServiceDaemon, ServiceEvent, ServiceInfo};
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, BTreeSet};
use std::net::{SocketAddr, UdpSocket};
use std::net::{IpAddr, SocketAddr, UdpSocket};
use std::time::{Duration, Instant};
use thiserror::Error;
@@ -24,6 +26,17 @@ pub struct SsdpOptions {
pub duration_ms: u64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LlmnrOptions {
pub duration_ms: u64,
pub name: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NbnsOptions {
pub duration_ms: u64,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MdnsService {
pub service_type: String,
@@ -56,6 +69,34 @@ pub struct SsdpReport {
pub services: Vec<SsdpService>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LlmnrAnswer {
pub from: String,
pub name: String,
pub record_type: String,
pub data: String,
pub ttl: u32,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LlmnrReport {
pub duration_ms: u64,
pub name: String,
pub answers: Vec<LlmnrAnswer>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NbnsNodeStatus {
pub from: String,
pub names: Vec<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NbnsReport {
pub duration_ms: u64,
pub nodes: Vec<NbnsNodeStatus>,
}
pub async fn mdns_discover(options: MdnsOptions) -> Result<MdnsReport, DiscoverError> {
tokio::task::spawn_blocking(move || mdns_discover_blocking(options))
.await
@@ -68,6 +109,18 @@ pub async fn ssdp_discover(options: SsdpOptions) -> Result<SsdpReport, DiscoverE
.map_err(|err| DiscoverError::Io(err.to_string()))?
}
pub async fn llmnr_discover(options: LlmnrOptions) -> Result<LlmnrReport, DiscoverError> {
tokio::task::spawn_blocking(move || llmnr_discover_blocking(options))
.await
.map_err(|err| DiscoverError::Io(err.to_string()))?
}
pub async fn nbns_discover(options: NbnsOptions) -> Result<NbnsReport, DiscoverError> {
tokio::task::spawn_blocking(move || nbns_discover_blocking(options))
.await
.map_err(|err| DiscoverError::Io(err.to_string()))?
}
fn mdns_discover_blocking(options: MdnsOptions) -> Result<MdnsReport, DiscoverError> {
let daemon = ServiceDaemon::new().map_err(|err| DiscoverError::Mdns(err.to_string()))?;
let mut service_types = BTreeSet::new();
@@ -174,6 +227,94 @@ fn ssdp_discover_blocking(options: SsdpOptions) -> Result<SsdpReport, DiscoverEr
})
}
fn llmnr_discover_blocking(options: LlmnrOptions) -> Result<LlmnrReport, DiscoverError> {
let socket = UdpSocket::bind("0.0.0.0:0").map_err(|err| DiscoverError::Io(err.to_string()))?;
socket
.set_read_timeout(Some(Duration::from_millis(200)))
.map_err(|err| DiscoverError::Io(err.to_string()))?;
let name = options
.name
.clone()
.filter(|value| !value.trim().is_empty())
.unwrap_or_else(|| "wpad".to_string());
    // LLMNR responders expect exactly one question per query (RFC 4795), so
    // the A and AAAA lookups are sent as two separate messages.
    let target = "224.0.0.252:5355";
    for record_type in [RecordType::A, RecordType::AAAA] {
        let query = build_llmnr_query(&name, record_type)
            .map_err(|err| DiscoverError::Io(format!("llmnr build query: {err}")))?;
        let _ = socket.send_to(&query, target);
    }
let mut answers = Vec::new();
let mut seen = BTreeSet::new();
let deadline = Instant::now() + Duration::from_millis(options.duration_ms);
let mut buf = [0u8; 2048];
while Instant::now() < deadline {
match socket.recv_from(&mut buf) {
Ok((len, from)) => {
if let Some(entries) = parse_llmnr_response(&buf[..len], from.ip()) {
for entry in entries {
let key = format!(
"{}|{}|{}|{}",
entry.from, entry.name, entry.record_type, entry.data
);
if seen.insert(key) {
answers.push(entry);
}
}
}
}
Err(_) => continue,
}
}
Ok(LlmnrReport {
duration_ms: options.duration_ms,
name,
answers,
})
}
fn nbns_discover_blocking(options: NbnsOptions) -> Result<NbnsReport, DiscoverError> {
let socket = UdpSocket::bind("0.0.0.0:0").map_err(|err| DiscoverError::Io(err.to_string()))?;
socket
.set_broadcast(true)
.map_err(|err| DiscoverError::Io(err.to_string()))?;
socket
.set_read_timeout(Some(Duration::from_millis(200)))
.map_err(|err| DiscoverError::Io(err.to_string()))?;
let query = build_nbns_node_status_query();
let _ = socket.send_to(&query, "255.255.255.255:137");
let mut nodes = Vec::new();
let mut seen = BTreeSet::new();
let deadline = Instant::now() + Duration::from_millis(options.duration_ms);
let mut buf = [0u8; 2048];
while Instant::now() < deadline {
match socket.recv_from(&mut buf) {
Ok((len, from)) => {
if let Some(names) = parse_nbns_node_status(&buf[..len]) {
let key = format!("{}|{}", from.ip(), names.join(","));
if seen.insert(key) {
nodes.push(NbnsNodeStatus {
from: from.ip().to_string(),
names,
});
}
}
}
Err(_) => continue,
}
}
Ok(NbnsReport {
duration_ms: options.duration_ms,
nodes,
})
}
fn parse_ssdp_response(payload: &str, from: SocketAddr) -> Option<SsdpService> {
let mut st = None;
let mut usn = None;
@@ -207,3 +348,183 @@ fn parse_ssdp_response(payload: &str, from: SocketAddr) -> Option<SsdpService> {
server,
})
}
fn build_llmnr_query(name: &str, record_type: RecordType) -> Result<Vec<u8>, String> {
    let name = Name::from_ascii(name).map_err(|err| format!("invalid name: {err}"))?;
    let mut message = Message::new();
    message
        .set_id(0)
        .set_message_type(MessageType::Query)
        .set_recursion_desired(false)
        .add_query(Query::query(name, record_type));
    message.to_vec().map_err(|err| err.to_string())
}
fn parse_llmnr_response(payload: &[u8], from: IpAddr) -> Option<Vec<LlmnrAnswer>> {
let message = Message::from_vec(payload).ok()?;
if message.message_type() != MessageType::Response {
return None;
}
let mut answers = Vec::new();
for record in message.answers() {
let record_type = record.record_type();
let data = match record.data() {
Some(RData::A(addr)) => addr.to_string(),
Some(RData::AAAA(addr)) => addr.to_string(),
_ => continue,
};
answers.push(LlmnrAnswer {
from: from.to_string(),
name: record.name().to_string(),
record_type: record_type.to_string(),
data,
ttl: record.ttl(),
});
}
if answers.is_empty() {
None
} else {
Some(answers)
}
}
fn build_nbns_node_status_query() -> Vec<u8> {
let mut buf = Vec::with_capacity(50);
let id = nbns_query_id();
buf.extend_from_slice(&id.to_be_bytes());
buf.extend_from_slice(&0u16.to_be_bytes()); // flags
buf.extend_from_slice(&1u16.to_be_bytes()); // qdcount
buf.extend_from_slice(&0u16.to_be_bytes()); // ancount
buf.extend_from_slice(&0u16.to_be_bytes()); // nscount
buf.extend_from_slice(&0u16.to_be_bytes()); // arcount
buf.extend_from_slice(&nbns_encode_name("*", 0x00));
buf.extend_from_slice(&0x0021u16.to_be_bytes()); // NBSTAT
buf.extend_from_slice(&0x0001u16.to_be_bytes()); // IN
buf
}
fn nbns_query_id() -> u16 {
let nanos = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.subsec_nanos();
(nanos & 0xffff) as u16
}
fn nbns_encode_name(name: &str, suffix: u8) -> Vec<u8> {
    // Regular NetBIOS names are space-padded to 15 bytes, but the "*" wildcard
    // used by the node status query is NUL-padded (RFC 1002); pick accordingly.
    let pad = if name == "*" { 0u8 } else { b' ' };
    let mut raw = [pad; 16];
let mut bytes = name.as_bytes().to_vec();
for byte in bytes.iter_mut() {
byte.make_ascii_uppercase();
}
for (idx, byte) in bytes.iter().take(15).enumerate() {
raw[idx] = *byte;
}
raw[15] = suffix;
let mut encoded = Vec::with_capacity(34);
encoded.push(32);
for byte in raw {
let high = ((byte >> 4) & 0x0f) + b'A';
let low = (byte & 0x0f) + b'A';
encoded.push(high);
encoded.push(low);
}
encoded.push(0);
encoded
}
fn parse_nbns_node_status(payload: &[u8]) -> Option<Vec<String>> {
if payload.len() < 12 {
return None;
}
let flags = u16::from_be_bytes([payload[2], payload[3]]);
if flags & 0x8000 == 0 {
return None;
}
let qdcount = u16::from_be_bytes([payload[4], payload[5]]) as usize;
let ancount = u16::from_be_bytes([payload[6], payload[7]]) as usize;
let mut offset = 12;
for _ in 0..qdcount {
offset = skip_dns_name(payload, offset)?;
if offset + 4 > payload.len() {
return None;
}
offset += 4;
}
let mut names = Vec::new();
for _ in 0..ancount {
offset = skip_dns_name(payload, offset)?;
if offset + 10 > payload.len() {
return None;
}
let rr_type = u16::from_be_bytes([payload[offset], payload[offset + 1]]);
let _rr_class = u16::from_be_bytes([payload[offset + 2], payload[offset + 3]]);
let _ttl = u32::from_be_bytes([
payload[offset + 4],
payload[offset + 5],
payload[offset + 6],
payload[offset + 7],
]);
let rdlength = u16::from_be_bytes([payload[offset + 8], payload[offset + 9]]) as usize;
offset += 10;
if offset + rdlength > payload.len() {
return None;
}
if rr_type == 0x0021 && rdlength > 0 {
if let Some(list) = parse_nbns_name_list(&payload[offset..offset + rdlength]) {
names.extend(list);
}
}
offset += rdlength;
}
if names.is_empty() {
None
} else {
Some(names)
}
}
fn parse_nbns_name_list(payload: &[u8]) -> Option<Vec<String>> {
let count = *payload.first()? as usize;
let mut offset = 1;
let mut names = Vec::new();
for _ in 0..count {
if offset + 18 > payload.len() {
return None;
}
let name_bytes = &payload[offset..offset + 15];
let suffix = payload[offset + 15];
let name = String::from_utf8_lossy(name_bytes)
.trim_end()
.to_string();
names.push(format!("{name}<{suffix:02x}>"));
offset += 18;
}
Some(names)
}
fn skip_dns_name(payload: &[u8], mut offset: usize) -> Option<usize> {
if offset >= payload.len() {
return None;
}
loop {
let len = *payload.get(offset)?;
if len & 0xc0 == 0xc0 {
if offset + 1 >= payload.len() {
return None;
}
return Some(offset + 2);
}
if len == 0 {
return Some(offset + 1);
}
offset += 1 + len as usize;
if offset >= payload.len() {
return None;
}
}
}
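// A minimal test sketch, not part of the original change set: it checks the
// NetBIOS first-level encoding of the NUL-padded wildcard node-status name and
// the compression-pointer handling in skip_dns_name against hand-built buffers.
#[cfg(test)]
mod nbns_tests {
    use super::{nbns_encode_name, skip_dns_name};

    #[test]
    fn encodes_wildcard_node_status_name() {
        let encoded = nbns_encode_name("*", 0x00);
        // length byte + 32 encoded bytes + terminating zero
        assert_eq!(encoded.len(), 34);
        assert_eq!(encoded[0], 32);
        assert_eq!(*encoded.last().unwrap(), 0);
        // "*" padded with NULs encodes to "CK" followed by thirty 'A's
        let mut expected = Vec::from(&b"CK"[..]);
        expected.extend(std::iter::repeat(b'A').take(30));
        assert_eq!(&encoded[1..33], expected.as_slice());
    }

    #[test]
    fn skips_plain_and_compressed_names() {
        // label "www" followed by the root label ends at offset 5
        let plain = [3, b'w', b'w', b'w', 0, 0xff];
        assert_eq!(skip_dns_name(&plain, 0), Some(5));
        // 0xC0 0x0C is a two-byte compression pointer
        let pointer = [0xc0, 0x0c, 0xff];
        assert_eq!(skip_dns_name(&pointer, 0), Some(2));
    }
}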

View File

@@ -0,0 +1,17 @@
[package]
name = "wtfnet-dnsleak"
version = "0.1.0"
edition = "2024"
[dependencies]
hickory-proto = "0.24"
ipnet = { version = "2", features = ["serde"] }
serde = { version = "1", features = ["derive"] }
thiserror = "2"
tokio = { version = "1", features = ["rt"] }
tracing = "0.1"
wtfnet-platform = { path = "../wtfnet-platform" }
pnet = { version = "0.34", optional = true }
[features]
pcap = ["dep:pnet"]

View File

@@ -0,0 +1,32 @@
use crate::report::LeakTransport;
use hickory_proto::op::{Message, MessageType};
use serde::{Deserialize, Serialize};
use std::net::IpAddr;
use wtfnet_platform::FlowProtocol;
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClassifiedEvent {
pub timestamp_ms: u128,
pub proto: FlowProtocol,
pub src_ip: IpAddr,
pub src_port: u16,
pub dst_ip: IpAddr,
pub dst_port: u16,
pub iface_name: Option<String>,
pub transport: LeakTransport,
pub qname: Option<String>,
pub qtype: Option<String>,
pub rcode: Option<String>,
}
pub fn classify_dns_query(payload: &[u8]) -> Option<(String, String, String)> {
let message = Message::from_vec(payload).ok()?;
if message.message_type() != MessageType::Query {
return None;
}
let query = message.queries().first()?;
let qname = query.name().to_utf8();
let qtype = query.query_type().to_string();
let rcode = message.response_code().to_string();
Some((qname, qtype, rcode))
}
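// A small test sketch, not part of the original change set: it round-trips a
// hand-built hickory-proto query through classify_dns_query to confirm that
// queries are classified and that qname/qtype come back as expected.
#[cfg(test)]
mod tests {
    use super::classify_dns_query;
    use hickory_proto::op::{Message, MessageType, Query};
    use hickory_proto::rr::{Name, RecordType};

    #[test]
    fn classifies_plain_query() {
        let mut message = Message::new();
        message
            .set_message_type(MessageType::Query)
            .add_query(Query::query(
                Name::from_ascii("example.com.").unwrap(),
                RecordType::A,
            ));
        let bytes = message.to_vec().unwrap();
        let (qname, qtype, _rcode) =
            classify_dns_query(&bytes).expect("query should classify");
        assert_eq!(qname.trim_end_matches('.'), "example.com");
        assert_eq!(qtype, "A");
    }
}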

View File

@@ -0,0 +1,102 @@
mod classify;
mod policy;
mod privacy;
mod report;
mod route;
mod rules;
mod sensor;
use crate::classify::ClassifiedEvent;
use crate::sensor::capture_events;
use std::time::Instant;
use thiserror::Error;
use tracing::debug;
use wtfnet_platform::{FlowOwnerProvider, FlowTuple};
pub use crate::policy::{LeakPolicy, LeakPolicyProfile, PolicySummary};
pub use crate::privacy::{apply_privacy, PrivacyMode};
pub use crate::report::{LeakEvent, LeakReport, LeakSummary, LeakTransport, RouteClass, Severity};
pub use crate::sensor::{iface_diagnostics, IfaceDiag};
#[derive(Debug, Error)]
pub enum DnsLeakError {
#[error("not supported: {0}")]
NotSupported(String),
#[error("io error: {0}")]
Io(String),
#[error("policy error: {0}")]
Policy(String),
}
#[derive(Debug, Clone)]
pub struct LeakWatchOptions {
pub duration_ms: u64,
pub iface: Option<String>,
pub policy: LeakPolicy,
pub privacy: PrivacyMode,
pub include_events: bool,
}
pub async fn watch(
options: LeakWatchOptions,
flow_owner: Option<&dyn FlowOwnerProvider>,
) -> Result<LeakReport, DnsLeakError> {
debug!(
duration_ms = options.duration_ms,
iface = ?options.iface,
include_events = options.include_events,
"dns leak watch start"
);
let start = Instant::now();
let events = capture_events(&options).await?;
let mut leak_events = Vec::new();
for event in events {
let enriched = enrich_event(event, flow_owner).await;
if let Some(decision) = rules::evaluate(&enriched, &options.policy) {
let mut leak_event = report::LeakEvent::from_decision(enriched, decision);
privacy::apply_privacy(&mut leak_event, options.privacy);
leak_events.push(leak_event);
}
}
let summary = LeakSummary::from_events(&leak_events);
let report = LeakReport {
duration_ms: start.elapsed().as_millis() as u64,
policy: options.policy.summary(),
summary,
events: if options.include_events {
leak_events
} else {
Vec::new()
},
};
Ok(report)
}
async fn enrich_event(
event: ClassifiedEvent,
flow_owner: Option<&dyn FlowOwnerProvider>,
) -> report::EnrichedEvent {
let mut enriched = route::enrich_route(event);
if let Some(provider) = flow_owner {
let flow = FlowTuple {
proto: enriched.proto,
src_ip: enriched.src_ip,
src_port: enriched.src_port,
dst_ip: enriched.dst_ip,
dst_port: enriched.dst_port,
};
match provider.owner_of(flow).await {
Ok(result) => {
enriched.owner = result.owner;
enriched.owner_confidence = result.confidence;
enriched.owner_failure = result.failure_reason;
}
Err(err) => {
enriched.owner_failure = Some(err.message);
}
}
}
enriched
}

View File

@@ -0,0 +1,113 @@
use ipnet::IpNet;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum LeakPolicyProfile {
FullTunnel,
ProxyStub,
Split,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakPolicy {
pub profile: LeakPolicyProfile,
pub allowed_ifaces: Vec<String>,
pub tunnel_ifaces: Vec<String>,
pub loopback_ifaces: Vec<String>,
pub allowed_destinations: Vec<IpNet>,
pub allowed_ports: Vec<u16>,
pub allowed_processes: Vec<String>,
pub proxy_required_domains: Vec<String>,
pub allowlist_domains: Vec<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PolicySummary {
pub profile: LeakPolicyProfile,
pub allowed_ifaces: Vec<String>,
pub tunnel_ifaces: Vec<String>,
pub allowed_destinations: Vec<String>,
pub allowed_ports: Vec<u16>,
pub allowed_processes: Vec<String>,
}
impl LeakPolicy {
pub fn from_profile(profile: LeakPolicyProfile, ifaces: &[String]) -> Self {
let loopback_ifaces = detect_loopback_ifaces(ifaces);
let tunnel_ifaces = detect_tunnel_ifaces(ifaces);
        // All profiles currently start from the same allow-list (loopback plus
        // tunnel-looking interfaces); the arms are kept separate here,
        // presumably so Split can diverge later.
        let allowed_ifaces = match profile {
            LeakPolicyProfile::FullTunnel | LeakPolicyProfile::ProxyStub => {
                merge_lists(&loopback_ifaces, &tunnel_ifaces)
            }
            LeakPolicyProfile::Split => merge_lists(&loopback_ifaces, &tunnel_ifaces),
        };
LeakPolicy {
profile,
allowed_ifaces,
tunnel_ifaces,
loopback_ifaces,
allowed_destinations: Vec::new(),
allowed_ports: Vec::new(),
allowed_processes: Vec::new(),
proxy_required_domains: Vec::new(),
allowlist_domains: Vec::new(),
}
}
pub fn summary(&self) -> PolicySummary {
PolicySummary {
profile: self.profile,
allowed_ifaces: self.allowed_ifaces.clone(),
tunnel_ifaces: self.tunnel_ifaces.clone(),
allowed_destinations: self
.allowed_destinations
.iter()
.map(|net| net.to_string())
.collect(),
allowed_ports: self.allowed_ports.clone(),
allowed_processes: self.allowed_processes.clone(),
}
}
}
fn detect_loopback_ifaces(ifaces: &[String]) -> Vec<String> {
ifaces
.iter()
.filter(|name| {
let name = name.to_ascii_lowercase();
name == "lo"
|| name == "lo0"
|| name.contains("loopback")
|| name.contains("localhost")
})
.cloned()
.collect()
}
fn detect_tunnel_ifaces(ifaces: &[String]) -> Vec<String> {
ifaces
.iter()
.filter(|name| {
let name = name.to_ascii_lowercase();
name.contains("tun")
|| name.contains("tap")
|| name.contains("wg")
|| name.contains("wireguard")
|| name.contains("vpn")
|| name.contains("ppp")
})
.cloned()
.collect()
}
fn merge_lists(a: &[String], b: &[String]) -> Vec<String> {
let mut out = Vec::new();
for value in a.iter().chain(b.iter()) {
if !out.iter().any(|entry| entry == value) {
out.push(value.clone());
}
}
out
}
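// A minimal test sketch, not part of the original change set: it exercises the
// interface-name heuristics and the de-duplicating merge used when building a
// profile's default allow-list.
#[cfg(test)]
mod tests {
    use super::{detect_loopback_ifaces, detect_tunnel_ifaces, merge_lists};

    #[test]
    fn detects_loopback_and_tunnel_names() {
        let ifaces = vec![
            "lo".to_string(),
            "eth0".to_string(),
            "wg0".to_string(),
            "utun3".to_string(),
        ];
        assert_eq!(detect_loopback_ifaces(&ifaces), vec!["lo".to_string()]);
        assert_eq!(
            detect_tunnel_ifaces(&ifaces),
            vec!["wg0".to_string(), "utun3".to_string()]
        );
    }

    #[test]
    fn merge_lists_deduplicates() {
        let a = vec!["lo".to_string(), "wg0".to_string()];
        let b = vec!["wg0".to_string(), "tun0".to_string()];
        assert_eq!(
            merge_lists(&a, &b),
            vec!["lo".to_string(), "wg0".to_string(), "tun0".to_string()]
        );
    }
}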

View File

@@ -0,0 +1,35 @@
use crate::report::LeakEvent;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum PrivacyMode {
Full,
Redacted,
Minimal,
}
pub fn apply_privacy(event: &mut LeakEvent, mode: PrivacyMode) {
match mode {
PrivacyMode::Full => {}
PrivacyMode::Redacted => {
if let Some(value) = event.qname.as_ref() {
event.qname = Some(redact_domain(value));
}
}
PrivacyMode::Minimal => {
event.qname = None;
event.qtype = None;
event.rcode = None;
}
}
}
fn redact_domain(value: &str) -> String {
let parts: Vec<&str> = value.split('.').filter(|part| !part.is_empty()).collect();
if parts.len() >= 2 {
format!("{}.{}", parts[parts.len() - 2], parts[parts.len() - 1])
} else {
value.to_string()
}
}
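// A small test sketch, not part of the original change set: it checks that
// redaction keeps only the last two labels of a query name and leaves short
// names untouched.
#[cfg(test)]
mod tests {
    use super::redact_domain;

    #[test]
    fn keeps_last_two_labels() {
        assert_eq!(redact_domain("api.internal.example.com"), "example.com");
        assert_eq!(redact_domain("example.com."), "example.com");
        assert_eq!(redact_domain("localhost"), "localhost");
    }
}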

View File

@@ -0,0 +1,192 @@
use crate::policy::PolicySummary;
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, HashMap};
use std::net::IpAddr;
use wtfnet_platform::{FlowOwner, FlowOwnerConfidence, FlowProtocol};
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum LeakTransport {
Udp53,
Tcp53,
Dot,
Doh,
Unknown,
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[serde(rename_all = "lowercase")]
pub enum LeakType {
A,
B,
C,
D,
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum RouteClass {
Loopback,
Tunnel,
Physical,
Unknown,
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum Severity {
P0,
P1,
P2,
P3,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnrichedEvent {
pub timestamp_ms: u128,
pub proto: FlowProtocol,
pub src_ip: IpAddr,
pub src_port: u16,
pub dst_ip: IpAddr,
pub dst_port: u16,
pub iface_name: Option<String>,
pub transport: LeakTransport,
pub qname: Option<String>,
pub qtype: Option<String>,
pub rcode: Option<String>,
pub route_class: RouteClass,
pub owner: Option<FlowOwner>,
pub owner_confidence: FlowOwnerConfidence,
pub owner_failure: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakEvent {
pub timestamp_ms: u128,
pub transport: LeakTransport,
pub qname: Option<String>,
pub qtype: Option<String>,
pub rcode: Option<String>,
pub iface_name: Option<String>,
pub route_class: RouteClass,
pub dst_ip: String,
pub dst_port: u16,
pub pid: Option<u32>,
pub ppid: Option<u32>,
pub process_name: Option<String>,
pub process_path: Option<String>,
pub attribution_confidence: FlowOwnerConfidence,
pub attribution_failure: Option<String>,
pub leak_type: LeakType,
pub severity: Severity,
pub policy_rule_id: String,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakTypeCount {
pub leak_type: LeakType,
pub count: usize,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SummaryItem {
pub key: String,
pub count: usize,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakSummary {
pub total: usize,
pub by_type: Vec<LeakTypeCount>,
pub top_processes: Vec<SummaryItem>,
pub top_destinations: Vec<SummaryItem>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakReport {
pub duration_ms: u64,
pub policy: PolicySummary,
pub summary: LeakSummary,
pub events: Vec<LeakEvent>,
}
impl LeakEvent {
pub fn from_decision(event: EnrichedEvent, decision: crate::rules::LeakDecision) -> Self {
let (pid, ppid, process_name, process_path) = event
.owner
.as_ref()
.map(|owner| {
(
owner.pid,
owner.ppid,
owner.process_name.clone(),
owner.process_path.clone(),
)
})
.unwrap_or((None, None, None, None));
LeakEvent {
timestamp_ms: event.timestamp_ms,
transport: event.transport,
qname: event.qname,
qtype: event.qtype,
rcode: event.rcode,
iface_name: event.iface_name,
route_class: event.route_class,
dst_ip: event.dst_ip.to_string(),
dst_port: event.dst_port,
pid,
ppid,
process_name,
process_path,
attribution_confidence: event.owner_confidence,
attribution_failure: event.owner_failure,
leak_type: decision.leak_type,
severity: decision.severity,
policy_rule_id: decision.policy_rule_id,
}
}
}
impl LeakSummary {
pub fn from_events(events: &[LeakEvent]) -> Self {
let total = events.len();
let mut by_type_map: HashMap<LeakType, usize> = HashMap::new();
let mut process_map: BTreeMap<String, usize> = BTreeMap::new();
let mut dest_map: BTreeMap<String, usize> = BTreeMap::new();
for event in events {
*by_type_map.entry(event.leak_type).or_insert(0) += 1;
if let Some(name) = event.process_name.as_ref() {
*process_map.entry(name.clone()).or_insert(0) += 1;
}
let dst_key = format!("{}:{}", event.dst_ip, event.dst_port);
*dest_map.entry(dst_key).or_insert(0) += 1;
}
let mut by_type = by_type_map
.into_iter()
.map(|(leak_type, count)| LeakTypeCount { leak_type, count })
.collect::<Vec<_>>();
by_type.sort_by(|a, b| a.leak_type.cmp(&b.leak_type));
let top_processes = top_items(process_map, 5);
let top_destinations = top_items(dest_map, 5);
LeakSummary {
total,
by_type,
top_processes,
top_destinations,
}
}
}
fn top_items(map: BTreeMap<String, usize>, limit: usize) -> Vec<SummaryItem> {
let mut items = map
.into_iter()
.map(|(key, count)| SummaryItem { key, count })
.collect::<Vec<_>>();
items.sort_by(|a, b| b.count.cmp(&a.count).then_with(|| a.key.cmp(&b.key)));
items.truncate(limit);
items
}

View File

@@ -0,0 +1,48 @@
use crate::classify::ClassifiedEvent;
use crate::report::{EnrichedEvent, RouteClass};
use wtfnet_platform::FlowOwnerConfidence;
pub fn enrich_route(event: ClassifiedEvent) -> EnrichedEvent {
let route_class = if event.src_ip.is_loopback() || event.dst_ip.is_loopback() {
RouteClass::Loopback
} else if event
.iface_name
.as_ref()
.map(|name| is_tunnel_iface(name))
.unwrap_or(false)
{
RouteClass::Tunnel
} else if event.iface_name.is_some() {
RouteClass::Physical
} else {
RouteClass::Unknown
};
EnrichedEvent {
timestamp_ms: event.timestamp_ms,
proto: event.proto,
src_ip: event.src_ip,
src_port: event.src_port,
dst_ip: event.dst_ip,
dst_port: event.dst_port,
iface_name: event.iface_name,
transport: event.transport,
qname: event.qname,
qtype: event.qtype,
rcode: event.rcode,
route_class,
owner: None,
owner_confidence: FlowOwnerConfidence::None,
owner_failure: None,
}
}
fn is_tunnel_iface(name: &str) -> bool {
let name = name.to_ascii_lowercase();
name.contains("tun")
|| name.contains("tap")
|| name.contains("wg")
|| name.contains("wireguard")
|| name.contains("vpn")
|| name.contains("ppp")
}
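// A minimal test sketch, not part of the original change set: it pins down the
// substring heuristic used to classify tunnel-looking interface names.
#[cfg(test)]
mod tests {
    use super::is_tunnel_iface;

    #[test]
    fn classifies_common_interface_names() {
        assert!(is_tunnel_iface("wg0"));
        assert!(is_tunnel_iface("utun4"));
        assert!(is_tunnel_iface("Wintun Userspace Tunnel"));
        assert!(!is_tunnel_iface("eth0"));
        assert!(!is_tunnel_iface("wlan0"));
    }
}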

View File

@@ -0,0 +1,116 @@
use crate::policy::LeakPolicy;
use crate::report::{EnrichedEvent, LeakTransport, LeakType, Severity};
#[derive(Debug, Clone)]
pub struct LeakDecision {
pub leak_type: LeakType,
pub severity: Severity,
pub policy_rule_id: String,
}
pub fn evaluate(event: &EnrichedEvent, policy: &LeakPolicy) -> Option<LeakDecision> {
match event.transport {
LeakTransport::Udp53 | LeakTransport::Tcp53 => {
if is_proxy_required(event, policy) && !is_allowed(event, policy) {
return Some(LeakDecision {
leak_type: LeakType::B,
severity: Severity::P1,
policy_rule_id: "LEAK_B_PROXY_REQUIRED".to_string(),
});
}
if !is_allowed(event, policy) {
return Some(LeakDecision {
leak_type: LeakType::A,
severity: Severity::P0,
policy_rule_id: "LEAK_A_PLAINTEXT".to_string(),
});
}
}
LeakTransport::Dot | LeakTransport::Doh => {
if !is_allowed(event, policy) {
return Some(LeakDecision {
leak_type: LeakType::C,
severity: Severity::P1,
policy_rule_id: "LEAK_C_ENCRYPTED".to_string(),
});
}
}
LeakTransport::Unknown => {}
}
None
}
fn is_allowed(event: &EnrichedEvent, policy: &LeakPolicy) -> bool {
let has_rules = !policy.allowed_ifaces.is_empty()
|| !policy.allowed_destinations.is_empty()
|| !policy.allowed_ports.is_empty()
|| !policy.allowed_processes.is_empty();
if !has_rules {
return false;
}
if let Some(iface) = event.iface_name.as_ref() {
if policy
.allowed_ifaces
.iter()
.any(|allowed| allowed.eq_ignore_ascii_case(iface))
{
return true;
}
}
if policy
.allowed_ports
.iter()
.any(|port| *port == event.dst_port)
{
return true;
}
if policy
.allowed_destinations
.iter()
.any(|net| net.contains(&event.dst_ip))
{
return true;
}
if let Some(name) = event
.owner
.as_ref()
.and_then(|owner| owner.process_name.as_ref())
{
if policy
.allowed_processes
.iter()
.any(|value| value.eq_ignore_ascii_case(name))
{
return true;
}
}
false
}
fn is_proxy_required(event: &EnrichedEvent, policy: &LeakPolicy) -> bool {
let Some(qname) = event.qname.as_ref() else {
return false;
};
let qname = qname.to_ascii_lowercase();
if policy.proxy_required_domains.iter().any(|domain| {
let domain = domain.to_ascii_lowercase();
qname == domain || qname.ends_with(&format!(".{domain}"))
}) {
return true;
}
if !policy.allowlist_domains.is_empty() {
let allowed = policy.allowlist_domains.iter().any(|domain| {
let domain = domain.to_ascii_lowercase();
qname == domain || qname.ends_with(&format!(".{domain}"))
});
return !allowed;
}
false
}
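// A minimal test sketch, not part of the original change set: with an empty
// policy (no allow rules), a plaintext UDP/53 query should be reported as a
// type-A leak. The event values below are arbitrary fixtures.
#[cfg(test)]
mod tests {
    use super::evaluate;
    use crate::policy::{LeakPolicy, LeakPolicyProfile};
    use crate::report::{EnrichedEvent, LeakTransport, LeakType, RouteClass};
    use std::net::{IpAddr, Ipv4Addr};
    use wtfnet_platform::{FlowOwnerConfidence, FlowProtocol};

    fn sample_event() -> EnrichedEvent {
        EnrichedEvent {
            timestamp_ms: 0,
            proto: FlowProtocol::Udp,
            src_ip: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 10)),
            src_port: 50_000,
            dst_ip: IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1)),
            dst_port: 53,
            iface_name: Some("eth0".to_string()),
            transport: LeakTransport::Udp53,
            qname: Some("example.com".to_string()),
            qtype: Some("A".to_string()),
            rcode: None,
            route_class: RouteClass::Physical,
            owner: None,
            owner_confidence: FlowOwnerConfidence::None,
            owner_failure: None,
        }
    }

    #[test]
    fn plaintext_dns_without_allow_rules_is_type_a() {
        let policy = LeakPolicy::from_profile(LeakPolicyProfile::FullTunnel, &[]);
        let decision = evaluate(&sample_event(), &policy).expect("should flag a leak");
        assert_eq!(decision.leak_type, LeakType::A);
        assert_eq!(decision.policy_rule_id, "LEAK_A_PLAINTEXT");
    }
}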

View File

@@ -0,0 +1,380 @@
use crate::classify::{classify_dns_query, ClassifiedEvent};
use crate::report::LeakTransport;
use crate::DnsLeakError;
use std::collections::HashSet;
use std::net::IpAddr;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use tracing::debug;
use wtfnet_platform::FlowProtocol;
use crate::LeakWatchOptions;
#[cfg(feature = "pcap")]
use pnet::datalink::{self, Channel, Config as DatalinkConfig};
#[cfg(feature = "pcap")]
use std::sync::mpsc;
#[cfg(not(feature = "pcap"))]
pub async fn capture_events(_options: &LeakWatchOptions) -> Result<Vec<ClassifiedEvent>, DnsLeakError> {
Err(DnsLeakError::NotSupported(
"dns leak watch requires pcap feature".to_string(),
))
}
#[cfg(feature = "pcap")]
pub async fn capture_events(options: &LeakWatchOptions) -> Result<Vec<ClassifiedEvent>, DnsLeakError> {
let options = options.clone();
let candidates = format_iface_list(&datalink::interfaces());
let timeout_ms = options.duration_ms.saturating_add(2000);
let handle = tokio::task::spawn_blocking(move || capture_events_blocking(options));
match tokio::time::timeout(Duration::from_millis(timeout_ms), handle).await {
Ok(joined) => joined.map_err(|err| DnsLeakError::Io(err.to_string()))?,
Err(_) => {
return Err(DnsLeakError::Io(
format!(
"capture timed out waiting for interface; candidates: {candidates}"
),
))
}
}
}
#[derive(Debug, Clone)]
pub struct IfaceDiag {
pub name: String,
pub open_ok: bool,
pub error: String,
}
#[cfg(not(feature = "pcap"))]
pub fn iface_diagnostics() -> Result<Vec<IfaceDiag>, DnsLeakError> {
Err(DnsLeakError::NotSupported(
"dns leak watch requires pcap feature".to_string(),
))
}
#[cfg(feature = "pcap")]
pub fn iface_diagnostics() -> Result<Vec<IfaceDiag>, DnsLeakError> {
let interfaces = datalink::interfaces();
let mut config = DatalinkConfig::default();
config.read_timeout = Some(Duration::from_millis(500));
let mut out = Vec::new();
for iface in interfaces {
let result = match open_channel_with_timeout(iface.clone(), &config) {
Ok((_iface, _rx)) => IfaceDiag {
name: iface.name,
open_ok: true,
error: "-".to_string(),
},
Err(err) => IfaceDiag {
name: iface.name,
open_ok: false,
error: err,
},
};
out.push(result);
}
Ok(out)
}
#[cfg(feature = "pcap")]
fn capture_events_blocking(options: LeakWatchOptions) -> Result<Vec<ClassifiedEvent>, DnsLeakError> {
use pnet::packet::ethernet::{EtherTypes, EthernetPacket};
use pnet::packet::Packet;
let mut config = DatalinkConfig::default();
config.read_timeout = Some(Duration::from_millis(500));
let (iface, mut rx) = select_interface(options.iface.as_deref(), &config)?;
let local_ips = iface.ips.iter().map(|ip| ip.ip()).collect::<Vec<_>>();
let iface_name = iface.name.clone();
let deadline = Instant::now() + Duration::from_millis(options.duration_ms);
let mut events = Vec::new();
let mut seen = HashSet::new();
while Instant::now() < deadline {
let frame = match rx.next() {
Ok(frame) => frame,
Err(_) => continue,
};
let ethernet = match EthernetPacket::new(frame) {
Some(packet) => packet,
None => continue,
};
let event = match ethernet.get_ethertype() {
EtherTypes::Ipv4 => parse_ipv4(
ethernet.payload(),
&local_ips,
&iface_name,
),
EtherTypes::Ipv6 => parse_ipv6(
ethernet.payload(),
&local_ips,
&iface_name,
),
_ => None,
};
if let Some(event) = event {
let key = format!(
"{:?}|{}|{}|{}|{}",
event.transport, event.src_ip, event.src_port, event.dst_ip, event.dst_port
);
if seen.insert(key) {
debug!(
transport = ?event.transport,
src_ip = %event.src_ip,
src_port = event.src_port,
dst_ip = %event.dst_ip,
dst_port = event.dst_port,
"dns leak event"
);
events.push(event);
}
}
}
Ok(events)
}
#[cfg(feature = "pcap")]
fn parse_ipv4(
payload: &[u8],
local_ips: &[IpAddr],
iface_name: &str,
) -> Option<ClassifiedEvent> {
use pnet::packet::ip::IpNextHeaderProtocols;
use pnet::packet::ipv4::Ipv4Packet;
use pnet::packet::Packet;
let ipv4 = Ipv4Packet::new(payload)?;
let src = IpAddr::V4(ipv4.get_source());
if !local_ips.contains(&src) {
return None;
}
match ipv4.get_next_level_protocol() {
IpNextHeaderProtocols::Udp => parse_udp(
src,
IpAddr::V4(ipv4.get_destination()),
ipv4.payload(),
iface_name,
),
IpNextHeaderProtocols::Tcp => parse_tcp(
src,
IpAddr::V4(ipv4.get_destination()),
ipv4.payload(),
iface_name,
),
_ => None,
}
}
#[cfg(feature = "pcap")]
fn parse_ipv6(
payload: &[u8],
local_ips: &[IpAddr],
iface_name: &str,
) -> Option<ClassifiedEvent> {
use pnet::packet::ip::IpNextHeaderProtocols;
use pnet::packet::ipv6::Ipv6Packet;
use pnet::packet::Packet;
let ipv6 = Ipv6Packet::new(payload)?;
let src = IpAddr::V6(ipv6.get_source());
if !local_ips.contains(&src) {
return None;
}
match ipv6.get_next_header() {
IpNextHeaderProtocols::Udp => parse_udp(
src,
IpAddr::V6(ipv6.get_destination()),
ipv6.payload(),
iface_name,
),
IpNextHeaderProtocols::Tcp => parse_tcp(
src,
IpAddr::V6(ipv6.get_destination()),
ipv6.payload(),
iface_name,
),
_ => None,
}
}
#[cfg(feature = "pcap")]
fn parse_udp(
src_ip: IpAddr,
dst_ip: IpAddr,
payload: &[u8],
iface_name: &str,
) -> Option<ClassifiedEvent> {
use pnet::packet::udp::UdpPacket;
use pnet::packet::Packet;
let udp = UdpPacket::new(payload)?;
let dst_port = udp.get_destination();
if dst_port != 53 {
return None;
}
let (qname, qtype, rcode) = classify_dns_query(udp.payload())?;
Some(ClassifiedEvent {
timestamp_ms: now_ms(),
proto: FlowProtocol::Udp,
src_ip,
src_port: udp.get_source(),
dst_ip,
dst_port,
iface_name: Some(iface_name.to_string()),
transport: LeakTransport::Udp53,
qname: Some(qname),
qtype: Some(qtype),
rcode: Some(rcode),
})
}
#[cfg(feature = "pcap")]
fn parse_tcp(
src_ip: IpAddr,
dst_ip: IpAddr,
payload: &[u8],
iface_name: &str,
) -> Option<ClassifiedEvent> {
use pnet::packet::tcp::TcpPacket;
let tcp = TcpPacket::new(payload)?;
let dst_port = tcp.get_destination();
let transport = match dst_port {
53 => LeakTransport::Tcp53,
853 => LeakTransport::Dot,
_ => return None,
};
Some(ClassifiedEvent {
timestamp_ms: now_ms(),
proto: FlowProtocol::Tcp,
src_ip,
src_port: tcp.get_source(),
dst_ip,
dst_port,
iface_name: Some(iface_name.to_string()),
transport,
qname: None,
qtype: None,
rcode: None,
})
}
#[cfg(feature = "pcap")]
fn select_interface(
name: Option<&str>,
config: &DatalinkConfig,
) -> Result<(datalink::NetworkInterface, Box<dyn datalink::DataLinkReceiver>), DnsLeakError> {
let interfaces = datalink::interfaces();
if let Some(name) = name {
let iface = interfaces
.iter()
.find(|iface| iface.name == name)
.cloned()
.ok_or_else(|| {
DnsLeakError::Io(format!(
"interface '{name}' not found; candidates: {}",
format_iface_list(&interfaces)
))
})?;
return open_channel_with_timeout(iface, config).map_err(|err| {
DnsLeakError::Io(format!(
"failed to open capture on interface ({err}); candidates: {}",
format_iface_list(&interfaces)
))
});
}
if let Some(iface) = pick_stable_iface(&interfaces) {
if let Ok(channel) = open_channel_with_timeout(iface, config) {
return Ok(channel);
}
}
for iface in interfaces.iter() {
if let Ok(channel) = open_channel_with_timeout(iface.clone(), config) {
return Ok(channel);
}
}
Err(DnsLeakError::Io(format!(
"no suitable interface found; candidates: {}",
format_iface_list(&interfaces)
)))
}
#[cfg(feature = "pcap")]
fn open_channel_with_timeout(
iface: datalink::NetworkInterface,
config: &DatalinkConfig,
) -> Result<(datalink::NetworkInterface, Box<dyn datalink::DataLinkReceiver>), String> {
let (tx, rx) = mpsc::channel();
let config = config.clone();
std::thread::spawn(move || {
let result = match datalink::channel(&iface, config) {
Ok(Channel::Ethernet(_, rx)) => Ok(rx),
Ok(_) => Err("unsupported channel".to_string()),
Err(err) => Err(err.to_string()),
};
let _ = tx.send((iface, result));
});
let timeout = Duration::from_millis(700);
match rx.recv_timeout(timeout) {
Ok((iface, Ok(rx))) => Ok((iface, rx)),
Ok((_iface, Err(err))) => Err(err),
Err(_) => Err("timeout opening capture".to_string()),
}
}
#[cfg(feature = "pcap")]
fn is_named_fallback(name: &str) -> bool {
let name = name.to_ascii_lowercase();
name.contains("wlan")
|| name.contains("wifi")
|| name.contains("wi-fi")
|| name.contains("ethernet")
|| name.contains("eth")
|| name.contains("lan")
}
#[cfg(feature = "pcap")]
fn pick_stable_iface(
interfaces: &[datalink::NetworkInterface],
) -> Option<datalink::NetworkInterface> {
let mut preferred = interfaces
.iter()
.filter(|iface| {
iface.is_up()
&& !iface.is_loopback()
&& (is_named_fallback(&iface.name) || !iface.ips.is_empty())
})
.cloned()
.collect::<Vec<_>>();
if preferred.is_empty() {
preferred = interfaces
.iter()
.filter(|iface| !iface.is_loopback())
.cloned()
.collect();
}
preferred.into_iter().next()
}
#[cfg(feature = "pcap")]
fn format_iface_list(interfaces: &[datalink::NetworkInterface]) -> String {
if interfaces.is_empty() {
return "-".to_string();
}
interfaces
.iter()
.map(|iface| iface.name.as_str())
.collect::<Vec<_>>()
.join(", ")
}
#[cfg(feature = "pcap")]
fn now_ms() -> u128 {
SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap_or_default()
.as_millis()
}
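// A small test sketch, not part of the original change set (only compiled with
// the pcap feature): it pins the fallback interface-name heuristic and the
// formatting of an empty candidate list.
#[cfg(all(test, feature = "pcap"))]
mod tests {
    use super::{format_iface_list, is_named_fallback};

    #[test]
    fn named_fallback_matches_common_adapters() {
        assert!(is_named_fallback("Wi-Fi"));
        assert!(is_named_fallback("Ethernet 2"));
        assert!(is_named_fallback("wlan0"));
        assert!(!is_named_fallback("lo"));
    }

    #[test]
    fn empty_candidate_list_renders_dash() {
        assert_eq!(format_iface_list(&[]), "-");
    }
}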

View File

@@ -5,8 +5,21 @@ edition = "2024"
[dependencies]
reqwest = { version = "0.11", features = ["rustls-tls"] }
rustls = "0.21"
rustls-native-certs = "0.6"
serde = { version = "1", features = ["derive"] }
thiserror = "2"
tokio = { version = "1", features = ["net", "time"] }
tokio-rustls = "0.24"
tokio-socks = "0.5"
url = "2"
tracing = "0.1"
h3 = { version = "0.0.8", optional = true }
h3-quinn = { version = "0.0.10", optional = true }
quinn = { version = "0.11", optional = true }
http = "1"
webpki-roots = "1"
bytes = "1"
[features]
http3 = ["dep:h3", "dep:h3-quinn", "dep:quinn"]

View File

@@ -1,12 +1,28 @@
use reqwest::{Client, Method, Proxy, StatusCode};
use rustls::{Certificate, ClientConfig, RootCertStore, ServerName};
use serde::{Deserialize, Serialize};
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tokio::net::lookup_host;
use thiserror::Error;
use tokio::time::timeout;
use tokio_rustls::TlsConnector;
use tokio_socks::tcp::Socks5Stream;
use tracing::debug;
use url::Url;
#[cfg(feature = "http3")]
use bytes::Buf;
#[cfg(feature = "http3")]
use http::Request;
#[cfg(feature = "http3")]
use quinn::ClientConfig as QuinnClientConfig;
#[cfg(feature = "http3")]
use quinn::Endpoint;
#[cfg(feature = "http3")]
use webpki_roots::TLS_SERVER_ROOTS;
#[derive(Debug, Error)]
pub enum HttpError {
#[error("invalid url: {0}")]
@@ -36,6 +52,7 @@ pub struct HttpReport {
pub resolved_ips: Vec<String>,
pub headers: Vec<(String, String)>,
pub body: Option<String>,
pub warnings: Vec<String>,
pub timing: HttpTiming,
}
@@ -64,6 +81,8 @@ pub struct HttpRequestOptions {
pub show_body: bool,
pub http1_only: bool,
pub http2_only: bool,
pub http3: bool,
pub http3_only: bool,
pub proxy: Option<String>,
}
@@ -105,6 +124,43 @@ pub async fn request(url: &str, opts: HttpRequestOptions) -> Result<HttpReport,
}
let dns_ms = dns_start.elapsed().as_millis();
let mut warnings = Vec::new();
if opts.http3 || opts.http3_only {
if !cfg!(feature = "http3") {
warnings.push("http3 feature not enabled in build".to_string());
if opts.http3_only {
return Err(HttpError::Request(
"http3-only requested but feature is not enabled".to_string(),
));
}
}
}
#[cfg(feature = "http3")]
{
if opts.http3 || opts.http3_only {
match http3_request(url, &opts, &resolved_ips, dns_ms).await {
Ok((report, mut h3_warnings)) => {
warnings.append(&mut h3_warnings);
return Ok(HttpReport {
warnings,
..report
});
}
Err(err) => {
let err_string = err.to_string();
let category = classify_http3_error(&err_string);
warnings.push(format!(
"http3 failed (category={category}): {err_string}"
));
if opts.http3_only {
return Err(err);
}
}
}
}
}
let mut builder = Client::builder().timeout(Duration::from_millis(opts.timeout_ms));
builder = if let Some(max) = opts.follow_redirects {
builder.redirect(reqwest::redirect::Policy::limited(max as usize))
@@ -132,6 +188,16 @@ pub async fn request(url: &str, opts: HttpRequestOptions) -> Result<HttpReport,
}
let client = builder.build().map_err(|err| HttpError::Request(err.to_string()))?;
let (connect_ms, tls_ms, timing_warnings) = measure_connect_tls(
&parsed,
host,
port,
&resolved_ips,
opts.proxy.as_deref(),
opts.timeout_ms,
)
.await;
warnings.extend(timing_warnings);
let start = Instant::now();
let response = client
.request(opts.method.to_reqwest(), parsed.clone())
@@ -184,11 +250,12 @@ pub async fn request(url: &str, opts: HttpRequestOptions) -> Result<HttpReport,
resolved_ips,
headers,
body,
warnings,
timing: HttpTiming {
total_ms,
dns_ms: Some(dns_ms),
connect_ms: None,
tls_ms: None,
connect_ms,
tls_ms,
ttfb_ms: Some(ttfb_ms),
},
})
@@ -197,3 +264,342 @@ pub async fn request(url: &str, opts: HttpRequestOptions) -> Result<HttpReport,
fn status_code(status: StatusCode) -> Option<u16> {
Some(status.as_u16())
}
struct Socks5Proxy {
addr: String,
remote_dns: bool,
}
fn parse_socks5_proxy(value: &str) -> Result<Socks5Proxy, HttpError> {
let url = Url::parse(value).map_err(|err| HttpError::Request(err.to_string()))?;
let scheme = url.scheme();
let remote_dns = match scheme {
"socks5" => false,
"socks5h" => true,
_ => {
return Err(HttpError::Request(format!(
"unsupported proxy scheme: {scheme}"
)))
}
};
let host = url
.host_str()
.ok_or_else(|| HttpError::Request("invalid proxy host".to_string()))?;
let port = url
.port_or_known_default()
.ok_or_else(|| HttpError::Request("invalid proxy port".to_string()))?;
Ok(Socks5Proxy {
addr: format!("{host}:{port}"),
remote_dns,
})
}
async fn measure_connect_tls(
parsed: &Url,
host: &str,
port: u16,
resolved_ips: &[String],
proxy: Option<&str>,
timeout_ms: u64,
) -> (Option<u128>, Option<u128>, Vec<String>) {
let mut warnings = Vec::new();
let scheme = parsed.scheme();
if scheme != "http" && scheme != "https" {
warnings.push(format!("timing unavailable for scheme: {scheme}"));
return (None, None, warnings);
}
let timeout_dur = Duration::from_millis(timeout_ms);
let connect_start = Instant::now();
let tcp = if let Some(proxy) = proxy {
match parse_socks5_proxy(proxy) {
Ok(proxy) => {
let target = if proxy.remote_dns {
(host, port)
} else if let Some(ip) = resolved_ips.first() {
(ip.as_str(), port)
} else {
warnings.push("no resolved IPs for proxy connect".to_string());
return (None, None, warnings);
};
match timeout(timeout_dur, Socks5Stream::connect(proxy.addr.as_str(), target))
.await
{
Ok(Ok(stream)) => stream.into_inner(),
Ok(Err(err)) => {
warnings.push(format!("proxy connect failed: {err}"));
return (None, None, warnings);
}
Err(_) => {
warnings.push("proxy connect timed out".to_string());
return (None, None, warnings);
}
}
}
Err(err) => {
warnings.push(format!("proxy timing skipped: {err}"));
return (None, None, warnings);
}
}
} else {
let addr = match resolved_ips.first().and_then(|ip| ip.parse::<IpAddr>().ok()) {
Some(ip) => SocketAddr::new(ip, port),
None => {
warnings.push("no resolved IPs for connect timing".to_string());
return (None, None, warnings);
}
};
match timeout(timeout_dur, tokio::net::TcpStream::connect(addr)).await {
Ok(Ok(stream)) => stream,
Ok(Err(err)) => {
warnings.push(format!("connect failed: {err}"));
return (None, None, warnings);
}
Err(_) => {
warnings.push("connect timed out".to_string());
return (None, None, warnings);
}
}
};
let connect_ms = connect_start.elapsed().as_millis();
if scheme == "http" {
return (Some(connect_ms), None, warnings);
}
let tls_start = Instant::now();
let tls = match build_tls_connector() {
Ok(connector) => connector,
Err(err) => {
warnings.push(format!("tls timing skipped: {err}"));
return (Some(connect_ms), None, warnings);
}
};
let server_name = match ServerName::try_from(host) {
Ok(name) => name,
Err(_) => {
warnings.push("invalid tls server name".to_string());
return (Some(connect_ms), None, warnings);
}
};
match timeout(timeout_dur, tls.connect(server_name, tcp)).await {
Ok(Ok(_)) => {}
Ok(Err(err)) => {
warnings.push(format!("tls handshake failed: {err}"));
return (Some(connect_ms), None, warnings);
}
Err(_) => {
warnings.push("tls handshake timed out".to_string());
return (Some(connect_ms), None, warnings);
}
}
let tls_ms = tls_start.elapsed().as_millis();
(Some(connect_ms), Some(tls_ms), warnings)
}
fn build_tls_connector() -> Result<TlsConnector, HttpError> {
let mut roots = RootCertStore::empty();
let store = rustls_native_certs::load_native_certs()
.map_err(|err| HttpError::Request(err.to_string()))?;
for cert in store {
roots
.add(&Certificate(cert.0))
.map_err(|err| HttpError::Request(err.to_string()))?;
}
let config = ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates(roots)
.with_no_client_auth();
Ok(TlsConnector::from(Arc::new(config)))
}
#[cfg(feature = "http3")]
fn classify_http3_error(message: &str) -> &'static str {
let message = message.to_ascii_lowercase();
if message.contains("timeout") || message.contains("timed out") {
return "timeout";
}
if message.contains("no resolved ips") || message.contains("no addresses resolved") {
return "resolve";
}
if message.contains("udp") && message.contains("blocked") {
return "udp_blocked";
}
if message.contains("quic") || message.contains("connection refused") {
return "connect";
}
if message.contains("alpn") || message.contains("application protocol") {
return "alpn";
}
if message.contains("tls")
|| message.contains("certificate")
|| message.contains("crypto")
|| message.contains("handshake")
{
return "tls";
}
if message.contains("permission denied") || message.contains("access is denied") {
return "permission";
}
"unknown"
}
#[cfg(feature = "http3")]
async fn http3_request(
url: &str,
opts: &HttpRequestOptions,
resolved_ips: &[String],
dns_ms: u128,
) -> Result<(HttpReport, Vec<String>), HttpError> {
let mut warnings = Vec::new();
let parsed = Url::parse(url).map_err(|err| HttpError::Url(err.to_string()))?;
if parsed.scheme() != "https" {
return Err(HttpError::Request("http3 requires https scheme".to_string()));
}
if opts.proxy.is_some() {
return Err(HttpError::Request(
"http3 proxying is not supported".to_string(),
));
}
let host = parsed
.host_str()
.ok_or_else(|| HttpError::Url("missing host".to_string()))?;
let port = parsed
.port_or_known_default()
.ok_or_else(|| HttpError::Url("missing port".to_string()))?;
let ip = resolved_ips
.first()
.and_then(|value| value.parse::<IpAddr>().ok())
.ok_or_else(|| HttpError::Request("no resolved IPs for http3".to_string()))?;
let quinn_config = build_quinn_config()?;
let mut endpoint = Endpoint::client("0.0.0.0:0".parse().unwrap())
.map_err(|err| HttpError::Request(err.to_string()))?;
endpoint.set_default_client_config(quinn_config);
let connect_start = Instant::now();
let connecting = endpoint
.connect(SocketAddr::new(ip, port), host)
.map_err(|err| HttpError::Request(err.to_string()))?;
let connection = timeout(Duration::from_millis(opts.timeout_ms), connecting)
.await
.map_err(|_| HttpError::Request("http3 connect timed out".to_string()))?
.map_err(|err| HttpError::Request(err.to_string()))?;
let connect_ms = connect_start.elapsed().as_millis();
let conn = h3_quinn::Connection::new(connection);
let (mut driver, mut send_request) = h3::client::new(conn)
.await
.map_err(|err| HttpError::Request(err.to_string()))?;
tokio::spawn(async move {
let _ = driver.wait_idle().await;
});
let start = Instant::now();
let method = match opts.method {
HttpMethod::Head => http::Method::HEAD,
HttpMethod::Get => http::Method::GET,
};
let request = Request::builder()
.method(method)
.uri(parsed.as_str())
.header("user-agent", "wtfnet")
.body(())
.map_err(|err| HttpError::Request(err.to_string()))?;
let mut stream = send_request
.send_request(request)
.await
.map_err(|err| HttpError::Request(err.to_string()))?;
stream
.finish()
.await
.map_err(|err| HttpError::Request(err.to_string()))?;
let response = stream
.recv_response()
.await
.map_err(|err| HttpError::Response(err.to_string()))?;
let ttfb_ms = start.elapsed().as_millis();
let status = response.status();
let final_url = parsed.to_string();
let headers = if opts.show_headers {
response
.headers()
.iter()
.map(|(name, value)| {
let value = value.to_str().unwrap_or("-").to_string();
(name.to_string(), value)
})
.collect::<Vec<_>>()
} else {
Vec::new()
};
let body = if opts.show_body {
let mut buf = Vec::new();
while let Some(chunk) = stream
.recv_data()
.await
.map_err(|err| HttpError::Response(err.to_string()))?
{
let mut chunk = chunk;
while chunk.has_remaining() {
let bytes = chunk.copy_to_bytes(chunk.remaining());
buf.extend_from_slice(&bytes);
}
if buf.len() >= opts.max_body_bytes {
buf.truncate(opts.max_body_bytes);
break;
}
}
Some(String::from_utf8_lossy(&buf).to_string())
} else {
None
};
let total_ms = start.elapsed().as_millis();
warnings.push("http3 timing for tls/connect is best-effort".to_string());
Ok((
HttpReport {
url: url.to_string(),
final_url: Some(final_url),
method: match opts.method {
HttpMethod::Head => "HEAD".to_string(),
HttpMethod::Get => "GET".to_string(),
},
status: Some(status.as_u16()),
http_version: Some("HTTP/3".to_string()),
resolved_ips: resolved_ips.to_vec(),
headers,
body,
warnings: Vec::new(),
timing: HttpTiming {
total_ms,
dns_ms: Some(dns_ms),
connect_ms: Some(connect_ms),
tls_ms: None,
ttfb_ms: Some(ttfb_ms),
},
},
warnings,
))
}
#[cfg(feature = "http3")]
fn build_quinn_config() -> Result<QuinnClientConfig, HttpError> {
let mut roots = quinn::rustls::RootCertStore::empty();
roots.extend(TLS_SERVER_ROOTS.iter().cloned());
let mut client_config =
QuinnClientConfig::with_root_certificates(Arc::new(roots)).map_err(|err| {
HttpError::Request(format!("quinn config error: {err}"))
})?;
let mut transport = quinn::TransportConfig::default();
transport.keep_alive_interval(Some(Duration::from_secs(5)));
client_config.transport_config(Arc::new(transport));
Ok(client_config)
}
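// A small test sketch, not part of the original change set: it checks the
// SOCKS5 proxy URL parsing used for the connect/TLS timing path, including the
// remote-DNS distinction between socks5:// and socks5h://. Hosts and ports are
// arbitrary fixtures.
#[cfg(test)]
mod socks_proxy_tests {
    use super::parse_socks5_proxy;

    #[test]
    fn parses_socks5_variants() {
        let local = parse_socks5_proxy("socks5://127.0.0.1:1080").unwrap();
        assert_eq!(local.addr, "127.0.0.1:1080");
        assert!(!local.remote_dns);

        let remote = parse_socks5_proxy("socks5h://proxy.local:9050").unwrap();
        assert_eq!(remote.addr, "proxy.local:9050");
        assert!(remote.remote_dns);

        assert!(parse_socks5_proxy("http://127.0.0.1:8080").is_err());
    }
}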

View File

@@ -2,11 +2,13 @@ use async_trait::async_trait;
use network_interface::{Addr, NetworkInterface, NetworkInterfaceConfig};
use sha2::Digest;
use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use wtfnet_core::ErrorCode;
use wtfnet_platform::{
CertProvider, DnsConfigSnapshot, ListenSocket, NeighborEntry, NeighProvider, NetInterface,
Platform, PlatformError, PortsProvider, RootCert, RouteEntry, SysProvider,
CertProvider, ConnSocket, DnsConfigSnapshot, FlowOwner, FlowOwnerConfidence, FlowOwnerProvider,
FlowOwnerResult, FlowProtocol, FlowTuple, ListenSocket, NeighborEntry, NeighProvider,
NetInterface, Platform, PlatformError, PortsProvider, RootCert, RouteEntry, SysProvider,
};
use x509_parser::oid_registry::{
OID_KEY_TYPE_DSA, OID_KEY_TYPE_EC_PUBLIC_KEY, OID_KEY_TYPE_GOST_R3410_2012_256,
@@ -19,6 +21,7 @@ pub fn platform() -> Platform {
ports: Arc::new(LinuxPortsProvider),
cert: Arc::new(LinuxCertProvider),
neigh: Arc::new(LinuxNeighProvider),
flow_owner: Arc::new(LinuxFlowOwnerProvider),
}
}
@@ -26,6 +29,7 @@ struct LinuxSysProvider;
struct LinuxPortsProvider;
struct LinuxCertProvider;
struct LinuxNeighProvider;
struct LinuxFlowOwnerProvider;
#[async_trait]
impl SysProvider for LinuxSysProvider {
@@ -240,6 +244,63 @@ fn parse_linux_tcp_with_inode_map(
Ok(sockets)
}
fn parse_linux_tcp_conns(
path: &str,
is_v6: bool,
inode_map: &HashMap<String, ProcInfo>,
) -> Result<Vec<ConnSocket>, PlatformError> {
let contents = std::fs::read_to_string(path)
.map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
let mut sockets = Vec::new();
for (idx, line) in contents.lines().enumerate() {
if idx == 0 {
continue;
}
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 4 {
continue;
}
let local = parts[1];
let remote = parts[2];
let state = parts[3];
let inode = parts.get(9).copied();
if state == "0A" {
continue;
}
let local_addr = match parse_proc_socket_addr(local, is_v6) {
Some(addr) => addr,
None => continue,
};
let remote_addr = match parse_proc_socket_addr(remote, is_v6) {
Some(addr) => addr,
None => continue,
};
let (pid, ppid, process_name, process_path) =
inode.and_then(|value| inode_map.get(value)).map_or(
(None, None, None, None),
|info| {
(
Some(info.pid),
info.ppid,
info.name.clone(),
info.path.clone(),
)
},
);
sockets.push(ConnSocket {
proto: "tcp".to_string(),
local_addr,
remote_addr,
state: Some(map_tcp_state(state)),
pid,
ppid,
process_name,
process_path,
});
}
Ok(sockets)
}
fn parse_linux_udp_with_inode_map(
path: &str,
is_v6: bool,
@@ -286,6 +347,24 @@ fn parse_linux_udp_with_inode_map(
Ok(sockets)
}
fn map_tcp_state(value: &str) -> String {
match value {
"01" => "ESTABLISHED",
"02" => "SYN_SENT",
"03" => "SYN_RECV",
"04" => "FIN_WAIT1",
"05" => "FIN_WAIT2",
"06" => "TIME_WAIT",
"07" => "CLOSE",
"08" => "CLOSE_WAIT",
"09" => "LAST_ACK",
"0A" => "LISTEN",
"0B" => "CLOSING",
_ => "UNKNOWN",
}
.to_string()
}
fn parse_proc_socket_addr(value: &str, is_v6: bool) -> Option<String> {
let mut parts = value.split(':');
let addr_hex = parts.next()?;
@@ -300,6 +379,20 @@ fn parse_proc_socket_addr(value: &str, is_v6: bool) -> Option<String> {
}
}
fn parse_proc_socket_addr_value(value: &str, is_v6: bool) -> Option<SocketAddr> {
let mut parts = value.split(':');
let addr_hex = parts.next()?;
let port_hex = parts.next()?;
let port = u16::from_str_radix(port_hex, 16).ok()?;
if is_v6 {
let addr = parse_ipv6_hex(addr_hex)?;
Some(SocketAddr::new(IpAddr::V6(addr), port))
} else {
let addr = parse_ipv4_hex(addr_hex)?;
Some(SocketAddr::new(IpAddr::V4(addr), port))
}
}
fn parse_linux_arp(contents: &str) -> Vec<NeighborEntry> {
let mut neighbors = Vec::new();
for (idx, line) in contents.lines().enumerate() {
@@ -407,6 +500,138 @@ fn read_ppid(pid: u32) -> Option<u32> {
Some(ppid)
}
#[derive(Clone)]
struct ProcSocketEntry {
local: SocketAddr,
remote: SocketAddr,
inode: String,
}
fn parse_proc_socket_entries(
path: &str,
is_v6: bool,
) -> Result<Vec<ProcSocketEntry>, PlatformError> {
let contents = std::fs::read_to_string(path)
.map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
let mut entries = Vec::new();
for (idx, line) in contents.lines().enumerate() {
if idx == 0 {
continue;
}
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 10 {
continue;
}
let local = parts[1];
let remote = parts[2];
let inode = match parts.get(9) {
Some(value) => (*value).to_string(),
None => continue,
};
let local_addr = match parse_proc_socket_addr_value(local, is_v6) {
Some(addr) => addr,
None => continue,
};
let remote_addr = match parse_proc_socket_addr_value(remote, is_v6) {
Some(addr) => addr,
None => continue,
};
entries.push(ProcSocketEntry {
local: local_addr,
remote: remote_addr,
inode,
});
}
Ok(entries)
}
fn match_flow_entry<'a>(
flow: &FlowTuple,
entries: &'a [ProcSocketEntry],
match_remote: bool,
) -> Option<(&'a ProcSocketEntry, FlowOwnerConfidence)> {
for entry in entries {
let local_match = entry.local.port() == flow.src_port
&& (entry.local.ip() == flow.src_ip
|| entry.local.ip().is_unspecified()
|| entry.local.ip().is_loopback() && flow.src_ip.is_loopback());
if !local_match {
continue;
}
if match_remote {
let remote_match = entry.remote.port() == flow.dst_port
&& (entry.remote.ip() == flow.dst_ip
|| entry.remote.ip().is_unspecified());
if remote_match {
return Some((entry, FlowOwnerConfidence::High));
}
} else {
return Some((entry, FlowOwnerConfidence::Medium));
}
}
None
}
fn resolve_flow_owner(
flow: &FlowTuple,
) -> Result<FlowOwnerResult, PlatformError> {
let inode_map = build_inode_map();
let entries = match flow.proto {
FlowProtocol::Tcp => {
let mut out = parse_proc_socket_entries("/proc/net/tcp", false)?;
out.extend(parse_proc_socket_entries("/proc/net/tcp6", true)?);
out
}
FlowProtocol::Udp => {
let mut out = parse_proc_socket_entries("/proc/net/udp", false)?;
out.extend(parse_proc_socket_entries("/proc/net/udp6", true)?);
out
}
};
    let match_remote = matches!(flow.proto, FlowProtocol::Tcp);
    let matched = match_flow_entry(flow, &entries, match_remote).or_else(|| {
        // Fall back to a local-only match (lower confidence) when the strict
        // remote match finds nothing; UDP already matched on local only.
        if match_remote {
            match_flow_entry(flow, &entries, false)
        } else {
            None
        }
    });
let (entry, confidence) = match matched {
Some(value) => value,
None => {
return Ok(FlowOwnerResult {
owner: None,
confidence: FlowOwnerConfidence::None,
failure_reason: Some("no socket match".to_string()),
})
}
};
let owner = inode_map.get(&entry.inode).map(|info| FlowOwner {
pid: Some(info.pid),
ppid: info.ppid,
process_name: info.name.clone(),
process_path: info.path.clone(),
});
if owner.is_none() {
return Ok(FlowOwnerResult {
owner: None,
confidence: FlowOwnerConfidence::Low,
failure_reason: Some("socket owner not found".to_string()),
});
}
Ok(FlowOwnerResult {
owner,
confidence,
failure_reason: None,
})
}
fn load_native_roots(store: &str) -> Result<Vec<RootCert>, PlatformError> {
let certs = rustls_native_certs::load_native_certs()
.map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
@@ -518,6 +743,22 @@ impl PortsProvider for LinuxPortsProvider {
.filter(|socket| extract_port(&socket.local_addr) == Some(port))
.collect())
}
async fn connections(&self) -> Result<Vec<ConnSocket>, PlatformError> {
let inode_map = build_inode_map();
let mut sockets = Vec::new();
sockets.extend(parse_linux_tcp_conns(
"/proc/net/tcp",
false,
&inode_map,
)?);
sockets.extend(parse_linux_tcp_conns(
"/proc/net/tcp6",
true,
&inode_map,
)?);
Ok(sockets)
}
}
#[async_trait]
@@ -535,3 +776,10 @@ impl NeighProvider for LinuxNeighProvider {
Ok(parse_linux_arp(&contents))
}
}
#[async_trait]
impl FlowOwnerProvider for LinuxFlowOwnerProvider {
async fn owner_of(&self, flow: FlowTuple) -> Result<FlowOwnerResult, PlatformError> {
resolve_flow_owner(&flow)
}
}
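// A minimal test sketch, not part of the original change set: it covers the
// /proc TCP state mapping and the tuple matching used for flow attribution.
// Addresses and the inode value are arbitrary fixtures.
#[cfg(test)]
mod flow_tests {
    use super::{map_tcp_state, match_flow_entry, ProcSocketEntry};
    use std::net::{IpAddr, Ipv4Addr, SocketAddr};
    use wtfnet_platform::{FlowOwnerConfidence, FlowProtocol, FlowTuple};

    #[test]
    fn maps_proc_tcp_states() {
        assert_eq!(map_tcp_state("01"), "ESTABLISHED");
        assert_eq!(map_tcp_state("0A"), "LISTEN");
        assert_eq!(map_tcp_state("zz"), "UNKNOWN");
    }

    #[test]
    fn matches_exact_tcp_tuple_with_high_confidence() {
        let entry = ProcSocketEntry {
            local: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 168, 1, 10)), 50_000),
            remote: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1)), 53),
            inode: "1234".to_string(),
        };
        let flow = FlowTuple {
            proto: FlowProtocol::Tcp,
            src_ip: IpAddr::V4(Ipv4Addr::new(192, 168, 1, 10)),
            src_port: 50_000,
            dst_ip: IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1)),
            dst_port: 53,
        };
        let (matched, confidence) =
            match_flow_entry(&flow, std::slice::from_ref(&entry), true).expect("tuple matches");
        assert_eq!(matched.inode, "1234");
        assert!(matches!(confidence, FlowOwnerConfidence::High));
    }
}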

View File

@@ -2,6 +2,7 @@ use async_trait::async_trait;
use network_interface::{Addr, NetworkInterface, NetworkInterfaceConfig};
use regex::Regex;
use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};
use sha2::Digest;
use x509_parser::oid_registry::{
OID_KEY_TYPE_DSA, OID_KEY_TYPE_EC_PUBLIC_KEY, OID_KEY_TYPE_GOST_R3410_2012_256,
@@ -10,8 +11,9 @@ use x509_parser::oid_registry::{
use std::sync::Arc;
use wtfnet_core::ErrorCode;
use wtfnet_platform::{
CertProvider, DnsConfigSnapshot, ListenSocket, NeighborEntry, NeighProvider, NetInterface,
Platform, PlatformError, PortsProvider, RootCert, RouteEntry, SysProvider,
CertProvider, ConnSocket, DnsConfigSnapshot, FlowOwner, FlowOwnerConfidence, FlowOwnerProvider,
FlowOwnerResult, FlowProtocol, FlowTuple, ListenSocket, NeighborEntry, NeighProvider,
NetInterface, Platform, PlatformError, PortsProvider, RootCert, RouteEntry, SysProvider,
};
pub fn platform() -> Platform {
@@ -20,6 +22,7 @@ pub fn platform() -> Platform {
ports: Arc::new(WindowsPortsProvider),
cert: Arc::new(WindowsCertProvider),
neigh: Arc::new(WindowsNeighProvider),
flow_owner: Arc::new(WindowsFlowOwnerProvider),
}
}
@@ -27,6 +30,7 @@ struct WindowsSysProvider;
struct WindowsPortsProvider;
struct WindowsCertProvider;
struct WindowsNeighProvider;
struct WindowsFlowOwnerProvider;
#[async_trait]
impl SysProvider for WindowsSysProvider {
@@ -333,6 +337,33 @@ fn parse_windows_listeners() -> Result<Vec<ListenSocket>, PlatformError> {
Ok(sockets)
}
fn parse_windows_connections() -> Result<Vec<ConnSocket>, PlatformError> {
let proc_map = load_windows_process_map();
let output = std::process::Command::new("netstat")
.arg("-ano")
.output()
.map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
if !output.status.success() {
return Err(PlatformError::new(ErrorCode::IoError, "netstat -ano failed"));
}
let text = String::from_utf8_lossy(&output.stdout);
let mut sockets = Vec::new();
for line in text.lines() {
let trimmed = line.trim();
if !trimmed.starts_with("TCP") {
continue;
}
if let Some(mut socket) = parse_netstat_tcp_conn_line(trimmed) {
enrich_conn_socket(&mut socket, &proc_map);
sockets.push(socket);
}
}
Ok(sockets)
}
fn parse_netstat_tcp_line(line: &str) -> Option<ListenSocket> {
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 5 {
@@ -358,6 +389,32 @@ fn parse_netstat_tcp_line(line: &str) -> Option<ListenSocket> {
})
}
fn parse_netstat_tcp_conn_line(line: &str) -> Option<ConnSocket> {
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 5 {
return None;
}
let local = parts[1];
let remote = parts[2];
let state = parts[3];
let pid = parts[4].parse::<u32>().ok();
if state == "LISTENING" {
return None;
}
Some(ConnSocket {
proto: "tcp".to_string(),
local_addr: local.to_string(),
remote_addr: remote.to_string(),
state: Some(state.to_string()),
pid,
ppid: None,
process_name: None,
process_path: None,
})
}
fn parse_netstat_udp_line(line: &str) -> Option<ListenSocket> {
let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 4 {
@@ -429,6 +486,17 @@ fn enrich_socket(socket: &mut ListenSocket, map: &HashMap<u32, ProcInfo>) {
}
}
fn enrich_conn_socket(socket: &mut ConnSocket, map: &HashMap<u32, ProcInfo>) {
let pid = match socket.pid {
Some(pid) => pid,
None => return,
};
if let Some(info) = map.get(&pid) {
socket.process_name = info.name.clone();
socket.process_path = info.path.clone();
}
}
#[derive(Clone)]
struct ProcInfo {
name: Option<String>,
@@ -515,6 +583,155 @@ fn parse_csv_line(line: &str) -> Vec<String> {
out
}
#[derive(Clone)]
struct FlowEntry {
proto: FlowProtocol,
local: SocketAddr,
remote: Option<SocketAddr>,
pid: u32,
}
fn parse_netstat_flow_entries() -> Result<Vec<FlowEntry>, PlatformError> {
let output = std::process::Command::new("netstat")
.arg("-ano")
.output()
.map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
if !output.status.success() {
return Err(PlatformError::new(ErrorCode::IoError, "netstat -ano failed"));
}
let text = String::from_utf8_lossy(&output.stdout);
let mut entries = Vec::new();
for line in text.lines() {
let trimmed = line.trim();
if trimmed.starts_with("TCP") {
let parts: Vec<&str> = trimmed.split_whitespace().collect();
if parts.len() < 5 {
continue;
}
let state = parts[3];
if state == "LISTENING" {
continue;
}
let local = match parse_netstat_addr(parts[1]) {
Some(addr) => addr,
None => continue,
};
let remote = match parse_netstat_addr(parts[2]) {
Some(addr) => addr,
None => continue,
};
let pid = match parts[4].parse::<u32>() {
Ok(pid) => pid,
Err(_) => continue,
};
entries.push(FlowEntry {
proto: FlowProtocol::Tcp,
local,
remote: Some(remote),
pid,
});
} else if trimmed.starts_with("UDP") {
let parts: Vec<&str> = trimmed.split_whitespace().collect();
if parts.len() < 4 {
continue;
}
let local = match parse_netstat_addr(parts[1]) {
Some(addr) => addr,
None => continue,
};
let pid = match parts[3].parse::<u32>() {
Ok(pid) => pid,
Err(_) => continue,
};
entries.push(FlowEntry {
proto: FlowProtocol::Udp,
local,
remote: None,
pid,
});
}
}
Ok(entries)
}
fn parse_netstat_addr(value: &str) -> Option<SocketAddr> {
let value = value.trim();
if value == "*:*" {
return None;
}
if let Some(rest) = value.strip_prefix('[') {
let end = rest.find(']')?;
let host = &rest[..end];
let port = rest[end + 2..].parse::<u16>().ok()?;
let host = host.split('%').next().unwrap_or(host);
let ip: IpAddr = host.parse().ok()?;
return Some(SocketAddr::new(ip, port));
}
let pos = value.rfind(':')?;
let host = &value[..pos];
let port = value[pos + 1..].parse::<u16>().ok()?;
let ip: IpAddr = host.parse().ok()?;
Some(SocketAddr::new(ip, port))
}
fn resolve_flow_owner(flow: &FlowTuple) -> Result<FlowOwnerResult, PlatformError> {
let entries = parse_netstat_flow_entries()?;
let proc_map = load_windows_process_map();
let mut matched: Option<(u32, FlowOwnerConfidence)> = None;
for entry in entries {
if entry.proto != flow.proto {
continue;
}
let local_match = entry.local.ip() == flow.src_ip && entry.local.port() == flow.src_port;
if !local_match {
continue;
}
match flow.proto {
FlowProtocol::Tcp => {
// TCP: require both endpoints of the tuple to match before claiming high confidence.
if let Some(remote) = entry.remote {
if remote.ip() == flow.dst_ip && remote.port() == flow.dst_port {
matched = Some((entry.pid, FlowOwnerConfidence::High));
break;
}
}
}
FlowProtocol::Udp => {
// UDP: netstat reports no remote endpoint, so a local match is only medium confidence.
matched = Some((entry.pid, FlowOwnerConfidence::Medium));
break;
}
}
}
let (pid, confidence) = match matched {
Some(value) => value,
None => {
return Ok(FlowOwnerResult {
owner: None,
confidence: FlowOwnerConfidence::None,
failure_reason: Some("no socket match".to_string()),
})
}
};
let info = proc_map.get(&pid);
let owner = Some(FlowOwner {
pid: Some(pid),
ppid: None,
process_name: info.and_then(|value| value.name.clone()),
process_path: info.and_then(|value| value.path.clone()),
});
Ok(FlowOwnerResult {
owner,
confidence,
failure_reason: None,
})
}
fn load_native_roots(store: &str) -> Result<Vec<RootCert>, PlatformError> {
let certs = rustls_native_certs::load_native_certs()
.map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
@@ -605,6 +822,10 @@ impl PortsProvider for WindowsPortsProvider {
.filter(|socket| extract_port(&socket.local_addr) == Some(port))
.collect())
}
async fn connections(&self) -> Result<Vec<ConnSocket>, PlatformError> {
parse_windows_connections()
}
}
#[async_trait]
@@ -628,3 +849,10 @@ impl NeighProvider for WindowsNeighProvider {
Ok(parse_arp_output(&text))
}
}
#[async_trait]
impl FlowOwnerProvider for WindowsFlowOwnerProvider {
async fn owner_of(&self, flow: FlowTuple) -> Result<FlowOwnerResult, PlatformError> {
resolve_flow_owner(&flow)
}
}

View File

@@ -1,5 +1,6 @@
use async_trait::async_trait;
use serde::{Deserialize, Serialize};
use std::net::IpAddr;
use std::sync::Arc;
use wtfnet_core::ErrorCode;
@@ -46,6 +47,18 @@ pub struct ListenSocket {
pub owner: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConnSocket {
pub proto: String,
pub local_addr: String,
pub remote_addr: String,
pub state: Option<String>,
pub pid: Option<u32>,
pub ppid: Option<u32>,
pub process_name: Option<String>,
pub process_path: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RootCert {
pub subject: String,
@@ -68,6 +81,46 @@ pub struct NeighborEntry {
pub state: Option<String>,
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum FlowProtocol {
Udp,
Tcp,
}
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum FlowOwnerConfidence {
High,
Medium,
Low,
None,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlowOwner {
pub pid: Option<u32>,
pub ppid: Option<u32>,
pub process_name: Option<String>,
pub process_path: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlowOwnerResult {
pub owner: Option<FlowOwner>,
pub confidence: FlowOwnerConfidence,
pub failure_reason: Option<String>,
}
#[derive(Debug, Clone)]
pub struct FlowTuple {
pub proto: FlowProtocol,
pub src_ip: IpAddr,
pub src_port: u16,
pub dst_ip: IpAddr,
pub dst_port: u16,
}
#[derive(Debug, Clone)]
pub struct PlatformError {
pub code: ErrorCode,
@@ -98,6 +151,7 @@ pub trait SysProvider: Send + Sync {
pub trait PortsProvider: Send + Sync {
async fn listening(&self) -> Result<Vec<ListenSocket>, PlatformError>;
async fn who_owns(&self, port: u16) -> Result<Vec<ListenSocket>, PlatformError>;
async fn connections(&self) -> Result<Vec<ConnSocket>, PlatformError>;
}
#[async_trait]
@@ -110,9 +164,15 @@ pub trait NeighProvider: Send + Sync {
async fn neighbors(&self) -> Result<Vec<NeighborEntry>, PlatformError>;
}
#[async_trait]
pub trait FlowOwnerProvider: Send + Sync {
async fn owner_of(&self, flow: FlowTuple) -> Result<FlowOwnerResult, PlatformError>;
}
pub struct Platform {
pub sys: Arc<dyn SysProvider>,
pub ports: Arc<dyn PortsProvider>,
pub cert: Arc<dyn CertProvider>,
pub neigh: Arc<dyn NeighProvider>,
pub flow_owner: Arc<dyn FlowOwnerProvider>,
}

View File

@@ -35,6 +35,9 @@ pub struct TlsCertSummary {
pub not_before: String,
pub not_after: String,
pub san: Vec<String>,
pub signature_algorithm: Option<String>,
pub key_usage: Option<Vec<String>>,
pub extended_key_usage: Option<Vec<String>>,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -45,6 +48,7 @@ pub struct TlsHandshakeReport {
pub alpn_negotiated: Option<String>,
pub tls_version: Option<String>,
pub cipher: Option<String>,
pub ocsp_stapled: Option<bool>,
pub cert_chain: Vec<TlsCertSummary>,
}
@@ -56,6 +60,7 @@ pub struct TlsVerifyReport {
pub alpn_negotiated: Option<String>,
pub tls_version: Option<String>,
pub cipher: Option<String>,
pub ocsp_stapled: Option<bool>,
pub verified: bool,
pub error: Option<String>,
}
@@ -64,6 +69,7 @@ pub struct TlsVerifyReport {
pub struct TlsCertReport {
pub target: String,
pub sni: Option<String>,
pub ocsp_stapled: Option<bool>,
pub cert_chain: Vec<TlsCertSummary>,
}
@@ -83,6 +89,8 @@ pub struct TlsOptions {
pub insecure: bool,
pub socks5: Option<String>,
pub prefer_ipv4: bool,
pub show_extensions: bool,
pub ocsp: bool,
}
pub async fn handshake(target: &str, options: TlsOptions) -> Result<TlsHandshakeReport, TlsError> {
@@ -120,7 +128,8 @@ pub async fn handshake(target: &str, options: TlsOptions) -> Result<TlsHandshake
cipher: session
.negotiated_cipher_suite()
.map(|suite| format!("{suite:?}")),
cert_chain: extract_cert_chain(session.peer_certificates())?,
ocsp_stapled: ocsp_status(session, options.ocsp),
cert_chain: extract_cert_chain(session.peer_certificates(), options.show_extensions)?,
})
}
@@ -160,6 +169,7 @@ pub async fn verify(target: &str, options: TlsOptions) -> Result<TlsVerifyReport
cipher: session
.negotiated_cipher_suite()
.map(|suite| format!("{suite:?}")),
ocsp_stapled: ocsp_status(session, options.ocsp),
verified: true,
error: None,
})
@@ -171,6 +181,7 @@ pub async fn verify(target: &str, options: TlsOptions) -> Result<TlsVerifyReport
alpn_negotiated: None,
tls_version: None,
cipher: None,
ocsp_stapled: None,
verified: false,
error: Some(err.to_string()),
}),
@@ -203,7 +214,8 @@ pub async fn certs(target: &str, options: TlsOptions) -> Result<TlsCertReport, T
Ok(TlsCertReport {
target: target.to_string(),
sni: options.sni,
cert_chain: extract_cert_chain(session.peer_certificates())?,
ocsp_stapled: ocsp_status(session, options.ocsp),
cert_chain: extract_cert_chain(session.peer_certificates(), options.show_extensions)?,
})
}
@@ -427,26 +439,41 @@ fn socks5_target_host(proxy: &str, host: &str) -> (String, bool) {
(host.to_string(), remote_dns)
}
fn extract_cert_chain(certs: Option<&[Certificate]>) -> Result<Vec<TlsCertSummary>, TlsError> {
fn extract_cert_chain(
certs: Option<&[Certificate]>,
show_extensions: bool,
) -> Result<Vec<TlsCertSummary>, TlsError> {
let mut results = Vec::new();
if let Some(certs) = certs {
for cert in certs {
let summary = parse_cert(&cert.0)?;
let summary = parse_cert(&cert.0, show_extensions)?;
results.push(summary);
}
}
Ok(results)
}
fn parse_cert(der: &[u8]) -> Result<TlsCertSummary, TlsError> {
fn parse_cert(der: &[u8], show_extensions: bool) -> Result<TlsCertSummary, TlsError> {
let (_, cert) =
X509Certificate::from_der(der).map_err(|err| TlsError::Parse(err.to_string()))?;
let (key_usage, extended_key_usage, signature_algorithm) = if show_extensions {
(
extract_key_usage(&cert),
extract_extended_key_usage(&cert),
Some(cert.signature_algorithm.algorithm.to_string()),
)
} else {
(None, None, None)
};
Ok(TlsCertSummary {
subject: cert.subject().to_string(),
issuer: cert.issuer().to_string(),
not_before: cert.validity().not_before.to_string(),
not_after: cert.validity().not_after.to_string(),
san: extract_san(&cert),
signature_algorithm,
key_usage,
extended_key_usage,
})
}
@@ -460,6 +487,85 @@ fn extract_san(cert: &X509Certificate<'_>) -> Vec<String> {
result
}
fn extract_key_usage(cert: &X509Certificate<'_>) -> Option<Vec<String>> {
let ext = cert.key_usage().ok()??;
let mut result = Vec::new();
if ext.value.digital_signature() {
result.push("digitalSignature".to_string());
}
if ext.value.non_repudiation() {
result.push("nonRepudiation".to_string());
}
if ext.value.key_encipherment() {
result.push("keyEncipherment".to_string());
}
if ext.value.data_encipherment() {
result.push("dataEncipherment".to_string());
}
if ext.value.key_agreement() {
result.push("keyAgreement".to_string());
}
if ext.value.key_cert_sign() {
result.push("keyCertSign".to_string());
}
if ext.value.crl_sign() {
result.push("cRLSign".to_string());
}
if ext.value.encipher_only() {
result.push("encipherOnly".to_string());
}
if ext.value.decipher_only() {
result.push("decipherOnly".to_string());
}
if result.is_empty() {
None
} else {
Some(result)
}
}
fn extract_extended_key_usage(cert: &X509Certificate<'_>) -> Option<Vec<String>> {
let ext = cert.extended_key_usage().ok()??;
let mut result = Vec::new();
if ext.value.any {
result.push("any".to_string());
}
if ext.value.server_auth {
result.push("serverAuth".to_string());
}
if ext.value.client_auth {
result.push("clientAuth".to_string());
}
if ext.value.code_signing {
result.push("codeSigning".to_string());
}
if ext.value.email_protection {
result.push("emailProtection".to_string());
}
if ext.value.time_stamping {
result.push("timeStamping".to_string());
}
if ext.value.ocsp_signing {
result.push("ocspSigning".to_string());
}
for oid in &ext.value.other {
result.push(oid.to_string());
}
if result.is_empty() {
None
} else {
Some(result)
}
}
fn ocsp_status(_session: &rustls::ClientConnection, enabled: bool) -> Option<bool> {
// Placeholder: the stapled OCSP response is not yet surfaced through this rustls
// session handle, so the indicator stays unknown (None) even when --ocsp is requested.
let _ = enabled;
None
}
struct NoVerifier;
impl rustls::client::ServerCertVerifier for NoVerifier {

69
docs/COMMANDS.md Normal file
View File

@@ -0,0 +1,69 @@
# WTFnet Commands
This document lists CLI commands and supported flags. Output defaults to text; use `--json` for structured output.
## Global flags
- `--json` / `--pretty`
- `--no-color` / `--quiet`
- `-v` / `-vv` / `--verbose`
- `--log-level <error|warn|info|debug|trace>`
- `--log-format <text|json>`
- `--log-file <path>`
- `NETTOOL_LOG_FILTER` or `RUST_LOG` can override log filters (ex: `maxminddb::decoder=debug`)
## sys
- `sys ifaces`
- `sys ip` flags: `--all`, `--iface <name>`
- `sys route` flags: `--ipv4`, `--ipv6`, `--to <ip>`
- `sys dns`
## ports
- `ports listen` flags: `--tcp`, `--udp`, `--port <n>`
- `ports who <port>`
- `ports conns` flags: `--top <n>`, `--by-process`
## neigh
- `neigh list` flags: `--ipv4`, `--ipv6`, `--iface <name>`
## cert
- `cert roots`
- `cert baseline <path>`
- `cert diff <path>`
## geoip
- `geoip lookup <ip>`
- `geoip status`
## probe
- `probe ping <host>` flags: `--count <n>`, `--timeout-ms <n>`, `--interval-ms <n>`, `--no-geoip`
- `probe tcping <host:port>` flags: `--count <n>`, `--timeout-ms <n>`, `--socks5 <url>`, `--prefer-ipv4`, `--no-geoip`
- `probe trace <host>` flags: `--max-hops <n>`, `--per-hop <n>`, `--timeout-ms <n>`, `--udp`, `--port <n>`, `--rdns`, `--no-geoip`
## dns
- `dns query <domain> <type>` flags: `--server <ip[:port]>`, `--transport <udp|tcp|dot|doh>`, `--tls-name <name>`, `--socks5 <url>`, `--prefer-ipv4`, `--timeout-ms <n>`
- `dns detect <domain>` flags: `--servers <csv>`, `--transport <udp|tcp|dot|doh>`, `--tls-name <name>`, `--socks5 <url>`, `--prefer-ipv4`, `--repeat <n>`, `--timeout-ms <n>`
- `dns watch` flags: `--duration <Ns|Nms>`, `--iface <name>`, `--filter <pattern>`
- `dns leak status` flags: `--profile <full-tunnel|proxy-stub|split>`, `--policy <path>`
- `dns leak watch` flags: `--duration <Ns|Nms>`, `--iface <name>`, `--profile <full-tunnel|proxy-stub|split>`, `--policy <path>`, `--privacy <full|redacted|minimal>`, `--out <path>`, `--summary-only`, `--iface-diag`
- `dns leak report` flags: `<path>`, `--privacy <full|redacted|minimal>`
## http
- `http head|get <url>` flags: `--timeout-ms <n>`, `--follow-redirects <n>`, `--show-headers`, `--show-body`, `--max-body-bytes <n>`, `--http1-only`, `--http2-only`, `--http3` (feature `http3`), `--http3-only` (feature `http3`), `--geoip`, `--socks5 <url>`
## tls
- `tls handshake|cert|verify|alpn <host:port>` flags: `--sni <name>`, `--alpn <csv>`, `--timeout-ms <n>`, `--insecure`, `--socks5 <url>`, `--prefer-ipv4`, `--show-extensions`, `--ocsp`
## discover
- `discover mdns` flags: `--duration <Ns|Nms>`, `--service <type>`
- `discover ssdp` flags: `--duration <Ns|Nms>`
- `discover llmnr` flags: `--duration <Ns|Nms>`, `--name <host>`
- `discover nbns` flags: `--duration <Ns|Nms>`
## diag
- `diag` flags: `--out <path>`, `--bundle <path>`, `--dns-detect <domain>`, `--dns-timeout-ms <n>`, `--dns-repeat <n>`
## calc
- `calc subnet <cidr>|<ip> <mask>`
- `calc contains <a> <b>`
- `calc overlap <a> <b>`
- `calc summarize <cidr...>`

View File

@@ -0,0 +1,176 @@
# DNS Leak Detector - Implementation Guide (v0.4)
This document explains how to implement the DNS leak detector as a new subcrate in WTFnet.
## 1) New crate: `wtfnet-dnsleak`
### 1.1 Module layout
crates/wtfnet-dnsleak/src/
- lib.rs
- policy.rs # safe path constraints + presets
- sensor.rs # passive capture -> normalized TrafficEvent stream
- classify.rs # transport classification + confidence
- route.rs # interface/route classification (tunnel/physical/loopback)
- rules.rs # Leak-A/B/C/D evaluation
- report.rs # LeakEvent + SummaryReport builders
- privacy.rs # full/redacted/minimal redaction logic
## 2) Core data types
### 2.1 TrafficEvent (raw from sensor)
Fields:
- ts: timestamp
- proto: udp/tcp
- src_ip, src_port
- dst_ip, dst_port
- iface_name (capture interface if known)
- payload: optional bytes (only for plaintext DNS parsing)
### 2.2 ClassifiedEvent
Adds:
- transport: udp53/tcp53/dot/doh/unknown
- doh_confidence: HIGH/MEDIUM/LOW (only if doh)
- qname/qtype: nullable
### 2.3 EnrichedEvent
Adds:
- route_class: loopback/tunnel/physical/unknown
- process info: pid/ppid/name (nullable)
- attribution_confidence: HIGH/MEDIUM/LOW/NONE
- attrib_failure_reason: optional string
### 2.4 LeakEvent (final output)
Adds:
- leak_type: A/B/C/D
- severity: P0..P3
- policy_rule_id
- evidence: minimal structured evidence
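To make these shapes concrete, here is a minimal Rust sketch of the §2 types. Field and variant names follow the lists above; the concrete types (strings for route_class, a tuple for the destination) are illustrative assumptions, not the shipped definitions.
```rust
use std::net::IpAddr;
use std::time::SystemTime;

pub enum Transport { Udp53, Tcp53, Dot, Doh, Unknown }
pub enum Confidence { High, Medium, Low, None }
pub enum LeakType { A, B, C, D }
pub enum Severity { P0, P1, P2, P3 }

// 2.1 TrafficEvent: raw output of the sensor.
pub struct TrafficEvent {
    pub ts: SystemTime,
    pub proto: String,             // "udp" | "tcp"
    pub src_ip: IpAddr,
    pub src_port: u16,
    pub dst_ip: IpAddr,
    pub dst_port: u16,
    pub iface_name: Option<String>,
    pub payload: Option<Vec<u8>>,  // kept only for plaintext DNS parsing
}

// 2.4 LeakEvent: final output after classification, enrichment and rule evaluation.
pub struct LeakEvent {
    pub ts: SystemTime,
    pub transport: Transport,
    pub qname: Option<String>,
    pub qtype: Option<String>,
    pub route_class: String,       // loopback/tunnel/physical/unknown
    pub dst: (IpAddr, u16),
    pub pid: Option<u32>,
    pub attribution_confidence: Confidence,
    pub leak_type: LeakType,
    pub severity: Severity,
    pub policy_rule_id: Option<String>,
}
```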
## 3) Platform integration: Process Attribution Engine (PAE)
### 3.1 Trait addition (wtfnet-platform)
Add:
trait FlowOwnerProvider {
fn owner_of(
&self,
proto: Proto,
src_ip: IpAddr,
src_port: u16,
dst_ip: IpAddr,
dst_port: u16,
) -> FlowOwnerResult;
}
FlowOwnerResult:
- pid, ppid, process_name (optional)
- confidence: HIGH/MEDIUM/LOW/NONE
- failure_reason: optional string
Design rule: attribution is best-effort and never blocks leak detection.
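For comparison, the trait as actually landed in `wtfnet-platform` in this change is async and takes a `FlowTuple` instead of loose parameters (see the platform diff earlier in this compare). A condensed, self-contained sketch with the error type stubbed out:
```rust
use async_trait::async_trait;
use std::net::IpAddr;

#[derive(Clone, Copy, PartialEq, Eq)]
pub enum FlowProtocol { Udp, Tcp }

#[derive(Clone, Copy)]
pub enum FlowOwnerConfidence { High, Medium, Low, None }

pub struct FlowOwner {
    pub pid: Option<u32>,
    pub ppid: Option<u32>,
    pub process_name: Option<String>,
    pub process_path: Option<String>,
}

pub struct FlowOwnerResult {
    pub owner: Option<FlowOwner>,
    pub confidence: FlowOwnerConfidence,
    pub failure_reason: Option<String>,
}

pub struct FlowTuple {
    pub proto: FlowProtocol,
    pub src_ip: IpAddr,
    pub src_port: u16,
    pub dst_ip: IpAddr,
    pub dst_port: u16,
}

pub struct PlatformError; // stand-in for the crate's error type (code + message)

#[async_trait]
pub trait FlowOwnerProvider: Send + Sync {
    /// Best-effort: an unresolved owner is reported via `confidence: None` plus a
    /// `failure_reason`, not as an error, so leak detection is never blocked.
    async fn owner_of(&self, flow: FlowTuple) -> Result<FlowOwnerResult, PlatformError>;
}
```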
## 4) Transport classification logic
### 4.1 Plain DNS
Match:
- UDP dst port 53 OR TCP dst port 53
Parse QNAME/QTYPE from payload.
### 4.2 DoT
Match:
- TCP dst port 853
### 4.3 DoH (heuristic)
Match candidates:
- TCP dst port 443 AND (one of):
- dst IP in configured DoH resolver list
- dst SNI matches known DoH provider list (if available)
- frequent small HTTPS bursts pattern (weak)
Attach confidence:
- MEDIUM: known endpoint match
- LOW: traffic-shape heuristic only
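A classification sketch following the port rules above. It reuses the `Transport` and `Confidence` names from the §2 sketch and `FlowProtocol` from `wtfnet-platform`; the boolean stands in for the configured DoH resolver/SNI lists, and the traffic-shape heuristic is left out.
```rust
fn classify_transport(
    proto: FlowProtocol,
    dst_port: u16,
    dst_matches_known_doh_endpoint: bool,
) -> (Transport, Option<Confidence>) {
    match (proto, dst_port) {
        (FlowProtocol::Udp, 53) => (Transport::Udp53, None),
        (FlowProtocol::Tcp, 53) => (Transport::Tcp53, None),
        (FlowProtocol::Tcp, 853) => (Transport::Dot, None),
        // DoH is only ever a guess: MEDIUM on a known endpoint/SNI match,
        // LOW for a traffic-shape-only match (not modeled here).
        (FlowProtocol::Tcp, 443) if dst_matches_known_doh_endpoint => {
            (Transport::Doh, Some(Confidence::Medium))
        }
        _ => (Transport::Unknown, None),
    }
}
```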
## 5) Policy model
Policy defines "safe DNS path" constraints:
- allowed interfaces
- allowed destinations (IP/CIDR)
- allowed processes
- allowed ports
A DNS event is a leak if it violates safe-path constraints.
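One way to express that check in code (a sketch only: the field names mirror the constraint list above, and treating the constraints as a conjunction, with empty sets meaning "no restriction", is an illustrative choice):
```rust
use std::collections::HashSet;
use std::net::IpAddr;

pub struct SafePathPolicy {
    pub allowed_ifaces: HashSet<String>,  // e.g. loopback, tunnel
    pub allowed_dests: HashSet<IpAddr>,   // proxy / internal resolver IPs (CIDR support omitted)
    pub allowed_processes: HashSet<String>,
    pub allowed_ports: HashSet<u16>,
}

pub struct DnsEventView<'a> {
    pub iface: Option<&'a str>,
    pub dst_ip: IpAddr,
    pub dst_port: u16,
    pub process_name: Option<&'a str>,
}

/// Returns true when the event violates the safe DNS path, i.e. it is a leak candidate.
pub fn violates_safe_path(policy: &SafePathPolicy, ev: &DnsEventView) -> bool {
    let iface_ok = policy.allowed_ifaces.is_empty()
        || ev.iface.map_or(false, |name| policy.allowed_ifaces.contains(name));
    let dest_ok = policy.allowed_dests.is_empty() || policy.allowed_dests.contains(&ev.dst_ip);
    let port_ok = policy.allowed_ports.is_empty() || policy.allowed_ports.contains(&ev.dst_port);
    // Process attribution is best-effort; an unknown owner is not treated as a violation by itself.
    let proc_ok = policy.allowed_processes.is_empty()
        || ev.process_name.map_or(true, |name| policy.allowed_processes.contains(name));
    !(iface_ok && dest_ok && port_ok && proc_ok)
}
```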
### 5.1 Built-in profiles
full-tunnel:
- allow DNS only via tunnel iface or loopback stub
- any UDP/TCP 53 on physical iface => Leak-A
proxy-stub (default):
- allow DNS only to loopback stub
- allow stub upstream only to proxy destinations
- flag direct DoH/DoT outside proxy path => Leak-C
split:
- allow plaintext DNS only for allowlist
- enforce unknown => proxy resolve (Leak-B)
## 6) Leak rules (A/B/C/D)
Leak-A (plaintext escape):
- transport udp53/tcp53
- route_class != allowed
- dst not in allowed destination set
Leak-B (split policy intent leak):
- qname matches proxy-required set or "unknown"
- query observed going to ISP/domicile resolver or non-tunnel iface
Leak-C (encrypted bypass):
- DoT or DoH flow exists
- not via approved egress path (iface/destination)
Leak-D (mismatch indicator):
- correlate qname to later TCP/TLS flows (optional v0.4 NICE)
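Rules A-C compress into a small decision once the §5 policy checks have run. A sketch (reusing `Transport` and `LeakType` from the §2 sketch; the booleans stand in for the route/destination checks, and Leak-D correlation is left out, matching the note above):
```rust
fn evaluate_rules(
    transport: Transport,
    on_safe_path: bool,          // passed the §5 safe-path check
    via_isp_or_physical: bool,   // observed on a non-tunnel / ISP-facing path
    qname_requires_proxy: bool,  // proxy-required set match, or "unknown" under split policy
) -> Option<LeakType> {
    match transport {
        Transport::Udp53 | Transport::Tcp53 => {
            if !on_safe_path {
                Some(LeakType::A) // plaintext escape
            } else if qname_requires_proxy && via_isp_or_physical {
                Some(LeakType::B) // split-policy intent leak
            } else {
                None
            }
        }
        Transport::Dot | Transport::Doh => {
            if on_safe_path { None } else { Some(LeakType::C) } // encrypted bypass
        }
        Transport::Unknown => None,
    }
}
```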
## 7) Privacy modes
Because domains and cmdlines are sensitive, support:
- Full: store full qname and cmdline
- Redacted (default): hash qname or keep eTLD+1 only; truncate cmdline
- Minimal: no domains/cmdline; keep leak counts + resolver IPs + process name
Privacy mode applies in report builder, not in sensor.
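A sketch of how the qname redaction might look in the report builder. The hashing scheme and the "keep the last two labels" approximation of eTLD+1 are assumptions for illustration; a real implementation would use a public-suffix list and a keyed hash.
```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

pub enum Privacy { Full, Redacted, Minimal }

pub fn redact_qname(mode: &Privacy, qname: Option<String>) -> Option<String> {
    match (mode, qname) {
        (Privacy::Full, q) => q,
        (Privacy::Minimal, _) => None, // minimal mode drops domains entirely
        (Privacy::Redacted, None) => None,
        (Privacy::Redacted, Some(q)) => {
            // Keep an eTLD+1-style tail and replace the leading labels with a hash.
            let labels: Vec<&str> = q.split('.').collect();
            if labels.len() <= 2 {
                return Some(q);
            }
            let tail = labels[labels.len() - 2..].join(".");
            let mut hasher = DefaultHasher::new();
            q.hash(&mut hasher);
            Some(format!("{:016x}.{tail}", hasher.finish()))
        }
    }
}
```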
## 8) CLI integration
Add under `dns` command group:
- `dns leak status`
- `dns leak watch`
- `dns leak report`
`watch` returns:
- summary report (human) by default
- `--json` returns structured report with events list
## 9) Recommended incremental build plan
Phase 1 (core passive detection):
- sensor: udp/tcp capture
- classify: udp53/tcp53/dot
- parse plaintext qname/qtype
- policy: allowlist + allowed interfaces/dests
- leak rules: Leak-A + Leak-C (DoT)
- report: events + summary
Phase 2 (process attribution + DoH heuristics):
- platform FlowOwnerProvider impls
- DoH heuristic classification + confidence
- privacy modes
Phase 3 (optional correlation / Leak-D):
- flow tracker correlating DNS -> TCP/TLS connect events
- mismatch indicator output

View File

@@ -10,7 +10,7 @@ v0.3.0 focuses on improving diagnostic depth and fidelity of existing commands r
Major upgrades in this release:
- richer traceroute output and per-hop statistics
- HTTP timing breakdown accuracy (connect/tls stages)
- optional HTTP/3 support (best-effort)
- optional HTTP/3 support (feature-gated; experimental)
- TLS diagnostics upgrades (OCSP stapling indicator, richer certificate parsing)
- ports connections view and summaries
- improved cert baseline/diff for system roots
@@ -67,7 +67,7 @@ Acceptance:
- on timeout / failure, partial timing must still be meaningful.
### 3.3 HTTP/3 (optional feature flag) (SHOULD)
Current: HTTP/3 not implemented.
Current: feature-gated HTTP/3 path exists but is incomplete; keep disabled in default builds.
Target:
- add `--http3` support behind Cargo feature `http3`
- behavior:
@@ -79,6 +79,7 @@ Target:
Acceptance:
- builds without `http3` feature still work
- with feature enabled, HTTP/3 works on at least one known compatible endpoint
- documented as experimental until stabilized
### 3.4 TLS extras: OCSP + richer cert parsing (MUST)
Current: `tls handshake/verify/cert/alpn` exists.

154
docs/RELEASE_v0.4.0.md Normal file
View File

@@ -0,0 +1,154 @@
# WTFnet v0.4.0 - DNS Leak Detection
v0.4.0 introduces a client-side DNS leak detector aimed at censorship-resistance threat models:
detect when DNS behavior escapes the intended safe path. The detector focuses on evidence:
transport, interface, destination, and (best-effort) process attribution.
This release does NOT include HTTP/3 or OS-native TLS verification.
## 0) Summary
New major capability: `dns leak` command group.
Core idea:
Passive monitor captures outbound DNS-like traffic -> classify (Plain DNS / DoT / DoH) ->
enrich with interface/route/process metadata -> evaluate leak definitions (A/B/C/D) ->
emit events + summary report.
Leak definitions are explicit:
- Leak-A: plaintext DNS outside safe path
- Leak-B: split-policy intent leak (proxy-required domains resolved via ISP/local path)
- Leak-C: encrypted DNS escape/bypass (DoH/DoT outside approved egress)
- Leak-D: mismatch risk indicator (DNS egress differs from TCP/TLS egress)
## 1) Goals
### G1. Detect DNS leaks without needing special test domains
Passive detection must work continuously and produce evidence.
### G2. Support censorship-resistance leak definitions
Include both classic VPN-bypass leaks and split-policy intent leaks.
### G3. Best-effort process attribution
Attach PID/PPID/process name when OS allows; degrade gracefully with confidence.
### G4. Privacy-aware by default
Support privacy modes: Full / Redacted / Minimal.
## 2) Non-goals (v0.4.0)
- No "doctor" / smart one-shot diagnosis command
- No shell completions / man pages
- No HTTP/3 support
- No OS-native TLS verifier integration
- No firewall modification / kill switch management (detection only)
## 3) New crates / architecture changes
### 3.1 New subcrate: `wtfnet-dnsleak`
Responsibilities:
- passive sensor (pcap/pnet feature-gated)
- DNS parser (plaintext only)
- transport classifier: udp53/tcp53/dot/doh (confidence)
- flow tracker + metadata enrichment
- process attribution integration
- leak rules engine (A/B/C/D)
- structured event + summary report builder
### 3.2 `wtfnet-platform` extension: flow ownership lookup
Add a new trait:
- FlowOwnerProvider: map observed traffic 5-tuple -> process info (best-effort)
Return process attribution confidence:
HIGH/MEDIUM/LOW/NONE plus failure reason.
## 4) CLI scope
### 4.1 Commands
New command group:
#### `wtfn dns leak watch`
Start passive monitoring for a bounded duration (default 10s):
- classify transports (udp53/tcp53/dot/doh)
- apply leak rules and emit events + summary
#### `wtfn dns leak status`
Print baseline snapshot:
- interfaces + routes
- system DNS configuration
- active policy summary
#### `wtfn dns leak report`
Parse a saved events file and produce a human summary.
### 4.2 Flags (proposed)
Common:
- `--duration <Ns|Nms>` (default 10s)
- `--iface <name>` (optional capture interface)
- `--policy <path>` (JSON policy file)
- `--profile <full-tunnel|proxy-stub|split>` (built-in presets)
- `--privacy <full|redacted|minimal>` (default redacted)
- `--out <path>` (write JSON report/events)
## 5) Policy model (v0.4.0)
Safe DNS path constraints can be defined by:
- allowed interfaces: loopback/tunnel
- allowed destination set: proxy IPs, internal resolvers
- allowed processes: only local stub/proxy can resolve upstream
- allowed ports: e.g. only 443 to proxy server
A DNS event is a leak if it violates safe-path constraints.
Built-in profiles:
1) full-tunnel VPN style
2) proxy + local stub (default, censorship model)
3) split policy
## 6) Outputs
### 6.1 Leak events (structured)
Each LeakEvent includes:
- timestamp
- transport: udp53/tcp53/dot/doh/unknown
- qname/qtype (nullable)
- interface + route_class
- dst ip:port
- process info (nullable) + attribution confidence
- leak_type: A/B/C/D
- severity: P0..P3
- evidence fields + optional geoip
### 6.2 Summary report
- leak counts by type
- top leaking processes (if available)
- top resolver destinations
- timeline/burst hints
## 7) Deliverables checklist
MUST:
- new `wtfnet-dnsleak` crate integrated into workspace + CLI
- passive capture for UDP/TCP 53 and TCP 853
- DoH heuristic classification (confidence-based)
- policy engine + Leak-A/B/C/D rules
- structured events + human summary
- privacy modes full/redacted/minimal
- best-effort process attribution with confidence and failure reason
SHOULD:
- saved report file support (`--out report.json`)
- route_class inference with policy hints + heuristics
NICE:
- correlation_id (DNS -> subsequent TCP/TLS connection) for Leak-D mismatch indicator
## 8) Definition of Done
- v0.4.0 builds on Linux (Debian/Ubuntu) and Windows
- `wtfn dns leak watch` detects:
- plaintext DNS leaving physical interface (Leak-A)
- DoT traffic leaving outside approved egress (Leak-C)
- DoH-ish encrypted resolver traffic outside policy (Leak-C)
- events include interface + dst + (best-effort) PID/process info
- output remains stable and additive; no breaking change to v0.3 commands

View File

@@ -3,42 +3,43 @@
This is a practical checklist to execute v0.3.0.
## 1) probe/trace upgrades
- [ ] add `--per-hop <n>` and store RTT samples per hop
- [ ] compute loss% per hop
- [ ] add `--rdns` best-effort reverse lookup (cached + time-bounded)
- [ ] improve hop formatting + JSON schema
- [x] add `--per-hop <n>` and store RTT samples per hop
- [x] compute loss% per hop
- [x] add `--rdns` best-effort reverse lookup (cached + time-bounded)
- [x] improve hop formatting + JSON schema
## 2) http timing improvements
- [ ] implement `connect_ms` and `tls_ms` timing
- [ ] report `null` + warning when measurement unavailable
- [x] implement `connect_ms` and `tls_ms` timing
- [x] report `null` + warning when measurement unavailable
- [ ] keep current `dns_ms` and `ttfb_ms`
## 3) optional HTTP/3
- [ ] add `http3` cargo feature + deps
- [ ] implement `--http3` / `--http3-only`
- [ ] define error classification for QUIC failures
## 3) tls extras
- [x] add OCSP stapling presence indicator (if available)
- [x] parse SANs and key usage / EKU best-effort
- [x] add `--show-extensions` and `--ocsp` flags
## 4) tls extras
- [ ] add OCSP stapling presence indicator (if available)
- [ ] parse SANs and key usage / EKU best-effort
- [ ] add `--show-extensions` and `--ocsp` flags
## 4) ports conns
- [x] implement `wtfn ports conns`
- [x] add `--top <n>` and `--by-process`
- [x] best-effort PID mapping with warnings
## 5) ports conns
- [ ] implement `wtfn ports conns`
- [ ] add `--top <n>` and `--by-process`
- [ ] best-effort PID mapping with warnings
## 5) cert baseline/diff improvements
- [x] baseline schema version
- [x] match by SHA256 fingerprint
- [x] diff categories: add/remove/expired/changed
## 6) cert baseline/diff improvements
- [ ] baseline schema version
- [ ] match by SHA256 fingerprint
- [ ] diff categories: add/remove/expired/changed
## 6) optional LLMNR/NBNS
- [x] implement `discover llmnr`
- [x] implement `discover nbns`
- [x] bounded collection, low-noise
## 7) optional LLMNR/NBNS
- [ ] implement `discover llmnr`
- [ ] implement `discover nbns`
- [ ] bounded collection, low-noise
## 7) docs updates
- [x] update README roadmap
- [x] update COMMANDS.md with new flags/commands
- [x] add RELEASE_v0.3.0.md
## 8) docs updates
- [ ] update README roadmap
- [ ] update COMMANDS.md with new flags/commands
- [ ] add RELEASE_v0.3.0.md
## 8) optional HTTP/3 (last)
- [x] add `http3` cargo feature + deps
- [x] implement `--http3` / `--http3-only`
- [x] define error classification for QUIC failures
- [x] keep feature disabled in default builds until stabilized

33
docs/WORK_ITEMS_v0.4.0.md Normal file
View File

@@ -0,0 +1,33 @@
# WTFnet v0.4.0 - Work Items
This is a practical checklist to execute v0.4.0.
## 1) platform flow ownership
- [x] add FlowOwnerProvider trait + data types
- [x] implement Linux best-effort lookup
- [x] implement Windows best-effort lookup
## 2) new wtfnet-dnsleak crate
- [x] crate scaffold + pcap feature
- [x] UDP/TCP 53 capture + classify
- [x] DoT detection (TCP 853)
- [x] policy model + profiles
- [x] leak rules A/B/C (partial)
- [x] privacy modes
- [x] report + summary builder
## 3) CLI wiring
- [x] add `dns leak status`
- [x] add `dns leak watch`
- [x] add `dns leak report`
## 4) docs updates
- [x] add `docs/RELEASE_v0.4.0.md`
- [x] add `docs/DNS_LEAK_DETECTOR_IMPLEMENTATION.md`
- [x] update README roadmap + flags
- [x] update COMMANDS.md
- [x] update status/implementation docs
## 5) follow-ups
- [ ] add DoH heuristic classification (optional)
- [ ] add Leak-D mismatch correlation (optional)

View File

@@ -0,0 +1,723 @@
Below is a **high-level (language-agnostic)** design for a **client-side DNS leak detector** aimed at *censorship-resistance threat models*, i.e.:
> “Censor/ISP can observe/log DNS intent or infer proxy usage; we want to detect when DNS behavior escapes the intended protection path.”
I'll cover: **definitions**, **detection standards**, **workflow**, **modules**, **passive+active detection**, **outputs**, and **test methodology**.
---
# 1) Scope and goals
## Goals
Your detector should answer, with evidence:
1. **Did any DNS query leave the device outside the intended safe path?**
2. **Which domains leaked?** (when visible)
3. **Which transport leaked?** (UDP/53, TCP/53, DoT/853, DoH)
4. **Which interface leaked?** (Wi-Fi/Ethernet vs tunnel)
5. **Which process/app triggered it?** (if your OS allows attribution)
And in your censorship model, it should also detect:
6. **Split-policy intent leakage**: “unknown/sensitive domains were resolved using domestic/ISP-facing DNS.”
## Non-goals (be explicit)
* Not a censorship circumvention tool itself
* Not a full firewall manager (can suggest fixes, but detection is the core)
* Not perfect attribution on every OS (process mapping may be partial)
---
# 2) Define “DNS leak” precisely (your program's standard)
You need a **formal definition** because “DNS leak” is overloaded.
## Standard definition A (classic VPN / tunnel bypass)
A leak occurs if:
> **An unencrypted DNS query is sent outside the secure tunnel path**
> This is essentially how popular leak test sites define it (“unencrypted DNS query sent OUTSIDE the established VPN tunnel”). ([IP Leak][1])
Your detector should implement it in a machine-checkable way:
**Leak-A condition**
* DNS over **UDP/53 or TCP/53**
* Destination is **not** a “trusted resolver path” (e.g., not the tunnel interface, not loopback stub, not proxy channel)
* Interface is **not** the intended egress
✅ Strong for censorship: plaintext DNS exposes intent.
---
## Standard definition B (split-policy intent leak)
A leak occurs if:
> **A domain that should be “proxied / remote-resolved” was queried via local/ISP-facing DNS.**
This is the “proxy split rules still leak intent” case.
**Leak-B condition**
* Query name matches either:
* a “proxy-required set” (sensitive list, non-allowlist, unknown), or
* a policy rule (“everything except allowlist must resolve via proxy DNS”)
* And the query was observed going to:
* ISP resolver(s) / domestic resolver(s) / non-tunnel interface
✅ This is the leak most users in censorship settings care about.
---
## Standard definition C (encrypted DNS escape / bypass)
A leak occurs if:
> DNS was encrypted, but escaped the intended channel (e.g., app uses its own DoH directly to the Internet).
This matters because DoH hides the QNAME but still creates **observable behavior** and breaks your “DNS must follow proxy” invariant.
**Leak-C condition**
* DoH (RFC 8484) ([IETF Datatracker][2]) or DoT (RFC 7858) ([IETF Datatracker][3]) flow exists
* And it does **not** go through your approved egress path (tunnel/proxy)
✅ Detects “Firefox/Chrome built-in DoH bypass” style cases.
---
## Standard definition D (mismatch risk indicator)
Not a “leak” by itself, but a **proxy inference amplifier**:
> DNS egress region/path differs from traffic egress region/path.
This is a *censorship-resistance hygiene metric*, not a binary leak.
**Mismatch condition**
* Same domain produces:
* DNS resolution via path X
* TCP/TLS connection via path Y
* Where X ≠ Y (interface, ASN region, etc.)
✅ Helps catch “DNS direct, traffic proxy” or “DNS proxy, traffic direct” weirdness.
---
# 3) High-level architecture
## Core components
1. **Policy & Configuration**
* What counts as “safe DNS path”
* Which interfaces are “protected” (tunnel) vs “physical”
* Allowlist / proxy-required sets (optional)
* Known resolver lists (optional)
* Severity thresholds
2. **Traffic Sensor (Passive Monitor)**
* Captures outbound traffic metadata (and optionally payload for DNS parsing)
* Must cover:
* UDP/53, TCP/53
* TCP/853 (DoT)
* HTTPS flows that look like DoH (see below)
* Emits normalized events into a pipeline
3. **Classifier**
* Recognize DNS protocol types:
* Plain DNS
* DoT
* DoH
* Attach confidence scores (especially for DoH)
4. **DNS Parser (for plaintext DNS only)**
* Extract: QNAME, QTYPE, transaction IDs, response codes (optional)
* Store minimally (privacy-aware)
5. **Flow Tracker**
* Correlate packets into “flows”
* Map flow → interface → destination → process (if possible)
* Track timing correlation: DNS → connection attempts
6. **Leak Detector (Rules Engine)**
* Apply Leak-A/B/C/D definitions
* Produce leak events + severity + evidence chain
7. **Active Prober**
* Generates controlled DNS lookups to test behavior
* Can test fail-closed, bypasses, multi-interface behavior, etc.
8. **Report Generator**
* Human-readable summary
* Machine-readable logs (JSON)
* Recommendations (non-invasive)
---
# 4) Workflow (end-to-end)
## Workflow 0: Setup & baseline
1. Enumerate interfaces and routes
* Identify physical NICs
* Identify tunnel / proxy interface (or “expected egress destinations”)
2. Identify system DNS configuration
* Default resolvers per interface
* Local stub presence (127.0.0.1, etc.)
3. Load policy profile
* Full-tunnel, split-tunnel, or proxy-based
4. Start passive monitor
**Output:** “Current state snapshot” (useful even before testing).
---
## Workflow 1: Passive detection loop (always-on)
Continuously:
1. Capture outbound packets/flows
2. Classify as DNS-like (plain DNS / DoT / DoH / unknown)
3. If plaintext DNS → parse QNAME/QTYPE
4. Assign metadata:
* interface
* dst IP/port
* process (if possible)
* timestamp
5. Evaluate leak rules:
* Leak-A/B/C/D
6. Write event log + optional real-time alert
**Key design point:** passive mode should be able to detect leaks **without requiring any special test domain**.
---
## Workflow 2: Active test suite (on-demand)
Active tests exist because some leaks are intermittent or only happen under stress.
### Active Test A: “No plaintext DNS escape”
* Trigger a set of DNS queries (unique random domains)
* Verify **zero UDP/53 & TCP/53** leaves physical interfaces
### Active Test B: “Fail-closed test”
* Temporarily disrupt the “protected path” (e.g., tunnel down)
* Trigger lookups again
* Expected: DNS fails (no fallback to ISP DNS)
### Active Test C: “App bypass test”
* Launch test scenarios that mimic real apps
* Confirm no direct DoH/DoT flows go to public Internet outside the proxy path
### Active Test D: “Split-policy correctness”
* Query domains that should be:
* direct-allowed
* proxy-required
* unknown
* Confirm resolution path matches policy
---
# 5) How to recognize DNS transports (detection mechanics)
## Plain DNS (strongest signal)
**Match conditions**
* UDP dst port 53 OR TCP dst port 53
* Parse DNS header
* Extract QNAME/QTYPE
**Evidence strength:** high
**Intent visibility:** yes (domain visible)
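Parsing the question section out of a plaintext UDP DNS payload is cheap; a minimal Rust sketch (first question only, no compression-pointer handling, and note that DNS over TCP carries an extra two-byte length prefix not handled here):
```rust
/// Extract (qname, qtype) of the first question from a raw UDP DNS message.
fn parse_question(payload: &[u8]) -> Option<(String, u16)> {
    // 12-byte header: ID, flags, QDCOUNT, ANCOUNT, NSCOUNT, ARCOUNT (all big-endian u16).
    if payload.len() < 12 {
        return None;
    }
    let qdcount = u16::from_be_bytes([payload[4], payload[5]]);
    if qdcount == 0 {
        return None;
    }
    let mut pos = 12;
    let mut labels = Vec::new();
    loop {
        let len = *payload.get(pos)? as usize;
        pos += 1;
        if len == 0 {
            break; // root label terminates the name
        }
        if len & 0xC0 != 0 {
            return None; // compression pointers are not expected in a question name
        }
        labels.push(String::from_utf8_lossy(payload.get(pos..pos + len)?).into_owned());
        pos += len;
    }
    let qtype = u16::from_be_bytes([*payload.get(pos)?, *payload.get(pos + 1)?]);
    Some((labels.join("."), qtype))
}
```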
---
## DoT (port-based, easy)
DoT is defined over TLS, typically port **853**. ([IETF Datatracker][3])
**Match conditions**
* TCP dst port 853
* Optionally confirm TLS handshake exists
**Evidence strength:** high
**Intent visibility:** no (domain hidden)
---
## DoH (harder; heuristic + optional allowlists)
DoH is DNS over HTTPS (RFC 8484). ([IETF Datatracker][2])
**Recognizers (from strongest to weakest):**
1. HTTP request with `Content-Type: application/dns-message`
2. Path/pattern common to DoH endpoints (optional list)
3. SNI matches known DoH providers (optional list)
4. Traffic resembles frequent small HTTPS POST/GET bursts typical of DoH (weak)
**Evidence strength:** medium
**Intent visibility:** no (domain hidden)
**Important for your use-case:** you may not need to *prove* it's DoH; you mostly need to detect “DNS-like encrypted resolver traffic bypassing the proxy channel.”
---
# 6) Policy model: define “safe DNS path”
You need a simple abstraction users can configure:
### Safe DNS path can be defined by one or more of:
* **Allowed interfaces**
* loopback (local stub)
* tunnel interface
* **Allowed destination set**
* proxy server IP(s)
* internal resolver IP(s)
* **Allowed process**
* only your local stub + proxy allowed to resolve externally
* **Allowed port set**
* maybe only permit 443 to proxy server (if DNS rides inside it)
Then implement:
**A DNS event is a “leak” if it violates safe-path constraints.**
---
# 7) Leak severity model (useful for real-world debugging)
### Severity P0 (critical)
* Plaintext DNS (UDP/TCP 53) on physical interface to ISP/public resolver
* Especially if QNAME matches proxy-required/sensitive list
### Severity P1 (high)
* DoH/DoT bypassing proxy channel directly to public Internet
### Severity P2 (medium)
* Policy mismatch: domain resolved locally but connection later proxied (or vice versa)
### Severity P3 (low / info)
* Authoritative-side “resolver egress exposure” (less relevant for client-side leak detector)
* CDN performance mismatch indicators
---
# 8) Outputs and reporting
## Real-time console output (for debugging)
* “DNS leak detected: Plain DNS”
* domain (if visible)
* destination resolver IP
* interface
* process name (if available)
* policy rule violated
* suggested fix category (e.g., “force stub + block port 53”)
## Forensics log (machine-readable)
A single **LeakEvent** record could include:
* timestamp
* leak_type (A/B/C/D)
* transport (UDP53, TCP53, DoT, DoH)
* qname/qtype (nullable)
* src_iface / dst_ip / dst_port
* process_id/process_name (nullable)
* correlation_id (link DNS → subsequent connection attempt)
* confidence score (esp. DoH)
* raw evidence pointers (pcap offsets / event IDs)
## Summary report
* Leak counts by type
* Top leaking processes
* Top leaking resolver destinations
* Timeline view (bursts often indicate OS fallback behavior)
* “Pass/Fail” per policy definition
---
# 9) Validation strategy (“how do I know my detector is correct?”)
## Ground truth tests
1. **Known-leak scenario**
* intentionally set OS DNS to ISP DNS, no tunnel
* detector must catch plaintext DNS
2. **Known-safe scenario**
* local stub only + blocked outbound 53/853
* detector should show zero leaks
3. **Bypass scenario**
* enable browser built-in DoH directly
* detector should catch encrypted resolver bypass (Leak-C)
4. **Split-policy scenario**
* allowlist CN direct, everything else proxy-resolve
* detector should show:
* allowlist resolved direct
* unknown resolved via proxy path
---
# 10) Recommended “profiles” (makes tool usable)
Provide built-in presets:
### Profile 1: Full-tunnel VPN
* allow DNS only via tunnel interface or loopback stub
* any UDP/TCP 53 on physical NIC = leak
### Profile 2: Proxy + local stub (your case)
* allow DNS only to loopback stub
* allow stub upstream only via proxy server destinations
* flag any direct DoH/DoT to public endpoints
### Profile 3: Split tunnel (geoip + allowlist)
* allow plaintext DNS **only** for allowlisted domains (if user accepts risk)
* enforce “unknown → proxy-resolve”
* emphasize Leak-B correctness
---
Below is an updated **high-level design** (still language-agnostic) that integrates **process attribution** cleanly, including how it fits into the workflow and what to log.
---
# 1) New component: Process Attribution Engine (PAE)
## Purpose
When a DNS-like event is observed, the PAE tries to attach:
* **PID**
* **PPID**
* **process name**
* *(optional but extremely useful)* full command line, executable path, user, container/app package, etc.
This lets your logs answer:
> “Which program generated the leaked DNS request?”
> “Was it a browser, OS service, updater, antivirus, proxy itself, or some library?”
## Position in the pipeline
It sits between **Traffic Sensor** and **Leak Detector** as an “event enricher”:
**Traffic Event → (Classifier) → (Process Attribution) → Enriched Event → Leak Rules → Report**
---
# 2) Updated architecture (with process attribution)
### Existing modules (from earlier design)
1. Policy & Configuration
2. Traffic Sensor (packet/flow monitor)
3. Classifier (Plain DNS / DoT / DoH / Unknown)
4. DNS Parser (plaintext only)
5. Flow Tracker
6. Leak Detector (rules engine)
7. Active Prober
8. Report Generator
### New module
9. **Process Attribution Engine (PAE)**
* resolves “who owns this flow / packet”
* emits PID/PPID/name
* handles platform-specific differences and fallbacks
---
# 3) Workflow changes (what happens when a potential leak is seen)
## Passive detection loop (updated)
1. Capture outbound traffic event
2. Classify transport type:
* UDP/53, TCP/53 → plaintext DNS
* TCP/853 → DoT
* HTTPS patterns → DoH (heuristic)
3. Extract the **5-tuple**
* src IP:port, dst IP:port, protocol
4. **PAE lookup**
* resolve the owner process for this traffic
* attach PID/PPID/name (+ optional metadata)
5. Apply leak rules (A/B/C/D)
6. Emit:
* realtime log line (human readable)
* structured record (JSON/event log)
---
# 4) Process attribution: what to detect and how (high-level)
Process attribution always works on one core concept:
> **Map observed traffic (socket/flow) → owning process**
### Inputs PAE needs
* protocol (UDP/TCP)
* local src port
* local address
* timestamp
* optionally: connection state / flow ID
### Output from PAE
* `pid`, `ppid`, `process_name`
* optional enrichment:
* `exe_path`
* `cmdline`
* `user`
* “process tree chain” (for debugging: parent → child → …)
---
# 5) Platform support strategy (without implementation detail)
Process attribution is **OS-specific**, so structure it as:
## “Attribution Provider” interface
* Provider A: “kernel-level flow owner”
* Provider B: “socket table owner lookup”
* Provider C: “event tracing feed”
* Provider D: fallback “unknown / not supported”
Your main design goal is:
### Design rule
**Attribution must be best-effort + gracefully degrading**, never blocking detection.
So you always log the leak even if PID is unavailable:
* `pid=null, attribution_confidence=LOW`
---
# 6) Attribution confidence + race handling (important!)
Attribution can be tricky because:
* a process may exit quickly (“short-lived resolver helper”)
* ports can be reused
* NAT or local proxies may obscure the real origin
So log **confidence**:
* **HIGH**: direct mapping from kernel/socket owner at time of event
* **MEDIUM**: mapping by lookup shortly after event (possible race)
* **LOW**: inferred / uncertain
* **NONE**: not resolved
Also record *why* attribution failed:
* “permission denied”
* “flow already gone”
* “unsupported transport”
* “ambiguous mapping”
This makes debugging much easier.
---
# 7) What PID/PPID adds to your leak definitions
### Leak-A (plaintext DNS outside safe path)
Now you can say:
> “`svchost.exe (PID 1234)` sent UDP/53 to ISP resolver on Wi-Fi interface”
### Leak-B (split-policy intent leak)
You can catch:
* “game launcher looked up blocked domain”
* “system service triggered a sensitive name unexpectedly”
* “your proxy itself isn't actually resolving via its own channel”
### Leak-C (encrypted DNS bypass)
This becomes *very actionable*:
> “`firefox.exe` started direct DoH to resolver outside tunnel”
### Leak-D (mismatch indicator)
You can also correlate:
* DNS resolved by one process
* connection made by another process
(e.g., local stub vs app)
---
# 8) Reporting / realtime logging format (updated)
## Realtime log line (human readable)
Example (conceptual):
* **[P0][Leak-A] Plain DNS leaked**
* Domain: `example-sensitive.com` (A)
* From: `Wi-Fi` → To: `1.2.3.4:53`
* Process: `browser.exe` **PID=4321 PPID=1200**
* Policy violated: “No UDP/53 on physical NIC”
## Structured event (JSON-style fields)
Minimum recommended fields:
### Event identity
* `event_id`
* `timestamp`
### DNS identity
* `transport` (udp53/tcp53/dot/doh/unknown)
* `qname` (nullable)
* `qtype` (nullable)
### Network path
* `interface_name`
* `src_ip`, `src_port`
* `dst_ip`, `dst_port`
* `route_class` (tunnel / physical / loopback)
### Process identity (your requested additions)
* `pid`
* `ppid`
* `process_name`
* optional:
* `exe_path`
* `cmdline`
* `user`
### Detection result
* `leak_type` (A/B/C/D)
* `severity` (P0..P3)
* `policy_rule_id`
* `attribution_confidence`
---
# 9) Privacy and safety notes (important in a DNS tool)
Because you're logging **domains** and **process command lines**, this becomes sensitive.
Add a “privacy mode” policy:
* **Full**: store full domain + cmdline
* **Redacted**: hash domain; keep TLD only; truncate cmdline
* **Minimal**: only keep leak counts + resolver IPs + process name
Also allow “capture window” (rotate logs, avoid giant histories).
---
# 10) UX feature: “Show me the process tree”
When a leak happens, a good debugger view is:
* `PID: foo (pid 1000)`
* `PPID: bar (pid 900)`
* `PPID: systemd/svchost/etc`
This is extremely useful to identify:
* browsers spawning helpers
* OS DNS services
* containerized processes
* update agents / telemetry daemons
So your report generator should support:
**Process chain rendering** (where possible)
---
# 11) Practical edge cases you should detect (with PID helping)
1. **Local stub is fine, upstream isn't**
* Your local resolver process leaks upstream plaintext DNS
2. **Browser uses its own DoH**
* process attribution immediately reveals it
3. **Multiple interfaces**
* a leak only happens on Wi-Fi but not Ethernet
4. **Kill-switch failure**
* when tunnel drops, PID shows which app starts leaking first
---

View File

@@ -0,0 +1,42 @@
# DNS Leak Detection - Implementation Status
This document tracks the current DNS leak detector implementation against the design in
`docs/dns_leak_detection_design.md` and `docs/requirement_docs_v0.4.md`.
## Implemented
- New `wtfnet-dnsleak` crate with passive capture (pcap feature).
- Transport classification:
- Plain DNS (UDP/53, TCP/53) with qname/qtype parsing.
- DoT (TCP/853) detection.
- DoH detection is not implemented (skipped for now).
- Leak rules:
- Leak-A (plaintext DNS outside safe path).
- Leak-B (split-policy intent leak based on proxy-required/allowlist domains).
- Leak-C (encrypted DNS bypass for DoT).
- Policy profiles: `full-tunnel`, `proxy-stub`, `split`.
- Privacy modes: full/redacted/minimal (redacts qname).
- Process attribution:
- Best-effort `FlowOwnerProvider` with Linux `/proc` and Windows `netstat` lookups.
- Confidence levels and failure reasons exposed in events.
- CLI commands:
- `dns leak status`
- `dns leak watch`
- `dns leak report`
- `dns leak watch --iface-diag` (diagnostics for capture-capable interfaces).
- Interface selection:
- per-interface open timeout to avoid capture hangs
- stable default pick (up, non-loopback, named ethernet/wlan) before fallback scan
## Partially implemented
- Route/interface classification: heuristic only (loopback/tunnel/physical by iface name).
- Safe path matching: allowed ifaces/dests/ports/processes; no route-based policy.
## Not implemented (v0.4 backlog)
- DoH heuristic detection (SNI/endpoint list/traffic shape).
- Leak-D mismatch correlation (DNS -> TCP/TLS flows).
- GeoIP enrichment of leak events.
- Process tree reporting (PPID chain).
## Known limitations
- On Windows, pcap capture may require selecting a specific NPF interface; use
`dns leak watch --iface-diag` to list interfaces that can be opened.

View File

@@ -9,21 +9,22 @@ This document tracks current implementation status against the original design i
- GeoIP: local GeoLite2 Country + ASN support.
- Probe: ping/tcping/trace with GeoIP enrichment.
- DNS: Hickory-based query/detect with best-effort heuristics.
- HTTP: head/get via reqwest.
- DNS leak detection: new passive monitor with policy and privacy modes (best-effort).
- HTTP: head/get via reqwest with best-effort timing breakdown and optional HTTP/3 (feature-gated).
- TLS: rustls-based handshake/verify/cert/alpn.
- Discover: mDNS/SSDP bounded collection.
- Discover: mDNS/SSDP bounded collection plus LLMNR/NBNS (best-effort).
- Diag: bundle export in zip.
## Deviations or refinements
- DNS adds DoT/DoH and SOCKS5 proxy support.
- HTTP/TLS/TCP ping include SOCKS5 proxy support.
- HTTP timing breakdown is best-effort: `dns_ms` and `ttfb_ms` are captured; `connect_ms`/`tls_ms` remain placeholders.
- HTTP timing breakdown is best-effort: `dns_ms`/`ttfb_ms` are captured; `connect_ms`/`tls_ms` are measured via a separate probe and can be `null` with warnings.
- DNS watch uses `pnet` and is feature-gated as best-effort.
## Gaps vs design (as of now)
- HTTP/3 not implemented.
- HTTP/3 is feature-gated and incomplete; not enabled in default builds.
- TLS verification is rustls-based (no OS-native verifier).
- Discover does not include LLMNR/NBNS.
- DNS leak DoH detection is heuristic and currently optional.
## Current stage summary
- v0.1 scope is complete.

View File

@@ -0,0 +1,484 @@
Yep — your DNS leak detector design **fits your current architecture cleanly**, and **making it a dedicated subcrate is the right move**.
Your existing workspace already follows “thin CLI + feature crates + platform traits + OS dispatch”, so the detector drops in naturally as **one new feature crate** plus a small extension to `wtfnet-platform` for **best-effort process attribution**. This matches the design's pipeline idea: **Traffic Sensor → Classifier → (Process Attribution) → Leak Rules → Report**
Below I'll do two things:
1. **Evaluate fit + integration plan** (what needs changing / what can be reused)
2. Provide **repo-ready v0.4.0 documents**, focused on implementing this detector.
---
# 1) Can this design fit your current architecture?
## ✅ Yes — it matches your crate boundaries almost 1:1
Your detector design already breaks down into modules that map directly into a new subcrate:
* Policy & Configuration
* Passive Traffic Sensor (UDP53/TCP53/853/“DoH-ish”)
* Classifier (DNS / DoT / DoH + confidence)
* DNS Parser (plaintext only)
* Flow Tracker (interface/route/process correlation)
* Leak Detector rules A/B/C/D
* Report generator + structured events
So: **new crate = `wtfnet-dnsleak`**, and keep `wtfnet-dns` for **active query/detect/watch** (already exists).
## ✅ What you can reuse immediately
### Reuse from your current codebase
* `wtfnet-dns watch` capture plumbing (you already have passive-ish capture)
* existing DNS parsing logic (for UDP/TCP 53)
* existing GeoIP enrichment pipeline (optional)
* `wtfnet-platform` interface snapshot + routing info (for “which interface leaked?”)
* your JSON output envelope + logging style
### Reuse from the detector design directly
* Leak definitions A/B/C/D (this is already precise and CLI-tool friendly)
* DoH recognition levels + confidence model (strong → weak)
* “safe DNS path” abstraction (interfaces/dests/process/ports)
* process attribution confidence levels and failure reasons
* privacy modes (Full/Redacted/Minimal)
## ✅ What you'll need to add (small + contained)
### 1) New platform trait for flow → process owner (best effort)
Your design explicitly wants PID/PPID/process name to show “who leaked DNS”, and it recommends an OS-specific provider interface.
So extend `wtfnet-platform` with something like:
* `FlowOwnerProvider::owner_of(tuple, timestamp) -> ProcessInfo + confidence`
This stays consistent with your existing “platform traits + OS dispatch” architecture.
### 2) Route/interface classification (“tunnel vs physical vs loopback”)
Your event schema wants `route_class` (tunnel/physical/loopback).
Implement this via **policy-configured interface labels** + a small heuristic fallback.
### 3) DoH detection will be heuristic (and thats OK)
Your design already plans for this: DoH classification is “hard”, confidence-based.
In v0.4, keep it simple:
* DoT = TCP/853
* Plain DNS = UDP/TCP 53 (+ parse QNAME)
* “DoH-ish” = TCP/443 to known resolver host/IP OR “small HTTPS bursts” pattern
…and attach confidence.
---
# 2) v0.4.0 documents (repo-ready)
Below are two docs you can drop into `docs/`.
---
## `docs/RELEASE_v0.4.0.md`
```markdown
# WTFnet v0.4.0 — DNS Leak Detection
v0.4.0 introduces a client-side DNS leak detector aimed at censorship-resistance threat models:
detect when DNS behavior escapes the intended safe path. The detector focuses on evidence:
transport, interface, destination, and (best-effort) process attribution.
This release does NOT include HTTP/3 or OS-native TLS verification.
---
## 0) Summary
New major capability: `dns leak` command group.
Core idea:
Passive monitor captures outbound DNS-like traffic → classify (Plain DNS / DoT / DoH) →
enrich with interface/route/process metadata → evaluate leak definitions (A/B/C/D) →
emit events + summary report.
Leak definitions are explicit:
- Leak-A: plaintext DNS outside safe path
- Leak-B: split-policy intent leak (proxy-required domains resolved via ISP/local path)
- Leak-C: encrypted DNS escape/bypass (DoH/DoT outside approved egress)
- Leak-D: mismatch risk indicator (DNS egress differs from TCP/TLS egress)
---
## 1) Goals
### G1. Detect DNS leaks without needing special test domains
Passive detection must work continuously and produce evidence.
### G2. Support censorship-resistance leak definitions
Include both classic VPN-bypass leaks and split-policy intent leaks.
### G3. Best-effort process attribution
Attach PID/PPID/process name when OS allows; degrade gracefully with confidence.
### G4. Privacy-aware by default
Support privacy modes: Full / Redacted / Minimal.
---
## 2) Non-goals (v0.4.0)
- No "doctor" / smart one-shot diagnosis command
- No shell completions / man pages
- No HTTP/3 support
- No OS-native TLS verifier integration
- No firewall modification / "kill switch" management (detection only)
---
## 3) New crates / architecture changes
### 3.1 New subcrate: `wtfnet-dnsleak`
Responsibilities:
- passive sensor (pcap/pnet feature-gated)
- DNS parser (plaintext only)
- transport classifier: udp53/tcp53/dot/doh (confidence)
- flow tracker + metadata enrichment
- process attribution integration
- leak rules engine (A/B/C/D)
- structured event + summary report builder
### 3.2 `wtfnet-platform` extension: flow ownership lookup
Add a new trait:
- FlowOwnerProvider: map observed traffic 5-tuple → process info (best-effort)
Return process attribution confidence:
HIGH/MEDIUM/LOW/NONE plus failure reason.
---
## 4) CLI scope
### 4.1 Commands
New command group:
#### `wtfn dns leak watch`
Start passive monitoring for a bounded duration (default 10s):
- classify transports (udp53/tcp53/dot/doh)
- apply leak rules and emit events + summary
#### `wtfn dns leak status`
Print baseline snapshot:
- interfaces + routes
- system DNS configuration
- active policy summary
#### `wtfn dns leak report`
Parse a saved events file and produce a human summary.
### 4.2 Flags (proposed)
Common:
- `--duration <Ns|Nms>` (default 10s)
- `--iface <name>` (optional capture interface)
- `--policy <path>` (JSON policy file)
- `--profile <full-tunnel|proxy-stub|split>` (built-in presets)
- `--privacy <full|redacted|minimal>` (default redacted)
- `--geoip` (include GeoIP in event outputs)
- `--out <path>` (write JSON report/events)
---
## 5) Policy model (v0.4.0)
Safe DNS path constraints can be defined by:
- allowed interfaces: loopback/tunnel
- allowed destination set: proxy IPs, internal resolvers
- allowed processes: only local stub/proxy can resolve upstream
- allowed ports: e.g. only 443 to proxy server
A DNS event is a leak if it violates safe-path constraints.
Built-in profiles:
1) full-tunnel VPN style
2) proxy + local stub (default, censorship model)
3) split policy
---
## 6) Outputs
### 6.1 Leak events (structured)
Each LeakEvent includes:
- timestamp
- transport: udp53/tcp53/dot/doh/unknown
- qname/qtype (nullable)
- interface + route_class
- dst ip:port
- process info (nullable) + attribution confidence
- leak_type: A/B/C/D
- severity: P0..P3
- evidence fields + optional geoip
### 6.2 Summary report
- leak counts by type
- top leaking processes (if available)
- top resolver destinations
- timeline/burst hints
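
One possible shape for the summary report above (a sketch only; field names are illustrative):

    // Possible summary-report shape; everything here is an assumption, not final schema.
    use std::collections::BTreeMap;

    #[derive(Debug, Default)]
    pub struct SummaryReport {
        /// Leak counts keyed by type ("A".."D").
        pub counts_by_type: BTreeMap<String, u64>,
        /// Top leaking processes, when attribution was available.
        pub top_processes: Vec<(String, u64)>,
        /// Top resolver destinations as "ip:port" strings.
        pub top_destinations: Vec<(String, u64)>,
        /// Coarse timeline buckets (offset seconds, event count) used for burst hints.
        pub timeline: Vec<(u64, u64)>,
    }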
---
## 7) Deliverables checklist
MUST:
- new `wtfnet-dnsleak` crate integrated into workspace + CLI
- passive capture for UDP/TCP 53 and TCP 853
- DoH heuristic classification (confidence-based)
- policy engine + Leak-A/B/C/D rules
- structured events + human summary
- privacy modes full/redacted/minimal
- best-effort process attribution with confidence and failure reason
SHOULD:
- saved report file support (`--out report.json`)
- route_class inference with policy hints + heuristics
NICE:
- correlation_id (DNS → subsequent TCP/TLS connection) for Leak-D mismatch indicator
---
## 8) Definition of Done
- v0.4.0 builds on Linux (Debian/Ubuntu) and Windows
- `wtfn dns leak watch` detects:
- plaintext DNS leaving physical interface (Leak-A)
- DoT traffic leaving outside approved egress (Leak-C)
- DoH-ish encrypted resolver traffic outside policy (Leak-C)
- events include interface + dst + (best-effort) PID/process info
- output remains stable and additive; no breaking change to v0.3 commands
```
---
## `docs/DNS_LEAK_DETECTOR_IMPLEMENTATION.md`
```markdown
# DNS Leak Detector — Implementation Guide (v0.4)
This document explains how to implement the DNS leak detector as a new subcrate in WTFnet.
---
## 1) New crate: `wtfnet-dnsleak`
### 1.1 Module layout
crates/wtfnet-dnsleak/src/
- lib.rs
- policy.rs # safe path constraints + presets
- sensor.rs # passive capture -> normalized TrafficEvent stream
- classify.rs # transport classification + confidence
- parse_dns.rs # plaintext DNS parser: qname/qtype
- attrib.rs # process attribution integration (platform provider)
- route.rs # interface/route classification (tunnel/physical/loopback)
- rules.rs # Leak-A/B/C/D evaluation
- report.rs # LeakEvent + SummaryReport builders
- privacy.rs # full/redacted/minimal redaction logic
---
## 2) Core data types
### 2.1 TrafficEvent (raw from sensor)
Fields:
- ts: timestamp
- proto: udp/tcp
- src_ip, src_port
- dst_ip, dst_port
- iface_name (capture interface if known)
- payload: optional bytes (only for plaintext DNS parsing)
### 2.2 ClassifiedEvent
Adds:
- transport: udp53/tcp53/dot/doh/unknown
- doh_confidence: HIGH/MEDIUM/LOW (only if doh)
- qname/qtype: nullable
### 2.3 EnrichedEvent
Adds:
- route_class: loopback/tunnel/physical/unknown
- process info: pid/ppid/name (nullable)
- attribution_confidence: HIGH/MEDIUM/LOW/NONE
- attrib_failure_reason: optional string
- geoip: optional
### 2.4 LeakEvent (final output)
Adds:
- leak_type: A/B/C/D
- severity: P0..P3
- policy_rule_id
- evidence: minimal structured evidence
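
Pulled together as Rust types, the §2 pipeline could look roughly like this (names follow the
lists above; everything else is an assumption):

    // Sketch of the §2 pipeline types; names mirror the lists above, details are assumed.
    use std::net::IpAddr;
    use std::time::SystemTime;

    #[derive(Clone, Copy, Debug)]
    pub enum Proto { Udp, Tcp }

    #[derive(Clone, Copy, Debug)]
    pub enum Transport { Udp53, Tcp53, Dot, Doh, Unknown }

    #[derive(Clone, Copy, Debug)]
    pub enum Confidence { High, Medium, Low, None }

    #[derive(Clone, Copy, Debug)]
    pub enum RouteClass { Loopback, Tunnel, Physical, Unknown }

    #[derive(Clone, Debug)]
    pub struct TrafficEvent {
        pub ts: SystemTime,
        pub proto: Proto,
        pub src_ip: IpAddr,
        pub src_port: u16,
        pub dst_ip: IpAddr,
        pub dst_port: u16,
        pub iface_name: Option<String>,
        /// Only retained when needed for plaintext DNS parsing.
        pub payload: Option<Vec<u8>>,
    }

    #[derive(Clone, Debug)]
    pub struct ClassifiedEvent {
        pub raw: TrafficEvent,
        pub transport: Transport,
        pub doh_confidence: Option<Confidence>,
        pub qname: Option<String>,
        pub qtype: Option<u16>,
    }

    #[derive(Clone, Debug)]
    pub struct EnrichedEvent {
        pub classified: ClassifiedEvent,
        pub route_class: RouteClass,
        pub pid: Option<u32>,
        pub ppid: Option<u32>,
        pub process_name: Option<String>,
        pub attribution_confidence: Confidence,
        pub attrib_failure_reason: Option<String>,
        pub geoip: Option<String>,
    }

    #[derive(Clone, Copy, Debug)]
    pub enum LeakType { A, B, C, D }

    #[derive(Clone, Copy, Debug)]
    pub enum Severity { P0, P1, P2, P3 }

    #[derive(Clone, Debug)]
    pub struct LeakEvent {
        pub enriched: EnrichedEvent,
        pub leak_type: LeakType,
        pub severity: Severity,
        pub policy_rule_id: String,
        /// Minimal structured evidence (kept small by design).
        pub evidence: Vec<(String, String)>,
    }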
---
## 3) Platform integration: Process Attribution Engine (PAE)
### 3.1 Trait addition (wtfnet-platform)
Add:
trait FlowOwnerProvider {
fn owner_of(
&self,
proto: Proto,
src_ip: IpAddr,
src_port: u16,
dst_ip: IpAddr,
dst_port: u16,
ts: SystemTime,
) -> FlowOwnerResult;
}
FlowOwnerResult:
- pid, ppid, process_name (optional)
- confidence: HIGH/MEDIUM/LOW/NONE
- failure_reason: optional string
Design rule: attribution is best-effort and never blocks leak detection.
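
A self-contained version of the trait sketch above (the supporting enums are duplicated so the
snippet stands alone; anything not named in the text is an assumption):

    // Fully-typed version of the trait above; everything beyond the listed names is assumed.
    use std::net::IpAddr;
    use std::time::SystemTime;

    #[derive(Clone, Copy, Debug)]
    pub enum Proto { Udp, Tcp }

    #[derive(Clone, Copy, Debug)]
    pub enum Confidence { High, Medium, Low, None }

    #[derive(Clone, Debug)]
    pub struct FlowOwnerResult {
        pub pid: Option<u32>,
        pub ppid: Option<u32>,
        pub process_name: Option<String>,
        pub confidence: Confidence,
        pub failure_reason: Option<String>,
    }

    pub trait FlowOwnerProvider {
        /// Best-effort lookup: implementations should never block leak detection;
        /// on failure, return `Confidence::None` plus a failure_reason instead of erroring.
        fn owner_of(
            &self,
            proto: Proto,
            src_ip: IpAddr,
            src_port: u16,
            dst_ip: IpAddr,
            dst_port: u16,
            ts: SystemTime,
        ) -> FlowOwnerResult;
    }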
---
## 4) Transport classification logic
### 4.1 Plain DNS
Match:
- UDP dst port 53 OR TCP dst port 53
Parse QNAME/QTYPE from payload.
### 4.2 DoT
Match:
- TCP dst port 853
### 4.3 DoH (heuristic)
Match candidates:
- TCP dst port 443 AND (one of):
- dst IP in configured DoH resolver list
- dst SNI matches known DoH provider list (if available)
- a pattern of frequent small HTTPS bursts (weak signal)
Attach confidence:
- MEDIUM: known endpoint match
- LOW: traffic-shape heuristic only
Important: the goal is to detect encrypted resolver traffic that bypasses the proxy channel,
not to conclusively prove DoH via payload inspection.
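
Condensed into a single classification step, §4.1–4.3 could look like this (the known-endpoint
set and the confidence mapping are assumptions; SNI and traffic-shape heuristics are left as a
comment):

    // Port-based classification per §4; DoH detection here uses only the endpoint list.
    use std::collections::HashSet;
    use std::net::IpAddr;

    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    pub enum Transport { Udp53, Tcp53, Dot, Doh, Unknown }

    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    pub enum Confidence { High, Medium, Low }

    #[derive(Clone, Copy, Debug)]
    pub enum Proto { Udp, Tcp }

    /// Classify one observed flow by (proto, dst_port, dst_ip).
    pub fn classify(
        proto: Proto,
        dst_ip: IpAddr,
        dst_port: u16,
        known_doh_endpoints: &HashSet<IpAddr>,
    ) -> (Transport, Option<Confidence>) {
        match (proto, dst_port) {
            (Proto::Udp, 53) => (Transport::Udp53, None),
            (Proto::Tcp, 53) => (Transport::Tcp53, None),
            (Proto::Tcp, 853) => (Transport::Dot, None),
            // Heuristic DoH: 443 toward a configured resolver endpoint => MEDIUM.
            (Proto::Tcp, 443) if known_doh_endpoints.contains(&dst_ip) => {
                (Transport::Doh, Some(Confidence::Medium))
            }
            // SNI match or traffic-shape heuristics (LOW confidence) would slot in here.
            _ => (Transport::Unknown, None),
        }
    }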
---
## 5) Policy model
Policy defines "safe DNS path" constraints:
- allowed interfaces
- allowed destinations (IP/CIDR)
- allowed processes
- allowed ports
A DNS event is a leak if it violates safe-path constraints.
### 5.1 Built-in profiles
full-tunnel:
- allow DNS only via tunnel iface or loopback stub
- any UDP/TCP 53 on physical iface => Leak-A
proxy-stub (default):
- allow DNS only to loopback stub
- allow stub upstream only to proxy destinations
- flag direct DoH/DoT outside proxy path => Leak-C
split:
- allow plaintext DNS only for allowlisted domains
- unknown domains must resolve via the proxy path; plaintext resolution of them is Leak-B
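
A sketch of how the default proxy-stub preset might be constructed, reusing the policy shape
sketched in the design doc (all concrete values are placeholders):

    // Sketch of the default proxy-stub preset; all concrete values are placeholders.
    #[derive(Clone, Debug)]
    pub struct Policy {
        pub allowed_interfaces: Vec<String>,
        pub allowed_destinations: Vec<String>, // IPs/CIDRs as strings for brevity
        pub allowed_processes: Vec<String>,
        pub allowed_ports: Vec<u16>,
    }

    /// proxy-stub (default): DNS only to the loopback stub; the stub's upstream
    /// only to the proxy; direct DoH/DoT outside that path is Leak-C.
    pub fn proxy_stub_preset(proxy_dst: &str, stub_process: &str) -> Policy {
        Policy {
            allowed_interfaces: vec!["lo".into()],
            allowed_destinations: vec!["127.0.0.0/8".into(), proxy_dst.to_string()],
            allowed_processes: vec![stub_process.to_string()],
            allowed_ports: vec![53, 443],
        }
    }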
---
## 6) Leak rules (A/B/C/D)
Leak-A (plaintext escape):
- transport udp53/tcp53
- route_class != allowed
- dst not in allowed destination set
Leak-B (split policy intent leak):
- qname matches proxy-required set or "unknown"
- query observed going to an ISP/local resolver or out a non-tunnel iface
Leak-C (encrypted bypass):
- DoT or DoH flow exists
- not via approved egress path (iface/destination)
Leak-D (mismatch indicator):
- correlate qname to later TCP/TLS flows (optional v0.4 NICE)
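
A condensed sketch of the Leak-A and Leak-C checks (Leak-B needs the proxy-required domain set
and Leak-D needs flow correlation, both omitted; the boolean predicates are assumed to be
precomputed against the policy):

    // Condensed Leak-A / Leak-C evaluation; the precomputed predicates are assumptions.
    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    pub enum Transport { Udp53, Tcp53, Dot, Doh, Unknown }

    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    pub enum LeakType { A, B, C, D }

    pub struct Ctx {
        pub transport: Transport,
        pub route_allowed: bool, // route_class/iface is in the allowed set
        pub dst_allowed: bool,   // dst is in the allowed destination set
    }

    pub fn evaluate(ctx: &Ctx) -> Option<LeakType> {
        match ctx.transport {
            // Leak-A: plaintext DNS outside the safe path.
            Transport::Udp53 | Transport::Tcp53 if !ctx.route_allowed && !ctx.dst_allowed => {
                Some(LeakType::A)
            }
            // Leak-C: encrypted resolver traffic not via approved egress.
            Transport::Dot | Transport::Doh if !ctx.route_allowed && !ctx.dst_allowed => {
                Some(LeakType::C)
            }
            _ => None,
        }
    }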
---
## 7) Privacy modes
Because domains and cmdlines are sensitive, support:
- Full: store full qname and cmdline
- Redacted (default): hash qname or keep eTLD+1 only; truncate cmdline
- Minimal: no domains/cmdline; keep leak counts + resolver IPs + process name
Privacy mode applies in report builder, not in sensor.
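
A minimal sketch of the Redacted/Minimal behaviour; the "keep the last two labels" shortcut
stands in for a real public-suffix (eTLD+1) lookup, and the 64-char cmdline cut-off is
arbitrary:

    // Redaction applied in the report builder; the last-two-labels shortcut is a
    // stand-in for a real public-suffix (eTLD+1) lookup.
    #[derive(Clone, Copy, Debug)]
    pub enum Privacy { Full, Redacted, Minimal }

    pub fn redact_qname(mode: Privacy, qname: &str) -> Option<String> {
        match mode {
            Privacy::Full => Some(qname.to_string()),
            Privacy::Redacted => {
                let labels: Vec<&str> = qname.trim_end_matches('.').split('.').collect();
                let keep = labels.len().saturating_sub(2);
                Some(labels[keep..].join("."))
            }
            Privacy::Minimal => None, // no domains at all in Minimal mode
        }
    }

    pub fn redact_cmdline(mode: Privacy, cmdline: &str) -> Option<String> {
        match mode {
            Privacy::Full => Some(cmdline.to_string()),
            Privacy::Redacted => Some(cmdline.chars().take(64).collect()), // truncate
            Privacy::Minimal => None,
        }
    }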
---
## 8) CLI integration
Add under `dns` command group:
- `dns leak status`
- `dns leak watch`
- `dns leak report`
`watch` output:
- human-readable summary report by default
- with `--json`: a structured report including the full events list
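
As a rough clap layout (type names assumed; `LeakCommonArgs` is the flag struct sketched in the
design doc, trimmed here so the snippet stands alone):

    // Hypothetical clap 4 layout for the `dns leak` group; type names are assumed.
    use clap::{Args, Subcommand};

    /// Trimmed stand-in for the common flags sketched in the design doc.
    #[derive(Args, Debug)]
    pub struct LeakCommonArgs {
        #[arg(long, default_value = "10s")]
        pub duration: String,
    }

    #[derive(Subcommand, Debug)]
    pub enum DnsLeakCmd {
        /// Baseline snapshot: interfaces, routes, DNS config, policy summary.
        Status(LeakCommonArgs),
        /// Passive monitoring for the given duration; evaluates leak rules.
        Watch {
            #[command(flatten)]
            common: LeakCommonArgs,
            /// Structured report (with events list) instead of the human summary.
            #[arg(long)]
            json: bool,
        },
        /// Re-summarize a previously saved events file.
        Report {
            /// Path to a JSON events file produced by `watch --out`.
            path: std::path::PathBuf,
        },
    }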
---
## 9) Recommended incremental build plan
Phase 1 (core passive detection):
- sensor: udp/tcp capture
- classify: udp53/tcp53/dot
- parse plaintext qname/qtype
- policy: allowlist + allowed interfaces/dests
- leak rules: Leak-A + Leak-C (DoT)
- report: events + summary
Phase 2 (process attribution + DoH heuristics):
- platform FlowOwnerProvider impls
- DoH heuristic classification + confidence
- privacy modes
Phase 3 (optional correlation / Leak-D):
- flow tracker correlating DNS -> TCP/TLS connect events
- mismatch indicator output
```
---
# Small note about *where* it lives in your architecture
This design slots in with **minimal churn**:
* ✅ Add `crates/wtfnet-dnsleak`
* ✅ Add `dns leak ...` subcommands in CLI
* ✅ Add a **single** new platform trait for process attribution (best-effort)
* ✅ Reuse your existing `dns watch` capture approach as the sensor
…which is exactly what your design describes: passive monitoring + classification + rules + evidence output, with the PAE sitting in the pipeline as the event enricher.
**If it's too hard to detect DoH traffic, skip it.**
---
View File
@@ -26,8 +26,13 @@ This document tracks the planned roadmap alongside the current implementation st
- TLS extras: OCSP stapling indicator, richer cert parsing
- ports conns improvements (top talkers / summary)
- better baseline/diff for system roots
- optional HTTP/3 (feature-gated)
- optional LLMNR/NBNS discovery
- optional HTTP/3 (feature-gated; experimental, incomplete)
### v0.4 (dns leak detection)
- dns leak detector (passive watch + report)
- process attribution (best-effort)
- policy profiles + privacy modes
## Current stage
@@ -63,13 +68,26 @@ This document tracks the planned roadmap alongside the current implementation st
- HTTP crate with head/get support, timing breakdown, optional GeoIP, and SOCKS5 proxy.
- TLS crate with handshake/verify/cert/alpn support in CLI (SOCKS5 proxy supported).
- TCP ping supports SOCKS5 proxy.
- v0.3: probe trace per-hop stats + rdns support.
- v0.3: http connect/tls timing best-effort with warnings.
- v0.3: ports conns (active TCP connections + summaries).
- v0.3: TLS extras (OCSP flag + richer cert parsing).
- v0.3: cert baseline/diff improvements.
- v0.3: HTTP/3 request path (feature-gated; experimental, incomplete).
- v0.3: HTTP/3 error classification (feature-gated).
- v0.4: platform flow-owner lookup (best-effort).
- v0.4: dns leak detector crate + CLI commands (status/watch/report).
- Discover crate with mdns/ssdp commands.
- Discover llmnr/nbns (best-effort).
- Diag crate with report and bundle export.
- Basic unit tests for calc and TLS parsing.
### In progress
- v0.3: probe trace upgrades (per-hop stats + rdns).
- v0.4: DoH heuristic classification (optional).
- v0.4: Leak-D mismatch correlation (optional).
- v0.3: optional HTTP/3 (feature-gated; keep disabled until stabilized).
### Next
- Complete v0.3 trace upgrades and update CLI output.
- Update docs/README/COMMANDS for v0.4.
- Add v0.2 tests (dns detect, basic http/tls smoke).
- Track DNS leak design status in `docs/dns_leak_implementation_status.md`.