Compare commits

...

13 Commits

Author SHA1 Message Date
DaZuo0122
5a23175a83 Update: Build instructions for just 2026-02-02 13:00:19 +08:00
DaZuo0122
57492ab654 Add: Justfile to replace cmake and make 2026-02-02 12:32:56 +08:00
DaZuo0122
7054ff77a7 Fix: http/3 alpn bugs 2026-01-18 23:05:41 +08:00
DaZuo0122
9bcb7549f3 Bump version to 0.4.0 2026-01-17 22:09:23 +08:00
DaZuo0122
1da9b915d8 Update documents 2026-01-17 20:13:37 +08:00
DaZuo0122
94762d139a Add: flag to make watch keep running 2026-01-17 20:07:13 +08:00
DaZuo0122
f349d4b4fa Add: description in help message 2026-01-17 19:49:53 +08:00
DaZuo0122
7f6ee839b2 Add: Leak-D for dns leak detection 2026-01-17 19:42:54 +08:00
DaZuo0122
a82a7fe2ad Add: include interface pickup failure in log 2026-01-17 19:10:52 +08:00
DaZuo0122
d5b92ede7b Fix: main thread timeout early than work thread 2026-01-17 19:07:10 +08:00
DaZuo0122
144e801e13 Add: verbose for dns leak iface picking process 2026-01-17 18:53:07 +08:00
DaZuo0122
cfa96bde08 Add: dns leak detection 2026-01-17 18:45:24 +08:00
DaZuo0122
ccd4a31d21 Add: H3 support - incomplete 2026-01-17 13:47:37 +08:00
38 changed files with 6199 additions and 252 deletions

1
.gitignore vendored
View File

@@ -1,2 +1,3 @@
/target /target
/data /data
/dist

View File

@@ -1,41 +0,0 @@
cmake_minimum_required(VERSION 3.20)
project(wtfnet LANGUAGES NONE)
set(CARGO_CMD cargo)
set(CARGO_TARGET_DIR "${CMAKE_BINARY_DIR}/cargo-target")
set(BIN_NAME "wtfn${CMAKE_EXECUTABLE_SUFFIX}")
set(BIN_PATH "${CARGO_TARGET_DIR}/release/${BIN_NAME}")
file(READ "${CMAKE_SOURCE_DIR}/crates/wtfnet-cli/Cargo.toml" CLI_TOML)
string(REGEX MATCH "version = \"([0-9]+\\.[0-9]+\\.[0-9]+)\"" CLI_VERSION_MATCH "${CLI_TOML}")
if(CMAKE_MATCH_1)
set(PACKAGE_VERSION "${CMAKE_MATCH_1}")
else()
set(PACKAGE_VERSION "0.1.0")
endif()
add_custom_command(
OUTPUT "${BIN_PATH}"
COMMAND "${CMAKE_COMMAND}" -E env CARGO_TARGET_DIR="${CARGO_TARGET_DIR}"
"${CARGO_CMD}" build --release --workspace --bin wtfn
WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}"
COMMENT "Building wtfn with cargo"
VERBATIM
)
add_custom_target(wtfnet_build ALL DEPENDS "${BIN_PATH}")
install(PROGRAMS "${BIN_PATH}" DESTINATION bin)
install(DIRECTORY "${CMAKE_SOURCE_DIR}/data" DESTINATION share/wtfnet)
add_dependencies(install wtfnet_build)
set(CPACK_PACKAGE_NAME "wtfnet")
set(CPACK_PACKAGE_VERSION "${PACKAGE_VERSION}")
set(CPACK_PACKAGE_FILE_NAME "wtfnet-${PACKAGE_VERSION}-${CMAKE_SYSTEM_NAME}-${CMAKE_SYSTEM_PROCESSOR}")
if(WIN32)
set(CPACK_GENERATOR "ZIP")
else()
set(CPACK_GENERATOR "TGZ")
endif()
include(CPack)

528
Cargo.lock generated
View File

@@ -217,12 +217,24 @@ dependencies = [
"shlex", "shlex",
] ]
[[package]]
name = "cesu8"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c"
[[package]] [[package]]
name = "cfg-if" name = "cfg-if"
version = "1.0.4" version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
[[package]]
name = "cfg_aliases"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
[[package]] [[package]]
name = "cipher" name = "cipher"
version = "0.4.4" version = "0.4.4"
@@ -279,6 +291,16 @@ version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
[[package]]
name = "combine"
version = "4.6.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd"
dependencies = [
"bytes",
"memchr",
]
[[package]] [[package]]
name = "concurrent-queue" name = "concurrent-queue"
version = "2.5.0" version = "2.5.0"
@@ -304,6 +326,16 @@ dependencies = [
"libc", "libc",
] ]
[[package]]
name = "core-foundation"
version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6"
dependencies = [
"core-foundation-sys",
"libc",
]
[[package]] [[package]]
name = "core-foundation-sys" name = "core-foundation-sys"
version = "0.8.7" version = "0.8.7"
@@ -447,6 +479,18 @@ dependencies = [
"windows-sys 0.61.2", "windows-sys 0.61.2",
] ]
[[package]]
name = "fastbloom"
version = "0.14.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4e7f34442dbe69c60fe8eaf58a8cafff81a1f278816d8ab4db255b3bef4ac3c4"
dependencies = [
"getrandom 0.3.4",
"libm",
"rand 0.9.2",
"siphasher",
]
[[package]] [[package]]
name = "fastrand" name = "fastrand"
version = "2.3.0" version = "2.3.0"
@@ -511,6 +555,21 @@ dependencies = [
"percent-encoding", "percent-encoding",
] ]
[[package]]
name = "futures"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876"
dependencies = [
"futures-channel",
"futures-core",
"futures-executor",
"futures-io",
"futures-sink",
"futures-task",
"futures-util",
]
[[package]] [[package]]
name = "futures-channel" name = "futures-channel"
version = "0.3.31" version = "0.3.31"
@@ -518,6 +577,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10"
dependencies = [ dependencies = [
"futures-core", "futures-core",
"futures-sink",
] ]
[[package]] [[package]]
@@ -526,12 +586,34 @@ version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e"
[[package]]
name = "futures-executor"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f"
dependencies = [
"futures-core",
"futures-task",
"futures-util",
]
[[package]] [[package]]
name = "futures-io" name = "futures-io"
version = "0.3.31" version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6"
[[package]]
name = "futures-macro"
version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
dependencies = [
"proc-macro2",
"quote",
"syn",
]
[[package]] [[package]]
name = "futures-sink" name = "futures-sink"
version = "0.3.31" version = "0.3.31"
@@ -550,8 +632,13 @@ version = "0.3.31"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81"
dependencies = [ dependencies = [
"futures-channel",
"futures-core", "futures-core",
"futures-io",
"futures-macro",
"futures-sink",
"futures-task", "futures-task",
"memchr",
"pin-project-lite", "pin-project-lite",
"pin-utils", "pin-utils",
"slab", "slab",
@@ -574,8 +661,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0"
dependencies = [ dependencies = [
"cfg-if", "cfg-if",
"js-sys",
"libc", "libc",
"wasi", "wasi",
"wasm-bindgen",
] ]
[[package]] [[package]]
@@ -585,9 +674,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd"
dependencies = [ dependencies = [
"cfg-if", "cfg-if",
"js-sys",
"libc", "libc",
"r-efi", "r-efi",
"wasip2", "wasip2",
"wasm-bindgen",
] ]
[[package]] [[package]]
@@ -607,7 +698,7 @@ dependencies = [
"futures-core", "futures-core",
"futures-sink", "futures-sink",
"futures-util", "futures-util",
"http", "http 0.2.12",
"indexmap", "indexmap",
"slab", "slab",
"tokio", "tokio",
@@ -615,6 +706,34 @@ dependencies = [
"tracing", "tracing",
] ]
[[package]]
name = "h3"
version = "0.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10872b55cfb02a821b69dc7cf8dc6a71d6af25eb9a79662bec4a9d016056b3be"
dependencies = [
"bytes",
"fastrand",
"futures-util",
"http 1.4.0",
"pin-project-lite",
"tokio",
]
[[package]]
name = "h3-quinn"
version = "0.0.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8b2e732c8d91a74731663ac8479ab505042fbf547b9a207213ab7fbcbfc4f8b4"
dependencies = [
"bytes",
"futures",
"h3",
"quinn",
"tokio",
"tokio-util",
]
[[package]] [[package]]
name = "hashbrown" name = "hashbrown"
version = "0.16.1" version = "0.16.1"
@@ -648,12 +767,12 @@ dependencies = [
"futures-io", "futures-io",
"futures-util", "futures-util",
"h2", "h2",
"http", "http 0.2.12",
"idna", "idna",
"ipnet", "ipnet",
"once_cell", "once_cell",
"rand 0.8.5", "rand 0.8.5",
"rustls", "rustls 0.21.12",
"rustls-native-certs 0.6.3", "rustls-native-certs 0.6.3",
"rustls-pemfile 1.0.4", "rustls-pemfile 1.0.4",
"thiserror 1.0.69", "thiserror 1.0.69",
@@ -679,7 +798,7 @@ dependencies = [
"parking_lot", "parking_lot",
"rand 0.8.5", "rand 0.8.5",
"resolv-conf", "resolv-conf",
"rustls", "rustls 0.21.12",
"rustls-native-certs 0.6.3", "rustls-native-certs 0.6.3",
"smallvec", "smallvec",
"thiserror 1.0.69", "thiserror 1.0.69",
@@ -708,6 +827,16 @@ dependencies = [
"itoa", "itoa",
] ]
[[package]]
name = "http"
version = "1.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a"
dependencies = [
"bytes",
"itoa",
]
[[package]] [[package]]
name = "http-body" name = "http-body"
version = "0.4.6" version = "0.4.6"
@@ -715,7 +844,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
dependencies = [ dependencies = [
"bytes", "bytes",
"http", "http 0.2.12",
"pin-project-lite", "pin-project-lite",
] ]
@@ -742,7 +871,7 @@ dependencies = [
"futures-core", "futures-core",
"futures-util", "futures-util",
"h2", "h2",
"http", "http 0.2.12",
"http-body", "http-body",
"httparse", "httparse",
"httpdate", "httpdate",
@@ -762,9 +891,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590"
dependencies = [ dependencies = [
"futures-util", "futures-util",
"http", "http 0.2.12",
"hyper", "hyper",
"rustls", "rustls 0.21.12",
"tokio", "tokio",
"tokio-rustls", "tokio-rustls",
] ]
@@ -930,6 +1059,9 @@ name = "ipnet"
version = "2.11.0" version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130"
dependencies = [
"serde",
]
[[package]] [[package]]
name = "ipnetwork" name = "ipnetwork"
@@ -952,6 +1084,28 @@ version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2"
[[package]]
name = "jni"
version = "0.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97"
dependencies = [
"cesu8",
"cfg-if",
"combine",
"jni-sys",
"log",
"thiserror 1.0.69",
"walkdir",
"windows-sys 0.45.0",
]
[[package]]
name = "jni-sys"
version = "0.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130"
[[package]] [[package]]
name = "jobserver" name = "jobserver"
version = "0.1.34" version = "0.1.34"
@@ -984,6 +1138,12 @@ version = "0.2.180"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" checksum = "bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc"
[[package]]
name = "libm"
version = "0.2.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de"
[[package]] [[package]]
name = "linked-hash-map" name = "linked-hash-map"
version = "0.5.6" version = "0.5.6"
@@ -1026,6 +1186,12 @@ dependencies = [
"linked-hash-map", "linked-hash-map",
] ]
[[package]]
name = "lru-slab"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154"
[[package]] [[package]]
name = "matchers" name = "matchers"
version = "0.2.0" version = "0.2.0"
@@ -1108,10 +1274,10 @@ dependencies = [
"libc", "libc",
"log", "log",
"openssl", "openssl",
"openssl-probe", "openssl-probe 0.1.6",
"openssl-sys", "openssl-sys",
"schannel", "schannel",
"security-framework", "security-framework 2.11.1",
"security-framework-sys", "security-framework-sys",
"tempfile", "tempfile",
] ]
@@ -1240,6 +1406,12 @@ version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
[[package]]
name = "openssl-probe"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9f50d9b3dabb09ecd771ad0aa242ca6894994c130308ca3d7684634df8037391"
[[package]] [[package]]
name = "openssl-sys" name = "openssl-sys"
version = "0.9.111" version = "0.9.111"
@@ -1482,6 +1654,64 @@ dependencies = [
"unicode-ident", "unicode-ident",
] ]
[[package]]
name = "quinn"
version = "0.11.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20"
dependencies = [
"bytes",
"cfg_aliases",
"futures-io",
"pin-project-lite",
"quinn-proto",
"quinn-udp",
"rustc-hash",
"rustls 0.23.36",
"socket2 0.6.1",
"thiserror 2.0.17",
"tokio",
"tracing",
"web-time",
]
[[package]]
name = "quinn-proto"
version = "0.11.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31"
dependencies = [
"bytes",
"fastbloom",
"getrandom 0.3.4",
"lru-slab",
"rand 0.9.2",
"ring",
"rustc-hash",
"rustls 0.23.36",
"rustls-pki-types",
"rustls-platform-verifier",
"slab",
"thiserror 2.0.17",
"tinyvec",
"tracing",
"web-time",
]
[[package]]
name = "quinn-udp"
version = "0.5.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd"
dependencies = [
"cfg_aliases",
"libc",
"once_cell",
"socket2 0.6.1",
"tracing",
"windows-sys 0.60.2",
]
[[package]] [[package]]
name = "quote" name = "quote"
version = "1.0.43" version = "1.0.43"
@@ -1606,7 +1836,7 @@ dependencies = [
"futures-core", "futures-core",
"futures-util", "futures-util",
"h2", "h2",
"http", "http 0.2.12",
"http-body", "http-body",
"hyper", "hyper",
"hyper-rustls", "hyper-rustls",
@@ -1619,7 +1849,7 @@ dependencies = [
"once_cell", "once_cell",
"percent-encoding", "percent-encoding",
"pin-project-lite", "pin-project-lite",
"rustls", "rustls 0.21.12",
"rustls-pemfile 1.0.4", "rustls-pemfile 1.0.4",
"serde", "serde",
"serde_json", "serde_json",
@@ -1635,7 +1865,7 @@ dependencies = [
"wasm-bindgen", "wasm-bindgen",
"wasm-bindgen-futures", "wasm-bindgen-futures",
"web-sys", "web-sys",
"webpki-roots", "webpki-roots 0.25.4",
"winreg", "winreg",
] ]
@@ -1659,6 +1889,12 @@ dependencies = [
"windows-sys 0.52.0", "windows-sys 0.52.0",
] ]
[[package]]
name = "rustc-hash"
version = "2.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
[[package]] [[package]]
name = "rusticata-macros" name = "rusticata-macros"
version = "4.1.0" version = "4.1.0"
@@ -1689,20 +1925,34 @@ checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e"
dependencies = [ dependencies = [
"log", "log",
"ring", "ring",
"rustls-webpki", "rustls-webpki 0.101.7",
"sct", "sct",
] ]
[[package]]
name = "rustls"
version = "0.23.36"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b"
dependencies = [
"once_cell",
"ring",
"rustls-pki-types",
"rustls-webpki 0.103.9",
"subtle",
"zeroize",
]
[[package]] [[package]]
name = "rustls-native-certs" name = "rustls-native-certs"
version = "0.6.3" version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00" checksum = "a9aace74cb666635c918e9c12bc0d348266037aa8eb599b5cba565709a8dff00"
dependencies = [ dependencies = [
"openssl-probe", "openssl-probe 0.1.6",
"rustls-pemfile 1.0.4", "rustls-pemfile 1.0.4",
"schannel", "schannel",
"security-framework", "security-framework 2.11.1",
] ]
[[package]] [[package]]
@@ -1711,11 +1961,23 @@ version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5"
dependencies = [ dependencies = [
"openssl-probe", "openssl-probe 0.1.6",
"rustls-pemfile 2.2.0", "rustls-pemfile 2.2.0",
"rustls-pki-types", "rustls-pki-types",
"schannel", "schannel",
"security-framework", "security-framework 2.11.1",
]
[[package]]
name = "rustls-native-certs"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63"
dependencies = [
"openssl-probe 0.2.0",
"rustls-pki-types",
"schannel",
"security-framework 3.5.1",
] ]
[[package]] [[package]]
@@ -1742,9 +2004,37 @@ version = "1.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282" checksum = "21e6f2ab2928ca4291b86736a8bd920a277a399bba1589409d72154ff87c1282"
dependencies = [ dependencies = [
"web-time",
"zeroize", "zeroize",
] ]
[[package]]
name = "rustls-platform-verifier"
version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784"
dependencies = [
"core-foundation 0.10.1",
"core-foundation-sys",
"jni",
"log",
"once_cell",
"rustls 0.23.36",
"rustls-native-certs 0.8.3",
"rustls-platform-verifier-android",
"rustls-webpki 0.103.9",
"security-framework 3.5.1",
"security-framework-sys",
"webpki-root-certs",
"windows-sys 0.61.2",
]
[[package]]
name = "rustls-platform-verifier-android"
version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f"
[[package]] [[package]]
name = "rustls-webpki" name = "rustls-webpki"
version = "0.101.7" version = "0.101.7"
@@ -1755,6 +2045,17 @@ dependencies = [
"untrusted", "untrusted",
] ]
[[package]]
name = "rustls-webpki"
version = "0.103.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53"
dependencies = [
"ring",
"rustls-pki-types",
"untrusted",
]
[[package]] [[package]]
name = "rustversion" name = "rustversion"
version = "1.0.22" version = "1.0.22"
@@ -1767,6 +2068,15 @@ version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984"
[[package]]
name = "same-file"
version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502"
dependencies = [
"winapi-util",
]
[[package]] [[package]]
name = "schannel" name = "schannel"
version = "0.1.28" version = "0.1.28"
@@ -1799,7 +2109,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02"
dependencies = [ dependencies = [
"bitflags 2.10.0", "bitflags 2.10.0",
"core-foundation", "core-foundation 0.9.4",
"core-foundation-sys",
"libc",
"security-framework-sys",
]
[[package]]
name = "security-framework"
version = "3.5.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef"
dependencies = [
"bitflags 2.10.0",
"core-foundation 0.10.1",
"core-foundation-sys", "core-foundation-sys",
"libc", "libc",
"security-framework-sys", "security-framework-sys",
@@ -1907,12 +2230,27 @@ version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
[[package]]
name = "signal-hook-registry"
version = "1.4.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad"
dependencies = [
"libc",
]
[[package]] [[package]]
name = "simd-adler32" name = "simd-adler32"
version = "0.3.8" version = "0.3.8"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2"
[[package]]
name = "siphasher"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56199f7ddabf13fe5074ce809e7d3f42b42ae711800501b5b16ea82ad029c39d"
[[package]] [[package]]
name = "slab" name = "slab"
version = "0.4.11" version = "0.4.11"
@@ -2033,7 +2371,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7"
dependencies = [ dependencies = [
"bitflags 1.3.2", "bitflags 1.3.2",
"core-foundation", "core-foundation 0.9.4",
"system-configuration-sys", "system-configuration-sys",
] ]
@@ -2175,6 +2513,7 @@ dependencies = [
"libc", "libc",
"mio", "mio",
"pin-project-lite", "pin-project-lite",
"signal-hook-registry",
"socket2 0.6.1", "socket2 0.6.1",
"tokio-macros", "tokio-macros",
"windows-sys 0.61.2", "windows-sys 0.61.2",
@@ -2207,7 +2546,7 @@ version = "0.24.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081"
dependencies = [ dependencies = [
"rustls", "rustls 0.21.12",
"tokio", "tokio",
] ]
@@ -2248,6 +2587,7 @@ version = "0.1.44"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100"
dependencies = [ dependencies = [
"log",
"pin-project-lite", "pin-project-lite",
"tracing-attributes", "tracing-attributes",
"tracing-core", "tracing-core",
@@ -2394,6 +2734,16 @@ version = "0.9.5"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a"
[[package]]
name = "walkdir"
version = "2.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b"
dependencies = [
"same-file",
"winapi-util",
]
[[package]] [[package]]
name = "want" name = "want"
version = "0.3.1" version = "0.3.1"
@@ -2487,12 +2837,40 @@ dependencies = [
"wasm-bindgen", "wasm-bindgen",
] ]
[[package]]
name = "web-time"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb"
dependencies = [
"js-sys",
"wasm-bindgen",
]
[[package]]
name = "webpki-root-certs"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "36a29fc0408b113f68cf32637857ab740edfafdf460c326cd2afaa2d84cc05dc"
dependencies = [
"rustls-pki-types",
]
[[package]] [[package]]
name = "webpki-roots" name = "webpki-roots"
version = "0.25.4" version = "0.25.4"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1"
[[package]]
name = "webpki-roots"
version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12bed680863276c63889429bfd6cab3b99943659923822de1c8a39c49e4d722c"
dependencies = [
"rustls-pki-types",
]
[[package]] [[package]]
name = "widestring" name = "widestring"
version = "1.2.1" version = "1.2.1"
@@ -2515,6 +2893,15 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
[[package]]
name = "winapi-util"
version = "0.1.11"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22"
dependencies = [
"windows-sys 0.61.2",
]
[[package]] [[package]]
name = "winapi-x86_64-pc-windows-gnu" name = "winapi-x86_64-pc-windows-gnu"
version = "0.4.0" version = "0.4.0"
@@ -2527,6 +2914,15 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
[[package]]
name = "windows-sys"
version = "0.45.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
dependencies = [
"windows-targets 0.42.2",
]
[[package]] [[package]]
name = "windows-sys" name = "windows-sys"
version = "0.48.0" version = "0.48.0"
@@ -2563,6 +2959,21 @@ dependencies = [
"windows-link", "windows-link",
] ]
[[package]]
name = "windows-targets"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071"
dependencies = [
"windows_aarch64_gnullvm 0.42.2",
"windows_aarch64_msvc 0.42.2",
"windows_i686_gnu 0.42.2",
"windows_i686_msvc 0.42.2",
"windows_x86_64_gnu 0.42.2",
"windows_x86_64_gnullvm 0.42.2",
"windows_x86_64_msvc 0.42.2",
]
[[package]] [[package]]
name = "windows-targets" name = "windows-targets"
version = "0.48.5" version = "0.48.5"
@@ -2611,6 +3022,12 @@ dependencies = [
"windows_x86_64_msvc 0.53.1", "windows_x86_64_msvc 0.53.1",
] ]
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8"
[[package]] [[package]]
name = "windows_aarch64_gnullvm" name = "windows_aarch64_gnullvm"
version = "0.48.5" version = "0.48.5"
@@ -2629,6 +3046,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53"
[[package]]
name = "windows_aarch64_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43"
[[package]] [[package]]
name = "windows_aarch64_msvc" name = "windows_aarch64_msvc"
version = "0.48.5" version = "0.48.5"
@@ -2647,6 +3070,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006"
[[package]]
name = "windows_i686_gnu"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f"
[[package]] [[package]]
name = "windows_i686_gnu" name = "windows_i686_gnu"
version = "0.48.5" version = "0.48.5"
@@ -2677,6 +3106,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c"
[[package]]
name = "windows_i686_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060"
[[package]] [[package]]
name = "windows_i686_msvc" name = "windows_i686_msvc"
version = "0.48.5" version = "0.48.5"
@@ -2695,6 +3130,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2"
[[package]]
name = "windows_x86_64_gnu"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36"
[[package]] [[package]]
name = "windows_x86_64_gnu" name = "windows_x86_64_gnu"
version = "0.48.5" version = "0.48.5"
@@ -2713,6 +3154,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499"
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3"
[[package]] [[package]]
name = "windows_x86_64_gnullvm" name = "windows_x86_64_gnullvm"
version = "0.48.5" version = "0.48.5"
@@ -2731,6 +3178,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1"
[[package]]
name = "windows_x86_64_msvc"
version = "0.42.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0"
[[package]] [[package]]
name = "windows_x86_64_msvc" name = "windows_x86_64_msvc"
version = "0.48.5" version = "0.48.5"
@@ -2782,17 +3235,19 @@ dependencies = [
[[package]] [[package]]
name = "wtfnet-cli" name = "wtfnet-cli"
version = "0.1.0" version = "0.4.0"
dependencies = [ dependencies = [
"clap", "clap",
"serde", "serde",
"serde_json", "serde_json",
"time",
"tokio", "tokio",
"wtfnet-calc", "wtfnet-calc",
"wtfnet-core", "wtfnet-core",
"wtfnet-diag", "wtfnet-diag",
"wtfnet-discover", "wtfnet-discover",
"wtfnet-dns", "wtfnet-dns",
"wtfnet-dnsleak",
"wtfnet-geoip", "wtfnet-geoip",
"wtfnet-http", "wtfnet-http",
"wtfnet-platform", "wtfnet-platform",
@@ -2830,6 +3285,7 @@ dependencies = [
name = "wtfnet-discover" name = "wtfnet-discover"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"hickory-proto",
"mdns-sd", "mdns-sd",
"serde", "serde",
"thiserror 2.0.17", "thiserror 2.0.17",
@@ -2844,7 +3300,7 @@ dependencies = [
"hickory-resolver", "hickory-resolver",
"pnet", "pnet",
"reqwest", "reqwest",
"rustls", "rustls 0.21.12",
"rustls-native-certs 0.6.3", "rustls-native-certs 0.6.3",
"serde", "serde",
"thiserror 2.0.17", "thiserror 2.0.17",
@@ -2855,6 +3311,20 @@ dependencies = [
"url", "url",
] ]
[[package]]
name = "wtfnet-dnsleak"
version = "0.1.0"
dependencies = [
"hickory-proto",
"ipnet",
"pnet",
"serde",
"thiserror 2.0.17",
"tokio",
"tracing",
"wtfnet-platform",
]
[[package]] [[package]]
name = "wtfnet-geoip" name = "wtfnet-geoip"
version = "0.1.0" version = "0.1.0"
@@ -2868,12 +3338,22 @@ dependencies = [
name = "wtfnet-http" name = "wtfnet-http"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"bytes",
"h3",
"h3-quinn",
"http 1.4.0",
"quinn",
"reqwest", "reqwest",
"rustls 0.21.12",
"rustls-native-certs 0.6.3",
"serde", "serde",
"thiserror 2.0.17", "thiserror 2.0.17",
"tokio", "tokio",
"tokio-rustls",
"tokio-socks",
"tracing", "tracing",
"url", "url",
"webpki-roots 1.0.5",
] ]
[[package]] [[package]]
@@ -2939,7 +3419,7 @@ dependencies = [
name = "wtfnet-tls" name = "wtfnet-tls"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"rustls", "rustls 0.21.12",
"rustls-native-certs 0.6.3", "rustls-native-certs 0.6.3",
"serde", "serde",
"thiserror 2.0.17", "thiserror 2.0.17",

View File

@@ -10,6 +10,7 @@ members = [
"crates/wtfnet-geoip", "crates/wtfnet-geoip",
"crates/wtfnet-probe", "crates/wtfnet-probe",
"crates/wtfnet-dns", "crates/wtfnet-dns",
"crates/wtfnet-dnsleak",
"crates/wtfnet-http", "crates/wtfnet-http",
"crates/wtfnet-tls", "crates/wtfnet-tls",
"crates/wtfnet-discover", "crates/wtfnet-discover",

View File

@@ -1,18 +0,0 @@
BUILD_DIR ?= build
.PHONY: build configure package install clean
configure:
cmake -S . -B $(BUILD_DIR)
build: configure
cmake --build $(BUILD_DIR)
package: build
cmake --build $(BUILD_DIR) --target package
install: build
cmake --build $(BUILD_DIR) --target install
clean:
cmake -E rm -rf $(BUILD_DIR)

View File

@@ -7,8 +7,10 @@ WTFnet is a pure CLI toolbox for diagnosing network problems on Linux and Window
- Ports, neighbors, and trusted root certificates. - Ports, neighbors, and trusted root certificates.
- Probing: ping, tcping, traceroute (best-effort). - Probing: ping, tcping, traceroute (best-effort).
- DNS: query/detect/watch with GeoIP, DoT/DoH, and SOCKS5 support. - DNS: query/detect/watch with GeoIP, DoT/DoH, and SOCKS5 support.
- DNS leak detection with policy profiles and privacy modes (best-effort).
- GeoIP offline lookup via GeoLite2 Country/ASN. - GeoIP offline lookup via GeoLite2 Country/ASN.
- Subnet calculator: subnet/contains/overlap/summarize. - Subnet calculator: subnet/contains/overlap/summarize.
- Discover: mDNS/SSDP plus LLMNR/NBNS.
## Quickstart ## Quickstart
```bash ```bash
@@ -34,20 +36,25 @@ wtfn neigh list --ipv6
wtfn geoip lookup 8.8.8.8 wtfn geoip lookup 8.8.8.8
wtfn probe ping example.com --count 4 wtfn probe ping example.com --count 4
wtfn probe tcping example.com:443 --count 4 wtfn probe tcping example.com:443 --count 4
wtfn probe tcping example.com:443 --socks5 socks5://127.0.0.1:9909 wtfn probe tcping example.com:443 --socks5 socks5://127.0.0.1:10808
wtfn probe trace example.com:443 --max-hops 20 wtfn probe trace example.com:443 --max-hops 20
# DNS # DNS
wtfn dns query example.com A wtfn dns query example.com A
wtfn dns query example.com AAAA --server 1.1.1.1 wtfn dns query example.com AAAA --server 1.1.1.1
wtfn dns query example.com A --transport doh --server 1.1.1.1 --tls-name cloudflare-dns.com wtfn dns query example.com A --transport doh --server 1.1.1.1 --tls-name cloudflare-dns.com
wtfn dns query example.com A --transport dot --server 1.1.1.1 --tls-name cloudflare-dns.com --socks5 socks5://127.0.0.1:9909 wtfn dns query example.com A --transport dot --server 1.1.1.1 --tls-name cloudflare-dns.com --socks5 socks5://127.0.0.1:10808
wtfn dns detect example.com --transport doh --servers 1.1.1.1 --tls-name cloudflare-dns.com wtfn dns detect example.com --transport doh --servers 1.1.1.1 --tls-name cloudflare-dns.com
wtfn dns watch --duration 10s --filter example.com wtfn dns watch --duration 10s --filter example.com
wtfn dns watch --follow
wtfn dns leak status
wtfn dns leak watch --duration 10s --profile proxy-stub
wtfn dns leak watch --follow
wtfn dns leak report report.json
# TLS # TLS
wtfn tls handshake example.com:443 wtfn tls handshake example.com:443
wtfn tls handshake example.com:443 --socks5 socks5://127.0.0.1:9909 wtfn tls handshake example.com:443 --socks5 socks5://127.0.0.1:10808
wtfn tls cert example.com:443 wtfn tls cert example.com:443
wtfn tls verify example.com:443 wtfn tls verify example.com:443
wtfn tls alpn example.com:443 --alpn h2,http/1.1 wtfn tls alpn example.com:443 --alpn h2,http/1.1
@@ -55,6 +62,8 @@ wtfn tls alpn example.com:443 --alpn h2,http/1.1
# Discover # Discover
wtfn discover mdns --duration 3s wtfn discover mdns --duration 3s
wtfn discover ssdp --duration 3s wtfn discover ssdp --duration 3s
wtfn discover llmnr --duration 3s
wtfn discover nbns --duration 3s
# Diag # Diag
wtfn diag --out report.json --json wtfn diag --out report.json --json
@@ -66,32 +75,8 @@ wtfn calc overlap 10.0.0.0/24 10.0.1.0/24
wtfn calc summarize 10.0.0.0/24 10.0.1.0/24 wtfn calc summarize 10.0.0.0/24 10.0.1.0/24
``` ```
## Supported flags ## Command reference
Global flags: See `docs/COMMANDS.md` for the full list of commands and flags (with descriptions).
- `--json` / `--pretty`
- `--no-color` / `--quiet`
- `-v` / `-vv` / `--verbose`
- `--log-level <error|warn|info|debug|trace>`
- `--log-format <text|json>`
- `--log-file <path>`
- `NETTOOL_LOG_FILTER` or `RUST_LOG` can override log filters (ex: `maxminddb::decoder=debug`)
Command flags (implemented):
- `sys ip`: `--all`, `--iface <name>`
- `sys route`: `--ipv4`, `--ipv6`, `--to <ip>`
- `ports listen`: `--tcp`, `--udp`, `--port <n>`
- `neigh list`: `--ipv4`, `--ipv6`, `--iface <name>`
- `probe ping`: `--count <n>`, `--timeout-ms <n>`, `--interval-ms <n>`, `--no-geoip`
- `probe tcping`: `--count <n>`, `--timeout-ms <n>`, `--socks5 <url>`, `--prefer-ipv4`, `--no-geoip`
- `probe trace`: `--max-hops <n>`, `--per-hop <n>`, `--timeout-ms <n>`, `--udp`, `--port <n>`, `--rdns`, `--no-geoip`
- `dns query`: `--server <ip[:port]>`, `--transport <udp|tcp|dot|doh>`, `--tls-name <name>`, `--socks5 <url>`, `--prefer-ipv4`, `--timeout-ms <n>`
- `dns detect`: `--servers <csv>`, `--transport <udp|tcp|dot|doh>`, `--tls-name <name>`, `--socks5 <url>`, `--prefer-ipv4`, `--repeat <n>`, `--timeout-ms <n>`
- `dns watch`: `--duration <Ns|Nms>`, `--iface <name>`, `--filter <pattern>`
- `http head|get`: `--timeout-ms <n>`, `--follow-redirects <n>`, `--show-headers`, `--show-body`, `--max-body-bytes <n>`, `--http1-only`, `--http2-only`, `--geoip`, `--socks5 <url>`
- `tls handshake|cert|verify|alpn`: `--sni <name>`, `--alpn <csv>`, `--timeout-ms <n>`, `--insecure`, `--socks5 <url>`, `--prefer-ipv4`
- `discover mdns`: `--duration <Ns|Nms>`, `--service <type>`
- `discover ssdp`: `--duration <Ns|Nms>`
- `diag`: `--out <path>`, `--bundle <path>`, `--dns-detect <domain>`, `--dns-timeout-ms <n>`, `--dns-repeat <n>`
## GeoIP data files ## GeoIP data files
GeoLite2 mmdb files should live in `data/`. GeoLite2 mmdb files should live in `data/`.
@@ -100,16 +85,37 @@ Lookup order:
2) `data/` next to the CLI binary 2) `data/` next to the CLI binary
3) `data/` in the current working directory 3) `data/` in the current working directory
## Build and package ## Build
### Only build binary
```bash ```bash
cmake -S . -B build cargo build --release
cmake --build build
cmake --build build --target package
``` ```
Install: ### Build and package
1. Prepare GeoLite2 databases (required `GeoLite2-ASN.mmdb` and `GeoLite2-Country.mmdb` ):
```bash ```bash
cmake --build build --target install # Place your mmdb files under data/
mkdir data
```
> **Note**: This step requires `python3` and `just`.
2. Use `just` to run build and package command (Note: you don't need bash environment on windows):
```bash
# You will find package under dist/, zip file on windows, tar.gz file on linux
just release
```
## HTTP/3 (experimental)
HTTP/3 support is feature-gated and best-effort. Enable it only when you want to test QUIC
connectivity.
To enable locally for testing:
```bash
cargo run -p wtfnet-cli --features wtfnet-http/http3 -- http head https://cloudflare-quic.com --http3
``` ```
## Roadmap ## Roadmap
@@ -135,8 +141,13 @@ cmake --build build --target install
- TLS extras: OCSP stapling indicator, richer cert parsing - TLS extras: OCSP stapling indicator, richer cert parsing
- ports conns improvements (top talkers / summary) - ports conns improvements (top talkers / summary)
- better baseline/diff for system roots - better baseline/diff for system roots
- optional HTTP/3 (feature-gated)
- optional LLMNR/NBNS discovery - optional LLMNR/NBNS discovery
- optional HTTP/3 (feature-gated; experimental, incomplete)
### v0.4 (current requirements)
- dns leak detection (passive watch + report)
- process attribution (best-effort)
- policy profiles + privacy modes
## Current stage ## Current stage
Implemented: Implemented:
@@ -146,14 +157,16 @@ Implemented:
- http head/get with timing and GeoIP. - http head/get with timing and GeoIP.
- tls handshake/verify/cert/alpn. - tls handshake/verify/cert/alpn.
- DoT/DoH + SOCKS5 proxy support. - DoT/DoH + SOCKS5 proxy support.
- discover mdns/ssdp. - discover mdns/ssdp/llmnr/nbns.
- dns leak detection (status/watch/report).
- diag report + bundle. - diag report + bundle.
- calc subcrate with subnet/contains/overlap/summarize. - calc subcrate with subnet/contains/overlap/summarize.
- CMake/Makefile build + package + install targets. - CMake/Makefile build + package + install targets.
- Basic unit tests for calc and TLS parsing. - Basic unit tests for calc and TLS parsing.
In progress: In progress:
- none. - dns leak: DoH heuristic classification (optional).
- dns leak: Leak-D mismatch correlation (optional).
See `docs/implementation_status.md` for a design-vs-implementation view. See `docs/implementation_status.md` for a design-vs-implementation view.

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "wtfnet-cli" name = "wtfnet-cli"
version = "0.1.0" version = "0.4.0"
edition = "2024" edition = "2024"
[[bin]] [[bin]]
@@ -11,13 +11,15 @@ path = "src/main.rs"
clap = { version = "4", features = ["derive"] } clap = { version = "4", features = ["derive"] }
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
serde_json = "1" serde_json = "1"
tokio = { version = "1", features = ["macros", "rt-multi-thread"] } time = { version = "0.3", features = ["formatting", "parsing"] }
tokio = { version = "1", features = ["macros", "rt-multi-thread", "signal"] }
wtfnet-core = { path = "../wtfnet-core" } wtfnet-core = { path = "../wtfnet-core" }
wtfnet-calc = { path = "../wtfnet-calc" } wtfnet-calc = { path = "../wtfnet-calc" }
wtfnet-geoip = { path = "../wtfnet-geoip" } wtfnet-geoip = { path = "../wtfnet-geoip" }
wtfnet-platform = { path = "../wtfnet-platform" } wtfnet-platform = { path = "../wtfnet-platform" }
wtfnet-probe = { path = "../wtfnet-probe" } wtfnet-probe = { path = "../wtfnet-probe" }
wtfnet-dns = { path = "../wtfnet-dns", features = ["pcap"] } wtfnet-dns = { path = "../wtfnet-dns", features = ["pcap"] }
wtfnet-dnsleak = { path = "../wtfnet-dnsleak", features = ["pcap"] }
wtfnet-http = { path = "../wtfnet-http" } wtfnet-http = { path = "../wtfnet-http" }
wtfnet-tls = { path = "../wtfnet-tls" } wtfnet-tls = { path = "../wtfnet-tls" }
wtfnet-discover = { path = "../wtfnet-discover" } wtfnet-discover = { path = "../wtfnet-discover" }

File diff suppressed because it is too large Load Diff

View File

@@ -4,6 +4,7 @@ version = "0.1.0"
edition = "2024" edition = "2024"
[dependencies] [dependencies]
hickory-proto = "0.24"
mdns-sd = "0.8" mdns-sd = "0.8"
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
thiserror = "2" thiserror = "2"

View File

@@ -1,7 +1,9 @@
use hickory_proto::op::{Message, MessageType, Query};
use hickory_proto::rr::{Name, RData, RecordType};
use mdns_sd::{ServiceDaemon, ServiceEvent, ServiceInfo}; use mdns_sd::{ServiceDaemon, ServiceEvent, ServiceInfo};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, BTreeSet}; use std::collections::{BTreeMap, BTreeSet};
use std::net::{SocketAddr, UdpSocket}; use std::net::{IpAddr, SocketAddr, UdpSocket};
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use thiserror::Error; use thiserror::Error;
@@ -24,6 +26,17 @@ pub struct SsdpOptions {
pub duration_ms: u64, pub duration_ms: u64,
} }
/// Options for LLMNR discovery (`discover llmnr`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LlmnrOptions {
    /// How long to listen for responses, in milliseconds.
    pub duration_ms: u64,
    /// Name to query; blank/absent falls back to "wpad".
    pub name: Option<String>,
}
/// Options for NBNS (NetBIOS name service) discovery (`discover nbns`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NbnsOptions {
    /// How long to listen for responses, in milliseconds.
    pub duration_ms: u64,
}
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MdnsService { pub struct MdnsService {
pub service_type: String, pub service_type: String,
@@ -56,6 +69,34 @@ pub struct SsdpReport {
pub services: Vec<SsdpService>, pub services: Vec<SsdpService>,
} }
/// A single A/AAAA answer record received over LLMNR.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LlmnrAnswer {
    /// Responder IP address (source of the UDP reply).
    pub from: String,
    /// Record owner name as reported by the responder.
    pub name: String,
    /// Record type as text ("A" or "AAAA" — other types are filtered out).
    pub record_type: String,
    /// Record data: the address rendered as text.
    pub data: String,
    /// DNS record TTL (seconds).
    pub ttl: u32,
}
/// Result of one LLMNR discovery run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LlmnrReport {
    /// Requested listen window, in milliseconds.
    pub duration_ms: u64,
    /// The name that was queried.
    pub name: String,
    /// Deduplicated answers collected during the window.
    pub answers: Vec<LlmnrAnswer>,
}
/// NetBIOS node-status result from a single responder.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NbnsNodeStatus {
    /// Responder IP address.
    pub from: String,
    /// Registered NetBIOS names, rendered as `NAME<suffix-hex>`.
    pub names: Vec<String>,
}
/// Result of one NBNS discovery run.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct NbnsReport {
    /// Requested listen window, in milliseconds.
    pub duration_ms: u64,
    /// Deduplicated node-status responses collected during the window.
    pub nodes: Vec<NbnsNodeStatus>,
}
pub async fn mdns_discover(options: MdnsOptions) -> Result<MdnsReport, DiscoverError> { pub async fn mdns_discover(options: MdnsOptions) -> Result<MdnsReport, DiscoverError> {
tokio::task::spawn_blocking(move || mdns_discover_blocking(options)) tokio::task::spawn_blocking(move || mdns_discover_blocking(options))
.await .await
@@ -68,6 +109,18 @@ pub async fn ssdp_discover(options: SsdpOptions) -> Result<SsdpReport, DiscoverE
.map_err(|err| DiscoverError::Io(err.to_string()))? .map_err(|err| DiscoverError::Io(err.to_string()))?
} }
/// Run LLMNR discovery.
///
/// The implementation uses blocking UDP sockets, so the work is moved
/// off the async runtime via `spawn_blocking`; a join failure is
/// surfaced as `DiscoverError::Io`.
pub async fn llmnr_discover(options: LlmnrOptions) -> Result<LlmnrReport, DiscoverError> {
    tokio::task::spawn_blocking(move || llmnr_discover_blocking(options))
        .await
        .map_err(|err| DiscoverError::Io(err.to_string()))?
}
/// Run NBNS discovery.
///
/// The implementation uses blocking UDP sockets, so the work is moved
/// off the async runtime via `spawn_blocking`; a join failure is
/// surfaced as `DiscoverError::Io`.
pub async fn nbns_discover(options: NbnsOptions) -> Result<NbnsReport, DiscoverError> {
    tokio::task::spawn_blocking(move || nbns_discover_blocking(options))
        .await
        .map_err(|err| DiscoverError::Io(err.to_string()))?
}
fn mdns_discover_blocking(options: MdnsOptions) -> Result<MdnsReport, DiscoverError> { fn mdns_discover_blocking(options: MdnsOptions) -> Result<MdnsReport, DiscoverError> {
let daemon = ServiceDaemon::new().map_err(|err| DiscoverError::Mdns(err.to_string()))?; let daemon = ServiceDaemon::new().map_err(|err| DiscoverError::Mdns(err.to_string()))?;
let mut service_types = BTreeSet::new(); let mut service_types = BTreeSet::new();
@@ -174,6 +227,94 @@ fn ssdp_discover_blocking(options: SsdpOptions) -> Result<SsdpReport, DiscoverEr
}) })
} }
/// Blocking implementation of LLMNR discovery.
///
/// Sends a single multicast query for `options.name` (default "wpad")
/// to the LLMNR IPv4 group 224.0.0.252:5355, then collects answers
/// until the duration window elapses. Answers are deduplicated on
/// (from, name, type, data).
fn llmnr_discover_blocking(options: LlmnrOptions) -> Result<LlmnrReport, DiscoverError> {
    let socket = UdpSocket::bind("0.0.0.0:0").map_err(|err| DiscoverError::Io(err.to_string()))?;
    // Short read timeout so the receive loop re-checks the deadline ~5x/sec.
    socket
        .set_read_timeout(Some(Duration::from_millis(200)))
        .map_err(|err| DiscoverError::Io(err.to_string()))?;
    // Blank or missing names fall back to the "wpad" probe name.
    let name = options
        .name
        .clone()
        .filter(|value| !value.trim().is_empty())
        .unwrap_or_else(|| "wpad".to_string());
    let query = build_llmnr_query(&name)
        .map_err(|err| DiscoverError::Io(format!("llmnr build query: {err}")))?;
    // LLMNR IPv4 multicast group and port (RFC 4795).
    let target = "224.0.0.252:5355";
    // Best-effort send; even on failure we still listen for the window.
    let _ = socket.send_to(&query, target);
    let mut answers = Vec::new();
    let mut seen = BTreeSet::new();
    let deadline = Instant::now() + Duration::from_millis(options.duration_ms);
    let mut buf = [0u8; 2048];
    while Instant::now() < deadline {
        match socket.recv_from(&mut buf) {
            Ok((len, from)) => {
                if let Some(entries) = parse_llmnr_response(&buf[..len], from.ip()) {
                    for entry in entries {
                        // Dedup key over all identifying fields of the answer.
                        let key = format!(
                            "{}|{}|{}|{}",
                            entry.from, entry.name, entry.record_type, entry.data
                        );
                        if seen.insert(key) {
                            answers.push(entry);
                        }
                    }
                }
            }
            // Timeouts / transient recv errors: keep polling until deadline.
            Err(_) => continue,
        }
    }
    Ok(LlmnrReport {
        duration_ms: options.duration_ms,
        name,
        answers,
    })
}
/// Blocking implementation of NBNS discovery.
///
/// Broadcasts a wildcard node-status (NBSTAT) query to
/// 255.255.255.255:137 and collects node-status responses until the
/// duration window elapses. Responders are deduplicated on
/// (ip, joined name list).
fn nbns_discover_blocking(options: NbnsOptions) -> Result<NbnsReport, DiscoverError> {
    let socket = UdpSocket::bind("0.0.0.0:0").map_err(|err| DiscoverError::Io(err.to_string()))?;
    // Broadcast permission is required for the 255.255.255.255 destination.
    socket
        .set_broadcast(true)
        .map_err(|err| DiscoverError::Io(err.to_string()))?;
    // Short read timeout so the receive loop re-checks the deadline.
    socket
        .set_read_timeout(Some(Duration::from_millis(200)))
        .map_err(|err| DiscoverError::Io(err.to_string()))?;
    let query = build_nbns_node_status_query();
    // Best-effort send; even on failure we still listen for the window.
    let _ = socket.send_to(&query, "255.255.255.255:137");
    let mut nodes = Vec::new();
    let mut seen = BTreeSet::new();
    let deadline = Instant::now() + Duration::from_millis(options.duration_ms);
    let mut buf = [0u8; 2048];
    while Instant::now() < deadline {
        match socket.recv_from(&mut buf) {
            Ok((len, from)) => {
                if let Some(names) = parse_nbns_node_status(&buf[..len]) {
                    let key = format!("{}|{}", from.ip(), names.join(","));
                    if seen.insert(key) {
                        nodes.push(NbnsNodeStatus {
                            from: from.ip().to_string(),
                            names,
                        });
                    }
                }
            }
            // Timeouts / transient recv errors: keep polling until deadline.
            Err(_) => continue,
        }
    }
    Ok(NbnsReport {
        duration_ms: options.duration_ms,
        nodes,
    })
}
fn parse_ssdp_response(payload: &str, from: SocketAddr) -> Option<SsdpService> { fn parse_ssdp_response(payload: &str, from: SocketAddr) -> Option<SsdpService> {
let mut st = None; let mut st = None;
let mut usn = None; let mut usn = None;
@@ -207,3 +348,183 @@ fn parse_ssdp_response(payload: &str, from: SocketAddr) -> Option<SsdpService> {
server, server,
}) })
} }
/// Build an LLMNR query message asking for both A and AAAA records of
/// `name`, serialized to wire format.
///
/// NOTE(review): RFC 4795 specifies that an LLMNR query carries exactly
/// one question; this message carries two (A + AAAA), and strict
/// responders may ignore it. Consider sending two separate queries —
/// TODO confirm against real responders.
fn build_llmnr_query(name: &str) -> Result<Vec<u8>, String> {
    let name = Name::from_ascii(name).map_err(|err| format!("invalid name: {err}"))?;
    let mut message = Message::new();
    message
        .set_id(0)
        .set_message_type(MessageType::Query)
        .set_recursion_desired(false)
        .add_query(Query::query(name.clone(), RecordType::A))
        .add_query(Query::query(name, RecordType::AAAA));
    message.to_vec().map_err(|err| err.to_string())
}
/// Parse a received LLMNR packet into address answers.
///
/// Returns `None` for non-responses, unparseable payloads, or
/// responses containing no A/AAAA answers. `from` (the responder's IP)
/// is stamped onto every returned answer.
fn parse_llmnr_response(payload: &[u8], from: IpAddr) -> Option<Vec<LlmnrAnswer>> {
    let message = Message::from_vec(payload).ok()?;
    if message.message_type() != MessageType::Response {
        return None;
    }
    let mut answers = Vec::new();
    for record in message.answers() {
        let record_type = record.record_type();
        // Only address records are reported; all other RR types are skipped.
        let data = match record.data() {
            Some(RData::A(addr)) => addr.to_string(),
            Some(RData::AAAA(addr)) => addr.to_string(),
            _ => continue,
        };
        answers.push(LlmnrAnswer {
            from: from.to_string(),
            name: record.name().to_string(),
            record_type: record_type.to_string(),
            data,
            ttl: record.ttl(),
        });
    }
    if answers.is_empty() {
        None
    } else {
        Some(answers)
    }
}
/// Build an NBNS node-status (NBSTAT) query for the wildcard name "*":
/// a 12-byte DNS-style header followed by one question.
fn build_nbns_node_status_query() -> Vec<u8> {
    let mut packet = Vec::with_capacity(50);
    // Header: transaction id, then flags=0 (standard query), qdcount=1,
    // ancount=0, nscount=0, arcount=0.
    packet.extend_from_slice(&nbns_query_id().to_be_bytes());
    for word in [0u16, 1, 0, 0, 0] {
        packet.extend_from_slice(&word.to_be_bytes());
    }
    // Question: encoded wildcard name, type NBSTAT (0x0021), class IN (0x0001).
    packet.extend_from_slice(&nbns_encode_name("*", 0x00));
    packet.extend_from_slice(&0x0021u16.to_be_bytes());
    packet.extend_from_slice(&0x0001u16.to_be_bytes());
    packet
}
/// Derive a pseudo-random 16-bit transaction id from the current
/// time's sub-second nanoseconds (avoids pulling in an RNG crate).
fn nbns_query_id() -> u16 {
    use std::time::{SystemTime, UNIX_EPOCH};
    let nanos = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .subsec_nanos();
    // `as u16` keeps only the low 16 bits, same as masking with 0xffff.
    nanos as u16
}
/// Encode a NetBIOS name into its 34-byte "first-level" wire form
/// (RFC 1002): a length byte (32), 32 half-ASCII nibble bytes, and a
/// terminating zero byte.
///
/// The 16-byte raw name is the uppercased `name` (truncated to 15
/// bytes) plus the NetBIOS `suffix` type byte in position 16.
///
/// Fix: per RFC 1002 the wildcard name "*" used by node-status queries
/// is padded with NUL bytes, while regular names are padded with ASCII
/// spaces. The previous space-padding of "*" can cause responders to
/// ignore the query.
fn nbns_encode_name(name: &str, suffix: u8) -> Vec<u8> {
    // Choose the pad byte mandated by RFC 1002 for this name kind.
    let pad = if name == "*" { 0u8 } else { b' ' };
    let mut raw = [pad; 16];
    for (idx, byte) in name.bytes().take(15).enumerate() {
        raw[idx] = byte.to_ascii_uppercase();
    }
    raw[15] = suffix;
    // Half-ASCII encoding: each nibble maps to a letter in 'A'..='P'.
    let mut encoded = Vec::with_capacity(34);
    encoded.push(32); // encoded-name length
    for byte in raw {
        encoded.push(((byte >> 4) & 0x0f) + b'A');
        encoded.push((byte & 0x0f) + b'A');
    }
    encoded.push(0); // root-label terminator
    encoded
}
/// Parse an NBNS node-status response and extract the advertised names.
///
/// Validates the 12-byte DNS-style header and requires the response
/// (QR) bit, skips the question section, then scans answer RRs for
/// type NBSTAT (0x0021) and decodes their name lists. Returns `None`
/// on any truncation or when no names are found.
fn parse_nbns_node_status(payload: &[u8]) -> Option<Vec<String>> {
    if payload.len() < 12 {
        return None;
    }
    // QR bit (top bit of the flags word) must be set: responses only.
    let flags = u16::from_be_bytes([payload[2], payload[3]]);
    if flags & 0x8000 == 0 {
        return None;
    }
    let qdcount = u16::from_be_bytes([payload[4], payload[5]]) as usize;
    let ancount = u16::from_be_bytes([payload[6], payload[7]]) as usize;
    let mut offset = 12;
    // Skip each question: name + 4 fixed bytes (type, class).
    for _ in 0..qdcount {
        offset = skip_dns_name(payload, offset)?;
        if offset + 4 > payload.len() {
            return None;
        }
        offset += 4;
    }
    let mut names = Vec::new();
    for _ in 0..ancount {
        offset = skip_dns_name(payload, offset)?;
        // Fixed RR header after the name: type(2) class(2) ttl(4) rdlength(2).
        if offset + 10 > payload.len() {
            return None;
        }
        let rr_type = u16::from_be_bytes([payload[offset], payload[offset + 1]]);
        let _rr_class = u16::from_be_bytes([payload[offset + 2], payload[offset + 3]]);
        let _ttl = u32::from_be_bytes([
            payload[offset + 4],
            payload[offset + 5],
            payload[offset + 6],
            payload[offset + 7],
        ]);
        let rdlength = u16::from_be_bytes([payload[offset + 8], payload[offset + 9]]) as usize;
        offset += 10;
        if offset + rdlength > payload.len() {
            return None;
        }
        // Only NBSTAT answers carry a name list; other RR types are skipped.
        if rr_type == 0x0021 && rdlength > 0 {
            if let Some(list) = parse_nbns_name_list(&payload[offset..offset + rdlength]) {
                names.extend(list);
            }
        }
        offset += rdlength;
    }
    if names.is_empty() {
        None
    } else {
        Some(names)
    }
}
/// Parse the NODE_STATUS RDATA name list: one count byte followed by
/// `count` 18-byte entries, each a 15-byte padded NetBIOS name, a
/// one-byte suffix, and two flag bytes (ignored).
///
/// Returns `None` when the buffer is truncated. Each name is rendered
/// as `NAME<suffix-hex>`.
///
/// Fix: names are padded to 15 bytes with spaces, but some stacks pad
/// with NULs; `trim_end()` only strips whitespace, leaving embedded
/// `\0` bytes in the output. Strip both kinds of trailing padding.
fn parse_nbns_name_list(payload: &[u8]) -> Option<Vec<String>> {
    let count = *payload.first()? as usize;
    let mut offset = 1;
    let mut names = Vec::new();
    for _ in 0..count {
        if offset + 18 > payload.len() {
            return None;
        }
        let name_bytes = &payload[offset..offset + 15];
        let suffix = payload[offset + 15];
        // Strip trailing whitespace AND NUL padding from the fixed field.
        let name = String::from_utf8_lossy(name_bytes)
            .trim_end_matches(|c: char| c.is_whitespace() || c == '\0')
            .to_string();
        names.push(format!("{name}<{suffix:02x}>"));
        offset += 18;
    }
    Some(names)
}
/// Advance past a DNS-encoded name starting at `offset` and return the
/// offset of the first byte after it.
///
/// Handles both plain label sequences (terminated by a zero byte) and
/// compression pointers (two bytes with the top bits 0b11 set).
/// Returns `None` when the buffer ends before the name does.
fn skip_dns_name(payload: &[u8], offset: usize) -> Option<usize> {
    let mut pos = offset;
    while let Some(&len) = payload.get(pos) {
        if len & 0xc0 == 0xc0 {
            // Compression pointer: two bytes, name ends here.
            return if pos + 1 < payload.len() {
                Some(pos + 2)
            } else {
                None
            };
        }
        if len == 0 {
            // Root label: terminator byte ends the name.
            return Some(pos + 1);
        }
        // Plain label: skip the length byte plus the label body.
        pos += 1 + usize::from(len);
    }
    // Ran off the end of the buffer mid-name.
    None
}

View File

@@ -0,0 +1,17 @@
[package]
name = "wtfnet-dnsleak"
version = "0.1.0"
edition = "2024"
[dependencies]
hickory-proto = "0.24"
ipnet = { version = "2", features = ["serde"] }
serde = { version = "1", features = ["derive"] }
thiserror = "2"
tokio = { version = "1", features = ["rt"] }
tracing = "0.1"
wtfnet-platform = { path = "../wtfnet-platform" }
pnet = { version = "0.34", optional = true }
[features]
pcap = ["dep:pnet"]

View File

@@ -0,0 +1,60 @@
use crate::report::LeakTransport;
use hickory_proto::op::{Message, MessageType};
use hickory_proto::rr::RData;
use serde::{Deserialize, Serialize};
use std::net::IpAddr;
use wtfnet_platform::FlowProtocol;
/// A captured DNS packet normalized into a flow 5-tuple plus parsed
/// DNS fields, ready for rule evaluation.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ClassifiedEvent {
    /// Capture timestamp in milliseconds (presumably Unix-epoch based —
    /// TODO confirm against the sensor that produces these events).
    pub timestamp_ms: u128,
    /// L4 protocol of the captured packet.
    pub proto: FlowProtocol,
    pub src_ip: IpAddr,
    pub src_port: u16,
    pub dst_ip: IpAddr,
    pub dst_port: u16,
    /// Interface the packet was seen on, when known.
    pub iface_name: Option<String>,
    /// DNS transport classification (values defined by `LeakTransport`).
    pub transport: LeakTransport,
    /// First question's name/type and the response code, when parseable.
    pub qname: Option<String>,
    pub qtype: Option<String>,
    pub rcode: Option<String>,
    /// True for DNS responses; false for queries.
    pub is_response: bool,
    /// A/AAAA answer addresses (populated for responses only).
    pub answer_ips: Vec<IpAddr>,
}
/// Fields extracted from a single DNS message (first question only).
pub struct ParsedDns {
    /// Queried name, UTF-8 rendering.
    pub qname: String,
    /// Query type as text (e.g. "A", "AAAA").
    pub qtype: String,
    /// Response code as text (meaningful for responses).
    pub rcode: String,
    /// True when the message is a response rather than a query.
    pub is_response: bool,
    /// A/AAAA addresses from the answer section (responses only).
    pub answer_ips: Vec<IpAddr>,
}
/// Parse a raw DNS payload into the fields leak-rule evaluation needs.
///
/// Returns `None` when the payload is not a parseable DNS message or
/// carries no question. Only the first question is examined. For
/// responses, A/AAAA answer addresses are collected; all other record
/// types are ignored.
pub fn parse_dns_message(payload: &[u8]) -> Option<ParsedDns> {
    let message = Message::from_vec(payload).ok()?;
    let is_response = message.message_type() == MessageType::Response;
    // A message with no question is useless for classification.
    let query = message.queries().first()?;
    let qname = query.name().to_utf8();
    let qtype = query.query_type().to_string();
    let rcode = message.response_code().to_string();
    let mut answer_ips = Vec::new();
    if is_response {
        for record in message.answers() {
            if let Some(data) = record.data() {
                match data {
                    RData::A(addr) => answer_ips.push(IpAddr::V4(addr.0)),
                    RData::AAAA(addr) => answer_ips.push(IpAddr::V6(addr.0)),
                    _ => {}
                }
            }
        }
    }
    Some(ParsedDns {
        qname,
        qtype,
        rcode,
        is_response,
        answer_ips,
    })
}

View File

@@ -0,0 +1,222 @@
mod classify;
mod policy;
mod privacy;
mod report;
mod route;
mod rules;
mod sensor;
use crate::classify::ClassifiedEvent;
use crate::sensor::{capture_events, SensorEvent, TcpEvent};
use std::time::Instant;
use thiserror::Error;
use tracing::debug;
use wtfnet_platform::{FlowOwnerProvider, FlowTuple};
pub use crate::policy::{LeakPolicy, LeakPolicyProfile, PolicySummary};
pub use crate::privacy::{apply_privacy, PrivacyMode};
pub use crate::report::{LeakEvent, LeakReport, LeakSummary, LeakTransport, RouteClass, Severity};
pub use crate::sensor::{iface_diagnostics, IfaceDiag};
/// Errors surfaced by the dns-leak subsystem.
#[derive(Debug, Error)]
pub enum DnsLeakError {
    /// The requested capability is unavailable (platform/build/feature).
    #[error("not supported: {0}")]
    NotSupported(String),
    /// Socket/capture I/O failure.
    #[error("io error: {0}")]
    Io(String),
    /// Invalid or inconsistent policy configuration.
    #[error("policy error: {0}")]
    Policy(String),
}
/// Configuration for a `dns leak watch` run.
#[derive(Debug, Clone)]
pub struct LeakWatchOptions {
    /// Capture window in milliseconds.
    pub duration_ms: u64,
    /// Interface to capture on; `None` lets the sensor pick one
    /// (see `iface_diagnostics` — confirm selection behavior there).
    pub iface: Option<String>,
    /// Policy the captured queries are evaluated against.
    pub policy: LeakPolicy,
    /// Redaction level applied to emitted events.
    pub privacy: PrivacyMode,
    /// Include per-event details in the report (summary is always kept).
    pub include_events: bool,
}
/// Capture DNS traffic for the configured window and evaluate leak rules.
///
/// Flow:
/// 1. `capture_events` collects DNS packets (and TCP events) for the
///    configured duration/interface.
/// 2. DNS *responses* populate an answer-IP cache (used for Leak-D
///    mismatch correlation) and are not themselves reported.
/// 3. DNS *queries* are enriched (route class + best-effort process
///    owner via `flow_owner`) and checked against the policy rules.
/// 4. TCP events are correlated against the cache to flag route
///    mismatches between resolution and connection.
///
/// Per-event details are included only when `options.include_events`
/// is set; the summary is always present.
pub async fn watch(
    options: LeakWatchOptions,
    flow_owner: Option<&dyn FlowOwnerProvider>,
) -> Result<LeakReport, DnsLeakError> {
    debug!(
        duration_ms = options.duration_ms,
        iface = ?options.iface,
        include_events = options.include_events,
        "dns leak watch start"
    );
    let start = Instant::now();
    let events = capture_events(&options).await?;
    let mut leak_events = Vec::new();
    // answer IP -> (qname, route class) cache used for Leak-D correlation.
    let mut dns_cache: std::collections::HashMap<std::net::IpAddr, DnsCacheEntry> =
        std::collections::HashMap::new();
    for event in events {
        match event {
            SensorEvent::Dns(event) => {
                let enriched = enrich_event(event, flow_owner).await;
                if enriched.is_response {
                    // Responses only feed the cache; queries drive rule checks.
                    update_dns_cache(&mut dns_cache, &enriched);
                    continue;
                }
                if let Some(decision) = rules::evaluate(&enriched, &options.policy) {
                    let mut leak_event = report::LeakEvent::from_decision(enriched, decision);
                    privacy::apply_privacy(&mut leak_event, options.privacy);
                    leak_events.push(leak_event);
                }
            }
            SensorEvent::Tcp(event) => {
                if let Some(leak_event) =
                    evaluate_mismatch(event, flow_owner, &mut dns_cache, options.privacy).await
                {
                    leak_events.push(leak_event);
                }
            }
        }
    }
    let summary = LeakSummary::from_events(&leak_events);
    let report = LeakReport {
        // Actual elapsed wall time, not the requested duration.
        duration_ms: start.elapsed().as_millis() as u64,
        policy: options.policy.summary(),
        summary,
        events: if options.include_events {
            leak_events
        } else {
            Vec::new()
        },
    };
    Ok(report)
}
/// Add route classification and (optionally) process attribution to a
/// captured DNS event.
///
/// Owner lookup is best-effort: a provider error is recorded in
/// `owner_failure` instead of aborting the enrichment.
async fn enrich_event(
    event: ClassifiedEvent,
    flow_owner: Option<&dyn FlowOwnerProvider>,
) -> report::EnrichedEvent {
    let mut enriched = route::enrich_route(event);
    if let Some(provider) = flow_owner {
        let flow = FlowTuple {
            proto: enriched.proto,
            src_ip: enriched.src_ip,
            src_port: enriched.src_port,
            dst_ip: enriched.dst_ip,
            dst_port: enriched.dst_port,
        };
        match provider.owner_of(flow).await {
            Ok(result) => {
                enriched.owner = result.owner;
                enriched.owner_confidence = result.confidence;
                enriched.owner_failure = result.failure_reason;
            }
            Err(err) => {
                // Keep the event; surface the attribution failure in the report.
                enriched.owner_failure = Some(err.message);
            }
        }
    }
    enriched
}
/// Cached fact from a DNS response: which qname resolved to an answer
/// IP, and over which route class the response was observed.
struct DnsCacheEntry {
    // Name that resolved to the keyed answer IP.
    qname: String,
    // Route class the DNS response traveled over.
    route_class: RouteClass,
    // Event timestamp (ms) at insertion; used for TTL pruning.
    timestamp_ms: u128,
}

/// How long (ms) a DNS answer stays eligible for Leak-D TCP correlation.
const DNS_CACHE_TTL_MS: u128 = 60_000;
/// Record the answer IPs of a DNS response in the Leak-D cache.
///
/// Responses without a qname are ignored; stale entries are pruned
/// first so the cache only tracks the recent TTL window. Later answers
/// for the same IP overwrite earlier ones.
fn update_dns_cache(cache: &mut std::collections::HashMap<std::net::IpAddr, DnsCacheEntry>, event: &report::EnrichedEvent) {
    let Some(qname) = event.qname.as_ref() else { return };
    let now = event.timestamp_ms;
    prune_dns_cache(cache, now);
    for ip in event.answer_ips.iter() {
        debug!(
            "dns leak cache insert ip={} qname={} route={:?}",
            ip, qname, event.route_class
        );
        cache.insert(
            *ip,
            DnsCacheEntry {
                qname: qname.clone(),
                route_class: event.route_class,
                timestamp_ms: now,
            },
        );
    }
}
/// Evict cache entries older than `DNS_CACHE_TTL_MS` relative to `now_ms`.
/// Uses saturating subtraction so entries timestamped "in the future"
/// are kept rather than underflowing.
fn prune_dns_cache(
    cache: &mut std::collections::HashMap<std::net::IpAddr, DnsCacheEntry>,
    now_ms: u128,
) {
    cache.retain(|_, entry| {
        let age_ms = now_ms.saturating_sub(entry.timestamp_ms);
        age_ms <= DNS_CACHE_TTL_MS
    });
}
/// Leak-D check: flag a TCP connection whose route class differs from
/// the route class over which its destination IP was resolved.
///
/// Looks the TCP event's destination up in the DNS answer cache; if
/// the TCP route class matches the cached DNS route class, no event is
/// produced. On mismatch, a P2 `LEAK_D_MISMATCH` event is emitted,
/// with best-effort process attribution and privacy redaction applied.
async fn evaluate_mismatch(
    event: TcpEvent,
    flow_owner: Option<&dyn FlowOwnerProvider>,
    cache: &mut std::collections::HashMap<std::net::IpAddr, DnsCacheEntry>,
    privacy: PrivacyMode,
) -> Option<LeakEvent> {
    prune_dns_cache(cache, event.timestamp_ms);
    debug!(
        "dns leak tcp syn dst_ip={} dst_port={} cache_size={}",
        event.dst_ip,
        event.dst_port,
        cache.len()
    );
    // No cached resolution for this destination -> nothing to correlate.
    let entry = cache.get(&event.dst_ip)?;
    let tcp_route = route::route_class_for(event.src_ip, event.dst_ip, event.iface_name.as_deref());
    if tcp_route == entry.route_class {
        // Routes agree -> not a leak; log and drop.
        debug!(
            "dns leak mismatch skip dst_ip={} tcp_route={:?} dns_route={:?}",
            event.dst_ip, tcp_route, entry.route_class
        );
        return None;
    }
    // Synthesize an enriched event for the TCP flow, carrying the qname
    // that resolved to this destination.
    let mut enriched = report::EnrichedEvent {
        timestamp_ms: event.timestamp_ms,
        proto: wtfnet_platform::FlowProtocol::Tcp,
        src_ip: event.src_ip,
        src_port: event.src_port,
        dst_ip: event.dst_ip,
        dst_port: event.dst_port,
        iface_name: event.iface_name.clone(),
        transport: LeakTransport::Unknown,
        qname: Some(entry.qname.clone()),
        qtype: None,
        rcode: None,
        is_response: false,
        answer_ips: Vec::new(),
        route_class: tcp_route,
        owner: None,
        owner_confidence: wtfnet_platform::FlowOwnerConfidence::None,
        owner_failure: None,
    };
    if let Some(provider) = flow_owner {
        let flow = FlowTuple {
            proto: wtfnet_platform::FlowProtocol::Tcp,
            src_ip: event.src_ip,
            src_port: event.src_port,
            dst_ip: event.dst_ip,
            dst_port: event.dst_port,
        };
        // Attribution failures are non-fatal here; owner fields stay unset.
        if let Ok(result) = provider.owner_of(flow).await {
            enriched.owner = result.owner;
            enriched.owner_confidence = result.confidence;
            enriched.owner_failure = result.failure_reason;
        }
    }
    let decision = rules::LeakDecision {
        leak_type: report::LeakType::D,
        severity: Severity::P2,
        policy_rule_id: "LEAK_D_MISMATCH".to_string(),
    };
    let mut leak_event = report::LeakEvent::from_decision(enriched, decision);
    privacy::apply_privacy(&mut leak_event, privacy);
    Some(leak_event)
}

View File

@@ -0,0 +1,113 @@
use ipnet::IpNet;
use serde::{Deserialize, Serialize};
/// High-level VPN/proxy posture the leak rules are evaluated against.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum LeakPolicyProfile {
    FullTunnel,
    ProxyStub,
    Split,
}
/// Concrete allow-rules derived from a profile (see `LeakPolicy::from_profile`).
/// The `allowed_*` vectors are the rule inputs consumed by `rules::is_allowed`;
/// empty vectors mean "no rule of that kind configured".
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakPolicy {
    pub profile: LeakPolicyProfile,
    pub allowed_ifaces: Vec<String>,
    pub tunnel_ifaces: Vec<String>,
    pub loopback_ifaces: Vec<String>,
    pub allowed_destinations: Vec<IpNet>,
    pub allowed_ports: Vec<u16>,
    pub allowed_processes: Vec<String>,
    pub proxy_required_domains: Vec<String>,
    pub allowlist_domains: Vec<String>,
}
/// Serialization-friendly condensed view of a `LeakPolicy` for reports
/// (destinations stringified; loopback/domain lists omitted).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct PolicySummary {
    pub profile: LeakPolicyProfile,
    pub allowed_ifaces: Vec<String>,
    pub tunnel_ifaces: Vec<String>,
    pub allowed_destinations: Vec<String>,
    pub allowed_ports: Vec<u16>,
    pub allowed_processes: Vec<String>,
}
impl LeakPolicy {
    /// Derive a baseline policy for `profile` from the machine's interface
    /// names: loopback plus tunnel-looking interfaces become the allowed
    /// egress set. List-based rule fields start empty and are filled in by
    /// the caller.
    pub fn from_profile(profile: LeakPolicyProfile, ifaces: &[String]) -> Self {
        let loopback_ifaces = detect_loopback_ifaces(ifaces);
        let tunnel_ifaces = detect_tunnel_ifaces(ifaces);
        // All three profiles currently share the same allowed-interface
        // list (loopback + tunnel); the original match had identical arms.
        let allowed_ifaces = merge_lists(&loopback_ifaces, &tunnel_ifaces);
        LeakPolicy {
            profile,
            allowed_ifaces,
            tunnel_ifaces,
            loopback_ifaces,
            allowed_destinations: Vec::new(),
            allowed_ports: Vec::new(),
            allowed_processes: Vec::new(),
            proxy_required_domains: Vec::new(),
            allowlist_domains: Vec::new(),
        }
    }

    /// Condensed, report-friendly view of this policy; destination networks
    /// are rendered as strings.
    pub fn summary(&self) -> PolicySummary {
        let allowed_destinations = self
            .allowed_destinations
            .iter()
            .map(|net| net.to_string())
            .collect();
        PolicySummary {
            profile: self.profile,
            allowed_ifaces: self.allowed_ifaces.clone(),
            tunnel_ifaces: self.tunnel_ifaces.clone(),
            allowed_destinations,
            allowed_ports: self.allowed_ports.clone(),
            allowed_processes: self.allowed_processes.clone(),
        }
    }
}
/// Pick out interfaces whose names look like loopback devices
/// (exact "lo"/"lo0", or containing "loopback"/"localhost"), case-insensitively.
fn detect_loopback_ifaces(ifaces: &[String]) -> Vec<String> {
    let mut found = Vec::new();
    for iface in ifaces {
        let lower = iface.to_ascii_lowercase();
        let is_loopback = matches!(lower.as_str(), "lo" | "lo0")
            || lower.contains("loopback")
            || lower.contains("localhost");
        if is_loopback {
            // Keep the original (non-lowercased) name.
            found.push(iface.clone());
        }
    }
    found
}
/// Pick out interfaces whose names contain a tunnel-ish marker
/// (tun/tap/wg/wireguard/vpn/ppp), case-insensitively.
fn detect_tunnel_ifaces(ifaces: &[String]) -> Vec<String> {
    const MARKERS: [&str; 6] = ["tun", "tap", "wg", "wireguard", "vpn", "ppp"];
    ifaces
        .iter()
        .filter(|iface| {
            let lower = iface.to_ascii_lowercase();
            MARKERS.iter().any(|marker| lower.contains(marker))
        })
        .cloned()
        .collect()
}
/// Concatenate `a` then `b`, dropping duplicates while preserving first-seen
/// order.
///
/// Replaces the original O(n²) `Vec::iter().any` duplicate scan with a
/// `HashSet` membership check — same output for every input, linear time.
fn merge_lists(a: &[String], b: &[String]) -> Vec<String> {
    let mut seen = std::collections::HashSet::new();
    let mut out = Vec::new();
    for value in a.iter().chain(b.iter()) {
        // `insert` returns false for values already seen.
        if seen.insert(value.as_str()) {
            out.push(value.clone());
        }
    }
    out
}

View File

@@ -0,0 +1,35 @@
use crate::report::LeakEvent;
use serde::{Deserialize, Serialize};
/// How much DNS detail to keep in emitted leak events
/// (see `apply_privacy`): Full keeps everything, Redacted trims the qname
/// to its last two labels, Minimal drops qname/qtype/rcode entirely.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum PrivacyMode {
    Full,
    Redacted,
    Minimal,
}
/// Scrub DNS detail from `event` in place according to `mode`.
pub fn apply_privacy(event: &mut LeakEvent, mode: PrivacyMode) {
    match mode {
        // Full: keep every field untouched.
        PrivacyMode::Full => {}
        // Redacted: keep only the registrable tail of the query name.
        PrivacyMode::Redacted => {
            if let Some(qname) = event.qname.take() {
                event.qname = Some(redact_domain(&qname));
            }
        }
        // Minimal: drop all DNS-specific fields.
        PrivacyMode::Minimal => {
            event.qname = None;
            event.qtype = None;
            event.rcode = None;
        }
    }
}
/// Reduce a domain to its last two non-empty labels ("a.b.example.com" ->
/// "example.com"); names with fewer than two labels are returned unchanged.
fn redact_domain(value: &str) -> String {
    let labels: Vec<&str> = value.split('.').filter(|label| !label.is_empty()).collect();
    match labels.as_slice() {
        // At least two labels: keep only the trailing pair.
        [.., second_level, top_level] => format!("{second_level}.{top_level}"),
        _ => value.to_string(),
    }
}

View File

@@ -0,0 +1,194 @@
use crate::policy::PolicySummary;
use serde::{Deserialize, Serialize};
use std::collections::{BTreeMap, HashMap};
use std::net::IpAddr;
use wtfnet_platform::{FlowOwner, FlowOwnerConfidence, FlowProtocol};
/// Wire transport the DNS traffic was observed on.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum LeakTransport {
    Udp53,
    Tcp53,
    Dot,
    Doh,
    Unknown,
}
/// Leak category assigned by the rules (A plaintext, B proxy-required,
/// C encrypted-to-disallowed, D DNS/TCP route mismatch — per the
/// `policy_rule_id` strings used elsewhere in this file).
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[serde(rename_all = "lowercase")]
pub enum LeakType {
    A,
    B,
    C,
    D,
}
/// Coarse classification of the interface a flow used.
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum Severity {
    P0,
    P1,
    P2,
    P3,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct EnrichedEvent {
    pub timestamp_ms: u128,
    pub proto: FlowProtocol,
    pub src_ip: IpAddr,
    pub src_port: u16,
    pub dst_ip: IpAddr,
    pub dst_port: u16,
    pub iface_name: Option<String>,
    pub transport: LeakTransport,
    pub qname: Option<String>,
    pub qtype: Option<String>,
    pub rcode: Option<String>,
    pub is_response: bool,
    pub answer_ips: Vec<IpAddr>,
    pub route_class: RouteClass,
    pub owner: Option<FlowOwner>,
    pub owner_confidence: FlowOwnerConfidence,
    pub owner_failure: Option<String>,
}
/// Final, report-ready record built by `LeakEvent::from_decision`:
/// flow identity + process attribution + the rule decision.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakEvent {
    pub timestamp_ms: u128,
    pub transport: LeakTransport,
    pub qname: Option<String>,
    pub qtype: Option<String>,
    pub rcode: Option<String>,
    pub iface_name: Option<String>,
    pub route_class: RouteClass,
    // Destination is stringified for serialization.
    pub dst_ip: String,
    pub dst_port: u16,
    pub pid: Option<u32>,
    pub ppid: Option<u32>,
    pub process_name: Option<String>,
    pub process_path: Option<String>,
    pub attribution_confidence: FlowOwnerConfidence,
    pub attribution_failure: Option<String>,
    pub leak_type: LeakType,
    pub severity: Severity,
    pub policy_rule_id: String,
}
/// One (leak type, count) pair in the summary.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakTypeCount {
    pub leak_type: LeakType,
    pub count: usize,
}
/// Generic (key, count) pair for top-N summary lists.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SummaryItem {
    pub key: String,
    pub count: usize,
}
/// Aggregated counts over a set of leak events (see `from_events`).
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakSummary {
    pub total: usize,
    pub by_type: Vec<LeakTypeCount>,
    pub top_processes: Vec<SummaryItem>,
    pub top_destinations: Vec<SummaryItem>,
}
/// Top-level watch report: run duration, the policy used, the summary,
/// and every individual event.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LeakReport {
    pub duration_ms: u64,
    pub policy: PolicySummary,
    pub summary: LeakSummary,
    pub events: Vec<LeakEvent>,
}
impl LeakEvent {
    /// Flatten an enriched flow event plus a rule decision into the
    /// report-ready `LeakEvent` shape; absent owner attribution becomes
    /// all-`None` process fields.
    pub fn from_decision(event: EnrichedEvent, decision: crate::rules::LeakDecision) -> Self {
        let (pid, ppid, process_name, process_path) = match event.owner.as_ref() {
            Some(owner) => (
                owner.pid,
                owner.ppid,
                owner.process_name.clone(),
                owner.process_path.clone(),
            ),
            None => (None, None, None, None),
        };
        LeakEvent {
            timestamp_ms: event.timestamp_ms,
            transport: event.transport,
            qname: event.qname,
            qtype: event.qtype,
            rcode: event.rcode,
            iface_name: event.iface_name,
            route_class: event.route_class,
            // Stringify the destination for serialization.
            dst_ip: event.dst_ip.to_string(),
            dst_port: event.dst_port,
            pid,
            ppid,
            process_name,
            process_path,
            attribution_confidence: event.owner_confidence,
            attribution_failure: event.owner_failure,
            leak_type: decision.leak_type,
            severity: decision.severity,
            policy_rule_id: decision.policy_rule_id,
        }
    }
}
impl LeakSummary {
pub fn from_events(events: &[LeakEvent]) -> Self {
let total = events.len();
let mut by_type_map: HashMap<LeakType, usize> = HashMap::new();
let mut process_map: BTreeMap<String, usize> = BTreeMap::new();
let mut dest_map: BTreeMap<String, usize> = BTreeMap::new();
for event in events {
*by_type_map.entry(event.leak_type).or_insert(0) += 1;
if let Some(name) = event.process_name.as_ref() {
*process_map.entry(name.clone()).or_insert(0) += 1;
}
let dst_key = format!("{}:{}", event.dst_ip, event.dst_port);
*dest_map.entry(dst_key).or_insert(0) += 1;
}
let mut by_type = by_type_map
.into_iter()
.map(|(leak_type, count)| LeakTypeCount { leak_type, count })
.collect::<Vec<_>>();
by_type.sort_by(|a, b| a.leak_type.cmp(&b.leak_type));
let top_processes = top_items(process_map, 5);
let top_destinations = top_items(dest_map, 5);
LeakSummary {
total,
by_type,
top_processes,
top_destinations,
}
}
}
/// Turn a key->count map into at most `limit` items sorted by descending
/// count, ties broken by ascending key.
fn top_items(map: BTreeMap<String, usize>, limit: usize) -> Vec<SummaryItem> {
    let mut ranked: Vec<SummaryItem> = map
        .into_iter()
        .map(|(key, count)| SummaryItem { key, count })
        .collect();
    ranked.sort_by(|lhs, rhs| {
        rhs.count
            .cmp(&lhs.count)
            .then_with(|| lhs.key.cmp(&rhs.key))
    });
    ranked.into_iter().take(limit).collect()
}

View File

@@ -0,0 +1,53 @@
use crate::classify::ClassifiedEvent;
use crate::report::{EnrichedEvent, RouteClass};
use wtfnet_platform::FlowOwnerConfidence;
/// Wrap a classified DNS event into an `EnrichedEvent`, computing its route
/// class from the flow's addresses and interface. Process-owner fields start
/// empty and are filled in by a later attribution step.
pub fn enrich_route(event: ClassifiedEvent) -> EnrichedEvent {
    let route_class = route_class_for(event.src_ip, event.dst_ip, event.iface_name.as_deref());
    EnrichedEvent {
        timestamp_ms: event.timestamp_ms,
        proto: event.proto,
        src_ip: event.src_ip,
        src_port: event.src_port,
        dst_ip: event.dst_ip,
        dst_port: event.dst_port,
        iface_name: event.iface_name,
        transport: event.transport,
        qname: event.qname,
        qtype: event.qtype,
        rcode: event.rcode,
        is_response: event.is_response,
        answer_ips: event.answer_ips,
        route_class,
        // Attribution is not known at enrichment time.
        owner: None,
        owner_confidence: FlowOwnerConfidence::None,
        owner_failure: None,
    }
}
pub fn route_class_for(
src_ip: std::net::IpAddr,
dst_ip: std::net::IpAddr,
iface_name: Option<&str>,
) -> RouteClass {
if src_ip.is_loopback() || dst_ip.is_loopback() {
RouteClass::Loopback
} else if iface_name.map(is_tunnel_iface).unwrap_or(false) {
RouteClass::Tunnel
} else if iface_name.is_some() {
RouteClass::Physical
} else {
RouteClass::Unknown
}
}
/// True when the interface name contains a tunnel-ish marker
/// (tun/tap/wg/wireguard/vpn/ppp), case-insensitively.
fn is_tunnel_iface(name: &str) -> bool {
    let lower = name.to_ascii_lowercase();
    ["tun", "tap", "wg", "wireguard", "vpn", "ppp"]
        .iter()
        .any(|marker| lower.contains(marker))
}

View File

@@ -0,0 +1,116 @@
use crate::policy::LeakPolicy;
use crate::report::{EnrichedEvent, LeakTransport, LeakType, Severity};
/// Outcome of rule evaluation: which leak category fired, how severe it is,
/// and the rule identifier recorded in the report.
#[derive(Debug, Clone)]
pub struct LeakDecision {
    pub leak_type: LeakType,
    pub severity: Severity,
    pub policy_rule_id: String,
}
/// Apply the leak rules to an enriched DNS event; `None` means the event is
/// acceptable under `policy`.
pub fn evaluate(event: &EnrichedEvent, policy: &LeakPolicy) -> Option<LeakDecision> {
    match event.transport {
        // Plaintext DNS over UDP/TCP port 53.
        LeakTransport::Udp53 | LeakTransport::Tcp53 => {
            if is_allowed(event, policy) {
                return None;
            }
            // A query for a proxy-only domain is Leak-B (P1); any other
            // disallowed plaintext query is the more severe Leak-A (P0).
            let decision = if is_proxy_required(event, policy) {
                LeakDecision {
                    leak_type: LeakType::B,
                    severity: Severity::P1,
                    policy_rule_id: "LEAK_B_PROXY_REQUIRED".to_string(),
                }
            } else {
                LeakDecision {
                    leak_type: LeakType::A,
                    severity: Severity::P0,
                    policy_rule_id: "LEAK_A_PLAINTEXT".to_string(),
                }
            };
            Some(decision)
        }
        // Encrypted DNS (DoT/DoH) to a non-allowed destination is Leak-C.
        LeakTransport::Dot | LeakTransport::Doh => {
            if is_allowed(event, policy) {
                None
            } else {
                Some(LeakDecision {
                    leak_type: LeakType::C,
                    severity: Severity::P1,
                    policy_rule_id: "LEAK_C_ENCRYPTED".to_string(),
                })
            }
        }
        LeakTransport::Unknown => None,
    }
}
/// True when the event matches any configured allow rule (interface, port,
/// destination network, or process name). With no rules configured at all,
/// nothing is allowed.
fn is_allowed(event: &EnrichedEvent, policy: &LeakPolicy) -> bool {
    let no_rules = policy.allowed_ifaces.is_empty()
        && policy.allowed_destinations.is_empty()
        && policy.allowed_ports.is_empty()
        && policy.allowed_processes.is_empty();
    if no_rules {
        return false;
    }
    // Interface allow-list (case-insensitive).
    let iface_ok = event.iface_name.as_ref().is_some_and(|iface| {
        policy
            .allowed_ifaces
            .iter()
            .any(|allowed| allowed.eq_ignore_ascii_case(iface))
    });
    if iface_ok {
        return true;
    }
    // Destination port allow-list.
    if policy.allowed_ports.contains(&event.dst_port) {
        return true;
    }
    // Destination network allow-list.
    if policy
        .allowed_destinations
        .iter()
        .any(|net| net.contains(&event.dst_ip))
    {
        return true;
    }
    // Process-name allow-list (case-insensitive), when attribution exists.
    event
        .owner
        .as_ref()
        .and_then(|owner| owner.process_name.as_ref())
        .is_some_and(|name| {
            policy
                .allowed_processes
                .iter()
                .any(|value| value.eq_ignore_ascii_case(name))
        })
}
/// True when the event's query name must go through the proxy: either it
/// matches `proxy_required_domains`, or an allowlist is configured and the
/// name is NOT on it. Matching is case-insensitive on the domain or any
/// subdomain of it. Events without a qname never require the proxy.
fn is_proxy_required(event: &EnrichedEvent, policy: &LeakPolicy) -> bool {
    let qname = match event.qname.as_ref() {
        Some(value) => value.to_ascii_lowercase(),
        None => return false,
    };
    // Exact match or subdomain ("foo.domain") match.
    let matches_domain = |domain: &String| {
        let domain = domain.to_ascii_lowercase();
        qname == domain || qname.ends_with(&format!(".{domain}"))
    };
    if policy
        .proxy_required_domains
        .iter()
        .any(|domain| matches_domain(domain))
    {
        return true;
    }
    if policy.allowlist_domains.is_empty() {
        return false;
    }
    // Allowlist configured: anything off-list requires the proxy.
    !policy
        .allowlist_domains
        .iter()
        .any(|domain| matches_domain(domain))
}

View File

@@ -0,0 +1,444 @@
use crate::classify::{parse_dns_message, ClassifiedEvent};
use crate::report::LeakTransport;
use crate::DnsLeakError;
use std::collections::HashSet;
use std::net::IpAddr;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use tracing::debug;
use wtfnet_platform::FlowProtocol;
use crate::LeakWatchOptions;
#[cfg(feature = "pcap")]
use pnet::datalink::{self, Channel, Config as DatalinkConfig};
#[cfg(feature = "pcap")]
use std::sync::mpsc;
// Per-interface budget (ms) for opening a capture handle; see
// open_channel_with_timeout.
#[cfg(feature = "pcap")]
const OPEN_IFACE_TIMEOUT_MS: u64 = 700;
// Poll interval (ms) for pulling frames off the reader thread so the
// capture deadline is honored even when the wire is quiet.
#[cfg(feature = "pcap")]
const FRAME_RECV_TIMEOUT_MS: u64 = 200;
/// Stub used when the crate is built without the `pcap` feature: packet
/// capture is unavailable, so watching always fails with `NotSupported`.
#[cfg(not(feature = "pcap"))]
pub async fn capture_events(_options: &LeakWatchOptions) -> Result<Vec<SensorEvent>, DnsLeakError> {
    Err(DnsLeakError::NotSupported(
        "dns leak watch requires pcap feature".to_string(),
    ))
}
/// Run the blocking capture loop on a worker thread, bounded by an outer
/// watchdog timeout so a hung driver cannot wedge the caller.
///
/// The watchdog budget is: capture duration + one open-timeout per candidate
/// interface (selection may try each of them) + 2s slack, so the watchdog
/// cannot fire before a normally-finishing worker returns.
#[cfg(feature = "pcap")]
pub async fn capture_events(options: &LeakWatchOptions) -> Result<Vec<SensorEvent>, DnsLeakError> {
    let options = options.clone();
    let iface_list = datalink::interfaces();
    // Captured up front so the timeout error can list the candidates.
    let candidates = format_iface_list(&iface_list);
    let select_budget_ms = (iface_list.len().max(1) as u64).saturating_mul(OPEN_IFACE_TIMEOUT_MS);
    let timeout_ms = options
        .duration_ms
        .saturating_add(select_budget_ms)
        .saturating_add(2000);
    let handle = tokio::task::spawn_blocking(move || capture_events_blocking(options));
    match tokio::time::timeout(Duration::from_millis(timeout_ms), handle).await {
        Ok(joined) => joined.map_err(|err| DnsLeakError::Io(err.to_string()))?,
        Err(_) => {
            // NOTE(review): on timeout the spawn_blocking task keeps running
            // in the background (blocking tasks cannot be aborted) — confirm
            // this is acceptable for long-lived processes.
            return Err(DnsLeakError::Io(
                format!(
                    "capture timed out waiting for interface; candidates: {candidates}"
                ),
            ))
        }
    }
}
/// Outbound (or inbound) TCP segment of interest — currently the initial
/// SYN of a non-DNS connection, used by the Leak-D mismatch check.
#[derive(Debug, Clone)]
pub struct TcpEvent {
    pub timestamp_ms: u128,
    pub src_ip: IpAddr,
    pub src_port: u16,
    pub dst_ip: IpAddr,
    pub dst_port: u16,
    pub iface_name: Option<String>,
}
/// One observation emitted by the capture loop: a parsed DNS message or a
/// bare TCP connection attempt.
#[derive(Debug, Clone)]
pub enum SensorEvent {
    Dns(ClassifiedEvent),
    Tcp(TcpEvent),
}
/// Result of probing a single interface for capture support
/// (see `iface_diagnostics`); `error` is "-" when the open succeeded.
#[derive(Debug, Clone)]
pub struct IfaceDiag {
    pub name: String,
    pub open_ok: bool,
    pub error: String,
}
/// Stub used without the `pcap` feature: diagnostics are unavailable.
#[cfg(not(feature = "pcap"))]
pub fn iface_diagnostics() -> Result<Vec<IfaceDiag>, DnsLeakError> {
    Err(DnsLeakError::NotSupported(
        "dns leak watch requires pcap feature".to_string(),
    ))
}
/// Probe every interface by attempting to open a capture channel on it and
/// report per-interface success or the open error. Never fails as a whole.
#[cfg(feature = "pcap")]
pub fn iface_diagnostics() -> Result<Vec<IfaceDiag>, DnsLeakError> {
    let mut config = DatalinkConfig::default();
    config.read_timeout = Some(Duration::from_millis(500));
    let diagnostics = datalink::interfaces()
        .into_iter()
        .map(|iface| {
            let name = iface.name.clone();
            match open_channel_with_timeout(iface, &config) {
                // Channel opened: it is dropped immediately, we only wanted
                // to know whether opening works.
                Ok(_) => IfaceDiag {
                    name,
                    open_ok: true,
                    error: "-".to_string(),
                },
                Err(err) => IfaceDiag {
                    name,
                    open_ok: false,
                    error: err,
                },
            }
        })
        .collect();
    Ok(diagnostics)
}
/// Blocking capture loop: open one datalink channel, then read frames until
/// `options.duration_ms` elapses, returning de-duplicated DNS/TCP sensor
/// events.
#[cfg(feature = "pcap")]
fn capture_events_blocking(options: LeakWatchOptions) -> Result<Vec<SensorEvent>, DnsLeakError> {
    use pnet::packet::ethernet::{EtherTypes, EthernetPacket};
    use pnet::packet::Packet;
    let mut config = DatalinkConfig::default();
    config.read_timeout = Some(Duration::from_millis(500));
    let (iface, mut rx) = select_interface(options.iface.as_deref(), &config)?;
    // The chosen interface's addresses; frames not involving one are dropped.
    let local_ips = iface.ips.iter().map(|ip| ip.ip()).collect::<Vec<_>>();
    let iface_name = iface.name.clone();
    // Reader thread: pnet's receiver blocks, so frames are forwarded over a
    // channel and drained below with a short timeout, keeping the deadline
    // honored even on a quiet wire. The thread exits once this function
    // returns and the next send fails (receiver dropped).
    let (frame_tx, frame_rx) = mpsc::channel();
    std::thread::spawn(move || loop {
        match rx.next() {
            Ok(frame) => {
                if frame_tx.send(frame.to_vec()).is_err() {
                    break;
                }
            }
            // Read errors (e.g. timeouts) are non-fatal; keep polling.
            Err(_) => continue,
        }
    });
    let deadline = Instant::now() + Duration::from_millis(options.duration_ms);
    let mut events = Vec::new();
    // De-duplication keys: transport + 4-tuple; repeats are dropped.
    let mut seen = HashSet::new();
    while Instant::now() < deadline {
        let frame = match frame_rx.recv_timeout(Duration::from_millis(FRAME_RECV_TIMEOUT_MS)) {
            Ok(frame) => frame,
            Err(_) => continue,
        };
        let ethernet = match EthernetPacket::new(&frame) {
            Some(packet) => packet,
            None => continue,
        };
        // Only IPv4/IPv6 payloads are inspected.
        let event = match ethernet.get_ethertype() {
            EtherTypes::Ipv4 => parse_ipv4(
                ethernet.payload(),
                &local_ips,
                &iface_name,
            ),
            EtherTypes::Ipv6 => parse_ipv6(
                ethernet.payload(),
                &local_ips,
                &iface_name,
            ),
            _ => None,
        };
        if let Some(event) = event {
            let key = match &event {
                SensorEvent::Dns(value) => format!(
                    "dns:{:?}|{}|{}|{}|{}",
                    value.transport, value.src_ip, value.src_port, value.dst_ip, value.dst_port
                ),
                SensorEvent::Tcp(value) => format!(
                    "tcp:{}|{}|{}|{}",
                    value.src_ip, value.src_port, value.dst_ip, value.dst_port
                ),
            };
            // First sighting of this flow only.
            if seen.insert(key) {
                match &event {
                    SensorEvent::Dns(value) => {
                        debug!(
                            transport = ?value.transport,
                            src_ip = %value.src_ip,
                            src_port = value.src_port,
                            dst_ip = %value.dst_ip,
                            dst_port = value.dst_port,
                            "dns leak event"
                        );
                    }
                    SensorEvent::Tcp(value) => {
                        debug!(
                            src_ip = %value.src_ip,
                            src_port = value.src_port,
                            dst_ip = %value.dst_ip,
                            dst_port = value.dst_port,
                            "dns leak tcp event"
                        );
                    }
                }
                events.push(event);
            }
        }
    }
    Ok(events)
}
/// Decode an IPv4 packet, keep it only if it involves one of this
/// interface's addresses, then dispatch to the UDP/TCP parsers.
#[cfg(feature = "pcap")]
fn parse_ipv4(
    payload: &[u8],
    local_ips: &[IpAddr],
    iface_name: &str,
) -> Option<SensorEvent> {
    use pnet::packet::ip::IpNextHeaderProtocols;
    use pnet::packet::ipv4::Ipv4Packet;
    use pnet::packet::Packet;
    let packet = Ipv4Packet::new(payload)?;
    let src = IpAddr::V4(packet.get_source());
    let dst = IpAddr::V4(packet.get_destination());
    let involves_local = local_ips.contains(&src) || local_ips.contains(&dst);
    if !involves_local {
        return None;
    }
    match packet.get_next_level_protocol() {
        IpNextHeaderProtocols::Udp => parse_udp(src, dst, packet.payload(), iface_name),
        IpNextHeaderProtocols::Tcp => parse_tcp(src, dst, packet.payload(), iface_name),
        _ => None,
    }
}
/// IPv6 twin of `parse_ipv4`: decode, filter on local addresses, dispatch to
/// the UDP/TCP parsers.
#[cfg(feature = "pcap")]
fn parse_ipv6(
    payload: &[u8],
    local_ips: &[IpAddr],
    iface_name: &str,
) -> Option<SensorEvent> {
    use pnet::packet::ip::IpNextHeaderProtocols;
    use pnet::packet::ipv6::Ipv6Packet;
    use pnet::packet::Packet;
    let packet = Ipv6Packet::new(payload)?;
    let src = IpAddr::V6(packet.get_source());
    let dst = IpAddr::V6(packet.get_destination());
    let involves_local = local_ips.contains(&src) || local_ips.contains(&dst);
    if !involves_local {
        return None;
    }
    match packet.get_next_header() {
        IpNextHeaderProtocols::Udp => parse_udp(src, dst, packet.payload(), iface_name),
        IpNextHeaderProtocols::Tcp => parse_tcp(src, dst, packet.payload(), iface_name),
        _ => None,
    }
}
/// Decode a UDP datagram; emit a DNS sensor event when either port is 53 and
/// the payload parses as a DNS message.
#[cfg(feature = "pcap")]
fn parse_udp(
    src_ip: IpAddr,
    dst_ip: IpAddr,
    payload: &[u8],
    iface_name: &str,
) -> Option<SensorEvent> {
    use pnet::packet::udp::UdpPacket;
    use pnet::packet::Packet;
    let datagram = UdpPacket::new(payload)?;
    let src_port = datagram.get_source();
    let dst_port = datagram.get_destination();
    // Only classic port-53 traffic is considered plaintext UDP DNS.
    let touches_dns_port = src_port == 53 || dst_port == 53;
    if !touches_dns_port {
        return None;
    }
    let dns = parse_dns_message(datagram.payload())?;
    Some(SensorEvent::Dns(ClassifiedEvent {
        timestamp_ms: now_ms(),
        proto: FlowProtocol::Udp,
        src_ip,
        src_port,
        dst_ip,
        dst_port,
        iface_name: Some(iface_name.to_string()),
        transport: LeakTransport::Udp53,
        qname: Some(dns.qname),
        qtype: Some(dns.qtype),
        rcode: Some(dns.rcode),
        is_response: dns.is_response,
        answer_ips: dns.answer_ips,
    }))
}
/// Decode a TCP segment. Destination port 53 becomes a TCP-DNS event and
/// 853 a DoT event (no payload parsing); any other port is reported only as
/// a bare `TcpEvent` when the segment is an initial SYN (SYN set, ACK clear).
#[cfg(feature = "pcap")]
fn parse_tcp(
    src_ip: IpAddr,
    dst_ip: IpAddr,
    payload: &[u8],
    iface_name: &str,
) -> Option<SensorEvent> {
    use pnet::packet::tcp::TcpPacket;
    let segment = TcpPacket::new(payload)?;
    let src_port = segment.get_source();
    let dst_port = segment.get_destination();
    let transport = if dst_port == 53 {
        LeakTransport::Tcp53
    } else if dst_port == 853 {
        LeakTransport::Dot
    } else {
        let flags = segment.get_flags();
        let is_syn = flags & 0x02 != 0; // SYN bit
        let is_ack = flags & 0x10 != 0; // ACK bit
        if is_syn && !is_ack {
            // New outbound connection attempt on a non-DNS port.
            return Some(SensorEvent::Tcp(TcpEvent {
                timestamp_ms: now_ms(),
                src_ip,
                src_port,
                dst_ip,
                dst_port,
                iface_name: Some(iface_name.to_string()),
            }));
        }
        return None;
    };
    Some(SensorEvent::Dns(ClassifiedEvent {
        timestamp_ms: now_ms(),
        proto: FlowProtocol::Tcp,
        src_ip,
        src_port,
        dst_ip,
        dst_port,
        iface_name: Some(iface_name.to_string()),
        transport,
        qname: None,
        qtype: None,
        rcode: None,
        is_response: false,
        answer_ips: Vec::new(),
    }))
}
/// Pick an interface and open a capture channel on it. When `name` is given,
/// only that interface is tried (errors list all candidates for
/// troubleshooting); otherwise the interfaces are tried in preference order
/// (`order_interfaces`) and the first that opens wins.
#[cfg(feature = "pcap")]
fn select_interface(
    name: Option<&str>,
    config: &DatalinkConfig,
) -> Result<(datalink::NetworkInterface, Box<dyn datalink::DataLinkReceiver>), DnsLeakError> {
    let interfaces = datalink::interfaces();
    if let Some(name) = name {
        debug!("dns leak iface pick: requested={name}");
        let iface = interfaces
            .iter()
            .find(|iface| iface.name == name)
            .cloned()
            .ok_or_else(|| {
                DnsLeakError::Io(format!(
                    "interface '{name}' not found; candidates: {}",
                    format_iface_list(&interfaces)
                ))
            })?;
        // Explicit request: do not fall back to other interfaces on failure.
        return open_channel_with_timeout(iface, config).map_err(|err| {
            DnsLeakError::Io(format!(
                "failed to open capture on interface ({err}); candidates: {}",
                format_iface_list(&interfaces)
            ))
        });
    }
    // Auto-pick: try each candidate in preference order, logging failures.
    let ordered = order_interfaces(&interfaces);
    for iface in ordered.iter() {
        debug!("dns leak iface pick: try={}", iface.name);
        match open_channel_with_timeout(iface.clone(), config) {
            Ok(channel) => return Ok(channel),
            Err(err) => {
                debug!(
                    "dns leak iface pick: failed iface={} err={}",
                    iface.name, err
                );
            }
        }
    }
    Err(DnsLeakError::Io(format!(
        "no suitable interface found; candidates: {}",
        format_iface_list(&interfaces)
    )))
}
/// Open an Ethernet capture channel on `iface`, giving up after
/// `OPEN_IFACE_TIMEOUT_MS` — some drivers block indefinitely on open, so the
/// open runs on a helper thread and is raced against a channel timeout.
///
/// NOTE(review): on timeout the helper thread is abandoned; if its open
/// later succeeds, the handle is silently dropped — confirm that is
/// acceptable on the target platforms.
#[cfg(feature = "pcap")]
fn open_channel_with_timeout(
    iface: datalink::NetworkInterface,
    config: &DatalinkConfig,
) -> Result<(datalink::NetworkInterface, Box<dyn datalink::DataLinkReceiver>), String> {
    let (tx, rx) = mpsc::channel();
    let config = config.clone();
    std::thread::spawn(move || {
        let result = match datalink::channel(&iface, config) {
            // Only the receive half is needed; the sender is discarded.
            Ok(Channel::Ethernet(_, rx)) => Ok(rx),
            Ok(_) => Err("unsupported channel".to_string()),
            Err(err) => Err(err.to_string()),
        };
        // Send failure means the caller already timed out; nothing to do.
        let _ = tx.send((iface, result));
    });
    let timeout = Duration::from_millis(OPEN_IFACE_TIMEOUT_MS);
    match rx.recv_timeout(timeout) {
        Ok((iface, Ok(rx))) => Ok((iface, rx)),
        Ok((_iface, Err(err))) => Err(err),
        Err(_) => Err("timeout opening capture".to_string()),
    }
}
/// Heuristic: does this interface NAME look like a physical NIC worth trying
/// first (Wi-Fi / Ethernet / LAN)? Case-insensitive substring match.
///
/// Simplified from six checks to four with identical behavior: any name
/// containing "wlan" also contains "lan", and any name containing
/// "ethernet" also contains "eth", so those two checks were redundant.
#[cfg(feature = "pcap")]
fn is_named_fallback(name: &str) -> bool {
    let name = name.to_ascii_lowercase();
    name.contains("wifi")
        || name.contains("wi-fi")
        || name.contains("eth")
        || name.contains("lan")
}
/// Order capture candidates: non-loopback interfaces that either have a
/// physical-looking name or carry addresses come first, remaining
/// non-loopback interfaces after. If that leaves nothing (only loopbacks, or
/// no interfaces), fall back to the full original list.
#[cfg(feature = "pcap")]
fn order_interfaces(
    interfaces: &[datalink::NetworkInterface],
) -> Vec<datalink::NetworkInterface> {
    let non_loopback: Vec<datalink::NetworkInterface> = interfaces
        .iter()
        .filter(|iface| !iface.is_loopback())
        .cloned()
        .collect();
    let (mut preferred, others): (Vec<_>, Vec<_>) = non_loopback
        .into_iter()
        .partition(|iface| is_named_fallback(&iface.name) || !iface.ips.is_empty());
    preferred.extend(others);
    if preferred.is_empty() {
        interfaces.to_vec()
    } else {
        preferred
    }
}
/// Render interface names as a comma-separated list for error messages;
/// "-" when there are none.
#[cfg(feature = "pcap")]
fn format_iface_list(interfaces: &[datalink::NetworkInterface]) -> String {
    let names: Vec<&str> = interfaces
        .iter()
        .map(|iface| iface.name.as_str())
        .collect();
    if names.is_empty() {
        "-".to_string()
    } else {
        names.join(", ")
    }
}
/// Milliseconds since the Unix epoch; 0 if the system clock is before the
/// epoch (duration_since error swallowed via unwrap_or_default).
#[cfg(feature = "pcap")]
fn now_ms() -> u128 {
    let since_epoch = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default();
    since_epoch.as_millis()
}

View File

@@ -5,8 +5,21 @@ edition = "2024"
[dependencies] [dependencies]
reqwest = { version = "0.11", features = ["rustls-tls"] } reqwest = { version = "0.11", features = ["rustls-tls"] }
rustls = "0.21"
rustls-native-certs = "0.6"
serde = { version = "1", features = ["derive"] } serde = { version = "1", features = ["derive"] }
thiserror = "2" thiserror = "2"
tokio = { version = "1", features = ["net", "time"] } tokio = { version = "1", features = ["net", "time"] }
tokio-rustls = "0.24"
tokio-socks = "0.5"
url = "2" url = "2"
tracing = "0.1" tracing = "0.1"
h3 = { version = "0.0.8", optional = true }
h3-quinn = { version = "0.0.10", optional = true }
quinn = { version = "0.11", optional = true }
http = "1"
webpki-roots = "1"
bytes = "1"
[features]
http3 = ["dep:h3", "dep:h3-quinn", "dep:quinn"]

View File

@@ -1,12 +1,30 @@
use reqwest::{Client, Method, Proxy, StatusCode}; use reqwest::{Client, Method, Proxy, StatusCode};
use rustls::{Certificate, ClientConfig, RootCertStore, ServerName};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::net::{IpAddr, SocketAddr}; use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use tokio::net::lookup_host; use tokio::net::lookup_host;
use thiserror::Error; use thiserror::Error;
use tokio::time::timeout;
use tokio_rustls::TlsConnector;
use tokio_socks::tcp::Socks5Stream;
use tracing::debug; use tracing::debug;
use url::Url; use url::Url;
#[cfg(feature = "http3")]
use bytes::Buf;
#[cfg(feature = "http3")]
use http::Request;
#[cfg(feature = "http3")]
use quinn::ClientConfig as QuinnClientConfig;
#[cfg(feature = "http3")]
use quinn::Endpoint;
#[cfg(feature = "http3")]
use quinn::crypto::rustls::QuicClientConfig;
#[cfg(feature = "http3")]
use webpki_roots::TLS_SERVER_ROOTS;
#[derive(Debug, Error)] #[derive(Debug, Error)]
pub enum HttpError { pub enum HttpError {
#[error("invalid url: {0}")] #[error("invalid url: {0}")]
@@ -36,6 +54,7 @@ pub struct HttpReport {
pub resolved_ips: Vec<String>, pub resolved_ips: Vec<String>,
pub headers: Vec<(String, String)>, pub headers: Vec<(String, String)>,
pub body: Option<String>, pub body: Option<String>,
pub warnings: Vec<String>,
pub timing: HttpTiming, pub timing: HttpTiming,
} }
@@ -64,6 +83,8 @@ pub struct HttpRequestOptions {
pub show_body: bool, pub show_body: bool,
pub http1_only: bool, pub http1_only: bool,
pub http2_only: bool, pub http2_only: bool,
pub http3: bool,
pub http3_only: bool,
pub proxy: Option<String>, pub proxy: Option<String>,
} }
@@ -105,6 +126,43 @@ pub async fn request(url: &str, opts: HttpRequestOptions) -> Result<HttpReport,
} }
let dns_ms = dns_start.elapsed().as_millis(); let dns_ms = dns_start.elapsed().as_millis();
let mut warnings = Vec::new();
if opts.http3 || opts.http3_only {
if !cfg!(feature = "http3") {
warnings.push("http3 feature not enabled in build".to_string());
if opts.http3_only {
return Err(HttpError::Request(
"http3-only requested but feature is not enabled".to_string(),
));
}
}
}
#[cfg(feature = "http3")]
{
if opts.http3 || opts.http3_only {
match http3_request(url, &opts, &resolved_ips, dns_ms).await {
Ok((report, mut h3_warnings)) => {
warnings.append(&mut h3_warnings);
return Ok(HttpReport {
warnings,
..report
});
}
Err(err) => {
let err_string = err.to_string();
let category = classify_http3_error(&err_string);
warnings.push(format!(
"http3 failed (category={category}): {err_string}"
));
if opts.http3_only {
return Err(err);
}
}
}
}
}
let mut builder = Client::builder().timeout(Duration::from_millis(opts.timeout_ms)); let mut builder = Client::builder().timeout(Duration::from_millis(opts.timeout_ms));
builder = if let Some(max) = opts.follow_redirects { builder = if let Some(max) = opts.follow_redirects {
builder.redirect(reqwest::redirect::Policy::limited(max as usize)) builder.redirect(reqwest::redirect::Policy::limited(max as usize))
@@ -132,6 +190,16 @@ pub async fn request(url: &str, opts: HttpRequestOptions) -> Result<HttpReport,
} }
let client = builder.build().map_err(|err| HttpError::Request(err.to_string()))?; let client = builder.build().map_err(|err| HttpError::Request(err.to_string()))?;
let (connect_ms, tls_ms, timing_warnings) = measure_connect_tls(
&parsed,
host,
port,
&resolved_ips,
opts.proxy.as_deref(),
opts.timeout_ms,
)
.await;
warnings.extend(timing_warnings);
let start = Instant::now(); let start = Instant::now();
let response = client let response = client
.request(opts.method.to_reqwest(), parsed.clone()) .request(opts.method.to_reqwest(), parsed.clone())
@@ -184,11 +252,12 @@ pub async fn request(url: &str, opts: HttpRequestOptions) -> Result<HttpReport,
resolved_ips, resolved_ips,
headers, headers,
body, body,
warnings,
timing: HttpTiming { timing: HttpTiming {
total_ms, total_ms,
dns_ms: Some(dns_ms), dns_ms: Some(dns_ms),
connect_ms: None, connect_ms,
tls_ms: None, tls_ms,
ttfb_ms: Some(ttfb_ms), ttfb_ms: Some(ttfb_ms),
}, },
}) })
@@ -197,3 +266,374 @@ pub async fn request(url: &str, opts: HttpRequestOptions) -> Result<HttpReport,
fn status_code(status: StatusCode) -> Option<u16> { fn status_code(status: StatusCode) -> Option<u16> {
Some(status.as_u16()) Some(status.as_u16())
} }
/// Parsed SOCKS5 proxy endpoint: "host:port" plus whether the proxy resolves
/// hostnames itself (socks5h scheme) or the client resolves them (socks5).
struct Socks5Proxy {
    addr: String,
    remote_dns: bool,
}
/// Parse a `socks5://` or `socks5h://` proxy URL into a `Socks5Proxy`.
/// `socks5h` means the proxy performs DNS resolution (remote_dns = true).
///
/// Fix: `Url::port_or_known_default()` has no registered default port for
/// the socks schemes, so `socks5://host` (no explicit port) used to fail
/// with "invalid proxy port". We now fall back to the conventional SOCKS
/// port 1080; URLs with an explicit port behave exactly as before.
///
/// Errors with `HttpError::Request` on an unparsable URL, an unsupported
/// scheme, or a missing host.
fn parse_socks5_proxy(value: &str) -> Result<Socks5Proxy, HttpError> {
    let url = Url::parse(value).map_err(|err| HttpError::Request(err.to_string()))?;
    let scheme = url.scheme();
    let remote_dns = match scheme {
        "socks5" => false,
        "socks5h" => true,
        _ => {
            return Err(HttpError::Request(format!(
                "unsupported proxy scheme: {scheme}"
            )))
        }
    };
    let host = url
        .host_str()
        .ok_or_else(|| HttpError::Request("invalid proxy host".to_string()))?;
    // Explicit port wins; otherwise default to the conventional SOCKS 1080.
    let port = url.port_or_known_default().unwrap_or(1080);
    Ok(Socks5Proxy {
        addr: format!("{host}:{port}"),
        remote_dns,
    })
}
/// Measure TCP connect and (for https) TLS handshake times with a separate,
/// throw-away connection, so the main reqwest request's timing can report
/// connect_ms/tls_ms. Best-effort: every failure path returns `None` timings
/// plus a human-readable warning rather than an error.
///
/// NOTE(review): this probe opens its own connection, so the reported
/// timings describe a parallel handshake, not the one reqwest actually
/// used — confirm that is the intended semantics.
async fn measure_connect_tls(
    parsed: &Url,
    host: &str,
    port: u16,
    resolved_ips: &[String],
    proxy: Option<&str>,
    timeout_ms: u64,
) -> (Option<u128>, Option<u128>, Vec<String>) {
    let mut warnings = Vec::new();
    let scheme = parsed.scheme();
    if scheme != "http" && scheme != "https" {
        warnings.push(format!("timing unavailable for scheme: {scheme}"));
        return (None, None, warnings);
    }
    let timeout_dur = Duration::from_millis(timeout_ms);
    let connect_start = Instant::now();
    // Phase 1: establish the TCP stream, either through a SOCKS5 proxy or
    // directly to the first resolved IP.
    let tcp = if let Some(proxy) = proxy {
        match parse_socks5_proxy(proxy) {
            Ok(proxy) => {
                // socks5h: let the proxy resolve the hostname; socks5: we
                // must hand it an already-resolved IP.
                let target = if proxy.remote_dns {
                    (host, port)
                } else if let Some(ip) = resolved_ips.first() {
                    (ip.as_str(), port)
                } else {
                    warnings.push("no resolved IPs for proxy connect".to_string());
                    return (None, None, warnings);
                };
                match timeout(timeout_dur, Socks5Stream::connect(proxy.addr.as_str(), target))
                    .await
                {
                    Ok(Ok(stream)) => stream.into_inner(),
                    Ok(Err(err)) => {
                        warnings.push(format!("proxy connect failed: {err}"));
                        return (None, None, warnings);
                    }
                    Err(_) => {
                        warnings.push("proxy connect timed out".to_string());
                        return (None, None, warnings);
                    }
                }
            }
            Err(err) => {
                warnings.push(format!("proxy timing skipped: {err}"));
                return (None, None, warnings);
            }
        }
    } else {
        let addr = match resolved_ips.first().and_then(|ip| ip.parse::<IpAddr>().ok()) {
            Some(ip) => SocketAddr::new(ip, port),
            None => {
                warnings.push("no resolved IPs for connect timing".to_string());
                return (None, None, warnings);
            }
        };
        match timeout(timeout_dur, tokio::net::TcpStream::connect(addr)).await {
            Ok(Ok(stream)) => stream,
            Ok(Err(err)) => {
                warnings.push(format!("connect failed: {err}"));
                return (None, None, warnings);
            }
            Err(_) => {
                warnings.push("connect timed out".to_string());
                return (None, None, warnings);
            }
        }
    };
    let connect_ms = connect_start.elapsed().as_millis();
    // Plain http: no TLS phase to measure.
    if scheme == "http" {
        return (Some(connect_ms), None, warnings);
    }
    // Phase 2: TLS handshake over the freshly-connected stream.
    let tls_start = Instant::now();
    let tls = match build_tls_connector() {
        Ok(connector) => connector,
        Err(err) => {
            warnings.push(format!("tls timing skipped: {err}"));
            return (Some(connect_ms), None, warnings);
        }
    };
    let server_name = match ServerName::try_from(host) {
        Ok(name) => name,
        Err(_) => {
            warnings.push("invalid tls server name".to_string());
            return (Some(connect_ms), None, warnings);
        }
    };
    match timeout(timeout_dur, tls.connect(server_name, tcp)).await {
        Ok(Ok(_)) => {}
        Ok(Err(err)) => {
            warnings.push(format!("tls handshake failed: {err}"));
            return (Some(connect_ms), None, warnings);
        }
        Err(_) => {
            warnings.push("tls handshake timed out".to_string());
            return (Some(connect_ms), None, warnings);
        }
    }
    let tls_ms = tls_start.elapsed().as_millis();
    (Some(connect_ms), Some(tls_ms), warnings)
}
/// Build a rustls-based TLS connector trusting the OS-native root
/// certificates; any load/add failure is surfaced as `HttpError::Request`.
fn build_tls_connector() -> Result<TlsConnector, HttpError> {
    let native_certs = rustls_native_certs::load_native_certs()
        .map_err(|err| HttpError::Request(err.to_string()))?;
    let mut roots = RootCertStore::empty();
    for cert in native_certs {
        roots
            .add(&Certificate(cert.0))
            .map_err(|err| HttpError::Request(err.to_string()))?;
    }
    let config = ClientConfig::builder()
        .with_safe_defaults()
        .with_root_certificates(roots)
        .with_no_client_auth();
    Ok(TlsConnector::from(Arc::new(config)))
}
/// Map an HTTP/3 failure message onto a coarse diagnostic category used in
/// the report's warnings (timeout / resolve / udp_blocked / connect / alpn /
/// tls / permission / unknown). Case-insensitive, first match wins, checked
/// in the same order as the original chain.
#[cfg(feature = "http3")]
fn classify_http3_error(message: &str) -> &'static str {
    let message = message.to_ascii_lowercase();
    let has = |needle: &str| message.contains(needle);
    if has("timeout") || has("timed out") {
        "timeout"
    } else if has("no resolved ips") || has("no addresses resolved") {
        "resolve"
    } else if has("udp") && has("blocked") {
        "udp_blocked"
    } else if has("quic") || has("connection refused") {
        "connect"
    } else if has("alpn") || has("application protocol") {
        "alpn"
    } else if has("tls") || has("certificate") || has("crypto") || has("handshake") {
        "tls"
    } else if has("permission denied") || has("access is denied") {
        "permission"
    } else {
        "unknown"
    }
}
#[cfg(feature = "http3")]
/// Perform a single HTTP/3 request over QUIC (quinn + h3).
///
/// Requires an `https` URL and no proxy (both are rejected up front).
/// Tries each pre-resolved IP in order until one QUIC connection succeeds,
/// then issues the request and assembles an `HttpReport`.
///
/// Returns the report plus a list of warnings accumulated along the way
/// (per-IP connect failures, timing caveats). `dns_ms` is the resolution
/// time measured by the caller and is passed through into the report.
async fn http3_request(
    url: &str,
    opts: &HttpRequestOptions,
    resolved_ips: &[String],
    dns_ms: u128,
) -> Result<(HttpReport, Vec<String>), HttpError> {
    let mut warnings = Vec::new();
    let parsed = Url::parse(url).map_err(|err| HttpError::Url(err.to_string()))?;
    // HTTP/3 is TLS-only; plain http cannot be upgraded here.
    if parsed.scheme() != "https" {
        return Err(HttpError::Request("http3 requires https scheme".to_string()));
    }
    if opts.proxy.is_some() {
        return Err(HttpError::Request(
            "http3 proxying is not supported".to_string(),
        ));
    }
    let host = parsed
        .host_str()
        .ok_or_else(|| HttpError::Url("missing host".to_string()))?;
    let port = parsed
        .port_or_known_default()
        .ok_or_else(|| HttpError::Url("missing port".to_string()))?;
    let quinn_config = build_quinn_config()?;
    // Keep only the resolved entries that parse as IP addresses.
    let candidates = resolved_ips
        .iter()
        .filter_map(|value| value.parse::<IpAddr>().ok())
        .collect::<Vec<_>>();
    if candidates.is_empty() {
        return Err(HttpError::Request("no resolved IPs for http3".to_string()));
    }
    // The endpoint must outlive the connection; keep the winning one alive
    // in `endpoint_guard` until the request completes.
    let mut endpoint_guard = None;
    let mut connection = None;
    let mut connect_ms = None;
    for ip in candidates {
        // Bind a wildcard UDP socket of the matching address family.
        let bind_addr = match ip {
            IpAddr::V4(_) => "0.0.0.0:0",
            IpAddr::V6(_) => "[::]:0",
        };
        let mut endpoint = Endpoint::client(bind_addr.parse().unwrap())
            .map_err(|err| HttpError::Request(err.to_string()))?;
        endpoint.set_default_client_config(quinn_config.clone());
        let connect_start = Instant::now();
        // SNI is the URL host, not the literal IP.
        let connecting = match endpoint.connect(SocketAddr::new(ip, port), host) {
            Ok(connecting) => connecting,
            Err(err) => {
                warnings.push(format!("http3 connect failed to {ip}: {err}"));
                continue;
            }
        };
        match timeout(Duration::from_millis(opts.timeout_ms), connecting).await {
            Ok(Ok(conn)) => {
                connect_ms = Some(connect_start.elapsed().as_millis());
                connection = Some(conn);
                endpoint_guard = Some(endpoint);
                break;
            }
            Ok(Err(err)) => {
                warnings.push(format!("http3 connect failed to {ip}: {err}"));
            }
            Err(_) => {
                warnings.push(format!("http3 connect to {ip} timed out"));
            }
        }
    }
    let connection = connection.ok_or_else(|| {
        HttpError::Request("http3 connect failed for all resolved IPs".to_string())
    })?;
    let connect_ms = connect_ms.unwrap_or_default();
    let conn = h3_quinn::Connection::new(connection);
    let (mut driver, mut send_request) = h3::client::new(conn)
        .await
        .map_err(|err| HttpError::Request(err.to_string()))?;
    // The h3 driver must be polled for the connection to make progress;
    // run it on its own task until the connection goes idle.
    tokio::spawn(async move {
        let _ = driver.wait_idle().await;
    });
    let start = Instant::now();
    let method = match opts.method {
        HttpMethod::Head => http::Method::HEAD,
        HttpMethod::Get => http::Method::GET,
    };
    let request = Request::builder()
        .method(method)
        .uri(parsed.as_str())
        .header("user-agent", "wtfnet")
        .body(())
        .map_err(|err| HttpError::Request(err.to_string()))?;
    let mut stream = send_request
        .send_request(request)
        .await
        .map_err(|err| HttpError::Request(err.to_string()))?;
    // No request body: close the send side so the server can respond.
    stream
        .finish()
        .await
        .map_err(|err| HttpError::Request(err.to_string()))?;
    let response = stream
        .recv_response()
        .await
        .map_err(|err| HttpError::Response(err.to_string()))?;
    // Time-to-first-byte, measured from just before the request was built.
    let ttfb_ms = start.elapsed().as_millis();
    let status = response.status();
    let final_url = parsed.to_string();
    let headers = if opts.show_headers {
        response
            .headers()
            .iter()
            .map(|(name, value)| {
                // Non-UTF8 header values are rendered as "-".
                let value = value.to_str().unwrap_or("-").to_string();
                (name.to_string(), value)
            })
            .collect::<Vec<_>>()
    } else {
        Vec::new()
    };
    let body = if opts.show_body {
        let mut buf = Vec::new();
        while let Some(chunk) = stream
            .recv_data()
            .await
            .map_err(|err| HttpError::Response(err.to_string()))?
        {
            let mut chunk = chunk;
            while chunk.has_remaining() {
                let bytes = chunk.copy_to_bytes(chunk.remaining());
                buf.extend_from_slice(&bytes);
            }
            // Cap the captured body; stop reading once the limit is hit.
            if buf.len() >= opts.max_body_bytes {
                buf.truncate(opts.max_body_bytes);
                break;
            }
        }
        Some(String::from_utf8_lossy(&buf).to_string())
    } else {
        None
    };
    let total_ms = start.elapsed().as_millis();
    // QUIC folds the TLS handshake into connection setup, so the separate
    // tls/connect split used by HTTP/1-2 does not apply exactly here.
    warnings.push("http3 timing for tls/connect is best-effort".to_string());
    // Keep the endpoint alive until here so the connection isn't torn down.
    let _endpoint_guard = endpoint_guard;
    let report = HttpReport {
        url: url.to_string(),
        final_url: Some(final_url),
        method: match opts.method {
            HttpMethod::Head => "HEAD".to_string(),
            HttpMethod::Get => "GET".to_string(),
        },
        status: Some(status.as_u16()),
        http_version: Some("HTTP/3".to_string()),
        resolved_ips: resolved_ips.to_vec(),
        headers,
        body,
        // Warnings are returned separately; the caller merges them.
        warnings: Vec::new(),
        timing: HttpTiming {
            total_ms,
            dns_ms: Some(dns_ms),
            connect_ms: Some(connect_ms),
            tls_ms: None,
            ttfb_ms: Some(ttfb_ms),
        },
    };
    Ok((report, warnings))
}
#[cfg(feature = "http3")]
/// Assemble the quinn client configuration used for HTTP/3: webpki trust
/// roots, ALPN pinned to "h3", and a 5-second QUIC keep-alive interval.
fn build_quinn_config() -> Result<QuinnClientConfig, HttpError> {
    // TLS layer: standard roots, no client certificate, ALPN "h3".
    let mut root_store = quinn::rustls::RootCertStore::empty();
    root_store.extend(TLS_SERVER_ROOTS.iter().cloned());
    let mut tls = quinn::rustls::ClientConfig::builder()
        .with_root_certificates(root_store)
        .with_no_client_auth();
    tls.alpn_protocols = vec![b"h3".to_vec()];
    // Wrap the rustls config for quinn's QUIC crypto layer.
    let quic_crypto = QuicClientConfig::try_from(tls)
        .map_err(|err| HttpError::Request(format!("quinn config error: {err}")))?;
    let mut config = QuinnClientConfig::new(Arc::new(quic_crypto));
    // Periodic keep-alives stop idle connections from timing out mid-request.
    let mut transport = quinn::TransportConfig::default();
    transport.keep_alive_interval(Some(Duration::from_secs(5)));
    config.transport_config(Arc::new(transport));
    Ok(config)
}

View File

@@ -2,11 +2,13 @@ use async_trait::async_trait;
use network_interface::{Addr, NetworkInterface, NetworkInterfaceConfig}; use network_interface::{Addr, NetworkInterface, NetworkInterfaceConfig};
use sha2::Digest; use sha2::Digest;
use std::collections::HashMap; use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc; use std::sync::Arc;
use wtfnet_core::ErrorCode; use wtfnet_core::ErrorCode;
use wtfnet_platform::{ use wtfnet_platform::{
CertProvider, DnsConfigSnapshot, ListenSocket, NeighborEntry, NeighProvider, NetInterface, CertProvider, ConnSocket, DnsConfigSnapshot, FlowOwner, FlowOwnerConfidence, FlowOwnerProvider,
Platform, PlatformError, PortsProvider, RootCert, RouteEntry, SysProvider, FlowOwnerResult, FlowProtocol, FlowTuple, ListenSocket, NeighborEntry, NeighProvider,
NetInterface, Platform, PlatformError, PortsProvider, RootCert, RouteEntry, SysProvider,
}; };
use x509_parser::oid_registry::{ use x509_parser::oid_registry::{
OID_KEY_TYPE_DSA, OID_KEY_TYPE_EC_PUBLIC_KEY, OID_KEY_TYPE_GOST_R3410_2012_256, OID_KEY_TYPE_DSA, OID_KEY_TYPE_EC_PUBLIC_KEY, OID_KEY_TYPE_GOST_R3410_2012_256,
@@ -19,6 +21,7 @@ pub fn platform() -> Platform {
ports: Arc::new(LinuxPortsProvider), ports: Arc::new(LinuxPortsProvider),
cert: Arc::new(LinuxCertProvider), cert: Arc::new(LinuxCertProvider),
neigh: Arc::new(LinuxNeighProvider), neigh: Arc::new(LinuxNeighProvider),
flow_owner: Arc::new(LinuxFlowOwnerProvider),
} }
} }
@@ -26,6 +29,7 @@ struct LinuxSysProvider;
struct LinuxPortsProvider; struct LinuxPortsProvider;
struct LinuxCertProvider; struct LinuxCertProvider;
struct LinuxNeighProvider; struct LinuxNeighProvider;
struct LinuxFlowOwnerProvider;
#[async_trait] #[async_trait]
impl SysProvider for LinuxSysProvider { impl SysProvider for LinuxSysProvider {
@@ -240,6 +244,63 @@ fn parse_linux_tcp_with_inode_map(
Ok(sockets) Ok(sockets)
} }
/// Parse a /proc/net/tcp or /proc/net/tcp6 table into `ConnSocket`s,
/// skipping LISTEN entries and attaching process info via the socket-inode
/// map where available.
fn parse_linux_tcp_conns(
    path: &str,
    is_v6: bool,
    inode_map: &HashMap<String, ProcInfo>,
) -> Result<Vec<ConnSocket>, PlatformError> {
    let contents = std::fs::read_to_string(path)
        .map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
    let mut out = Vec::new();
    // The first line is the column header.
    for line in contents.lines().skip(1) {
        let fields: Vec<&str> = line.split_whitespace().collect();
        if fields.len() < 4 {
            continue;
        }
        let state = fields[3];
        // "0A" is LISTEN; listeners are reported by a separate code path.
        if state == "0A" {
            continue;
        }
        let local_addr = match parse_proc_socket_addr(fields[1], is_v6) {
            Some(addr) => addr,
            None => continue,
        };
        let remote_addr = match parse_proc_socket_addr(fields[2], is_v6) {
            Some(addr) => addr,
            None => continue,
        };
        // Column 9 (when present) is the socket inode; resolve it to a process.
        let proc_info = fields.get(9).and_then(|inode| inode_map.get(*inode));
        let (pid, ppid, process_name, process_path) = match proc_info {
            Some(info) => (
                Some(info.pid),
                info.ppid,
                info.name.clone(),
                info.path.clone(),
            ),
            None => (None, None, None, None),
        };
        out.push(ConnSocket {
            proto: "tcp".to_string(),
            local_addr,
            remote_addr,
            state: Some(map_tcp_state(state)),
            pid,
            ppid,
            process_name,
            process_path,
        });
    }
    Ok(out)
}
fn parse_linux_udp_with_inode_map( fn parse_linux_udp_with_inode_map(
path: &str, path: &str,
is_v6: bool, is_v6: bool,
@@ -286,6 +347,24 @@ fn parse_linux_udp_with_inode_map(
Ok(sockets) Ok(sockets)
} }
/// Translate a /proc/net/tcp hexadecimal state code into the kernel's TCP
/// state name; unrecognized codes become "UNKNOWN".
fn map_tcp_state(value: &str) -> String {
    const STATES: [(&str, &str); 11] = [
        ("01", "ESTABLISHED"),
        ("02", "SYN_SENT"),
        ("03", "SYN_RECV"),
        ("04", "FIN_WAIT1"),
        ("05", "FIN_WAIT2"),
        ("06", "TIME_WAIT"),
        ("07", "CLOSE"),
        ("08", "CLOSE_WAIT"),
        ("09", "LAST_ACK"),
        ("0A", "LISTEN"),
        ("0B", "CLOSING"),
    ];
    STATES
        .iter()
        .find(|(code, _)| *code == value)
        .map(|(_, name)| (*name).to_string())
        .unwrap_or_else(|| "UNKNOWN".to_string())
}
fn parse_proc_socket_addr(value: &str, is_v6: bool) -> Option<String> { fn parse_proc_socket_addr(value: &str, is_v6: bool) -> Option<String> {
let mut parts = value.split(':'); let mut parts = value.split(':');
let addr_hex = parts.next()?; let addr_hex = parts.next()?;
@@ -300,6 +379,20 @@ fn parse_proc_socket_addr(value: &str, is_v6: bool) -> Option<String> {
} }
} }
/// Decode a /proc/net "HEXADDR:HEXPORT" column into a `SocketAddr`.
/// Returns None when either half is missing or fails to parse.
fn parse_proc_socket_addr_value(value: &str, is_v6: bool) -> Option<SocketAddr> {
    let mut halves = value.split(':');
    let addr_hex = halves.next()?;
    let port_hex = halves.next()?;
    // The port is hex-encoded, e.g. "01BB" == 443.
    let port = u16::from_str_radix(port_hex, 16).ok()?;
    let ip = if is_v6 {
        IpAddr::V6(parse_ipv6_hex(addr_hex)?)
    } else {
        IpAddr::V4(parse_ipv4_hex(addr_hex)?)
    };
    Some(SocketAddr::new(ip, port))
}
fn parse_linux_arp(contents: &str) -> Vec<NeighborEntry> { fn parse_linux_arp(contents: &str) -> Vec<NeighborEntry> {
let mut neighbors = Vec::new(); let mut neighbors = Vec::new();
for (idx, line) in contents.lines().enumerate() { for (idx, line) in contents.lines().enumerate() {
@@ -407,6 +500,138 @@ fn read_ppid(pid: u32) -> Option<u32> {
Some(ppid) Some(ppid)
} }
// One row of a /proc/net socket table reduced to what flow matching needs:
// the endpoint pair plus the socket inode, which is the key later used to
// map the socket back to its owning process.
#[derive(Clone)]
struct ProcSocketEntry {
    local: SocketAddr,
    remote: SocketAddr,
    // Socket inode as the raw decimal string from column 9 of /proc/net.
    inode: String,
}
/// Parse a /proc/net/{tcp,udp}{,6} table into (local, remote, inode)
/// entries. Rows with fewer than 10 columns (no inode) or unparsable
/// addresses are silently skipped.
fn parse_proc_socket_entries(
    path: &str,
    is_v6: bool,
) -> Result<Vec<ProcSocketEntry>, PlatformError> {
    let contents = std::fs::read_to_string(path)
        .map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
    let mut out = Vec::new();
    // Skip the header row; require the inode column to be present.
    for line in contents.lines().skip(1) {
        let fields: Vec<&str> = line.split_whitespace().collect();
        if fields.len() < 10 {
            continue;
        }
        let inode = fields[9].to_string();
        let parsed = (
            parse_proc_socket_addr_value(fields[1], is_v6),
            parse_proc_socket_addr_value(fields[2], is_v6),
        );
        if let (Some(local), Some(remote)) = parsed {
            out.push(ProcSocketEntry {
                local,
                remote,
                inode,
            });
        }
    }
    Ok(out)
}
// Find the first /proc socket entry matching `flow`.
//
// The local side matches when the ports are equal AND the local address is
// either an exact match, a wildcard bind (unspecified), or a loopback bind
// paired with a loopback source. Note the precedence in that last clause:
// `&&` binds tighter than `||`, so both sides must be loopback.
//
// When `match_remote` is true (TCP) the remote endpoint must also match,
// yielding High confidence; otherwise the first local match wins with
// Medium confidence (used for UDP, which has no fixed peer).
fn match_flow_entry<'a>(
    flow: &FlowTuple,
    entries: &'a [ProcSocketEntry],
    match_remote: bool,
) -> Option<(&'a ProcSocketEntry, FlowOwnerConfidence)> {
    for entry in entries {
        let local_match = entry.local.port() == flow.src_port
            && (entry.local.ip() == flow.src_ip
                || entry.local.ip().is_unspecified()
                || entry.local.ip().is_loopback() && flow.src_ip.is_loopback());
        if !local_match {
            continue;
        }
        if match_remote {
            // Remote may also be unspecified for not-yet-connected sockets.
            let remote_match = entry.remote.port() == flow.dst_port
                && (entry.remote.ip() == flow.dst_ip
                    || entry.remote.ip().is_unspecified());
            if remote_match {
                return Some((entry, FlowOwnerConfidence::High));
            }
        } else {
            return Some((entry, FlowOwnerConfidence::Medium));
        }
    }
    None
}
// Resolve which process owns `flow` by scanning the relevant /proc/net
// tables (v4 + v6) and mapping the matched socket's inode to a process.
//
// Never fails on a non-match: missing matches are reported through
// `FlowOwnerResult.confidence` / `failure_reason` instead of an Err.
fn resolve_flow_owner(
    flow: &FlowTuple,
) -> Result<FlowOwnerResult, PlatformError> {
    let inode_map = build_inode_map();
    // Load both the IPv4 and IPv6 tables for the flow's protocol.
    let entries = match flow.proto {
        FlowProtocol::Tcp => {
            let mut out = parse_proc_socket_entries("/proc/net/tcp", false)?;
            out.extend(parse_proc_socket_entries("/proc/net/tcp6", true)?);
            out
        }
        FlowProtocol::Udp => {
            let mut out = parse_proc_socket_entries("/proc/net/udp", false)?;
            out.extend(parse_proc_socket_entries("/proc/net/udp6", true)?);
            out
        }
    };
    // TCP requires the remote endpoint to match; UDP matches local-only.
    let match_remote = matches!(flow.proto, FlowProtocol::Tcp);
    // NOTE(review): for UDP the first attempt already ran with
    // match_remote=false, so this fallback repeats the same lookup;
    // harmless but redundant.
    let matched = match_flow_entry(flow, &entries, match_remote)
        .or_else(|| {
            if matches!(flow.proto, FlowProtocol::Udp) {
                match_flow_entry(flow, &entries, false)
            } else {
                None
            }
        });
    let (entry, confidence) = match matched {
        Some(value) => value,
        None => {
            return Ok(FlowOwnerResult {
                owner: None,
                confidence: FlowOwnerConfidence::None,
                failure_reason: Some("no socket match".to_string()),
            })
        }
    };
    // Map the socket inode back to its process, when the inode map has it.
    let owner = inode_map.get(&entry.inode).map(|info| FlowOwner {
        pid: Some(info.pid),
        ppid: info.ppid,
        process_name: info.name.clone(),
        process_path: info.path.clone(),
    });
    // A socket matched but no process claims its inode: degrade to Low.
    if owner.is_none() {
        return Ok(FlowOwnerResult {
            owner: None,
            confidence: FlowOwnerConfidence::Low,
            failure_reason: Some("socket owner not found".to_string()),
        });
    }
    Ok(FlowOwnerResult {
        owner,
        confidence,
        failure_reason: None,
    })
}
fn load_native_roots(store: &str) -> Result<Vec<RootCert>, PlatformError> { fn load_native_roots(store: &str) -> Result<Vec<RootCert>, PlatformError> {
let certs = rustls_native_certs::load_native_certs() let certs = rustls_native_certs::load_native_certs()
.map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?; .map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
@@ -518,6 +743,22 @@ impl PortsProvider for LinuxPortsProvider {
.filter(|socket| extract_port(&socket.local_addr) == Some(port)) .filter(|socket| extract_port(&socket.local_addr) == Some(port))
.collect()) .collect())
} }
    /// Enumerate current (non-listening) TCP connections from
    /// /proc/net/tcp and /proc/net/tcp6, enriched with owning-process
    /// details resolved through socket inodes.
    async fn connections(&self) -> Result<Vec<ConnSocket>, PlatformError> {
        let inode_map = build_inode_map();
        let mut sockets = Vec::new();
        sockets.extend(parse_linux_tcp_conns(
            "/proc/net/tcp",
            false,
            &inode_map,
        )?);
        sockets.extend(parse_linux_tcp_conns(
            "/proc/net/tcp6",
            true,
            &inode_map,
        )?);
        Ok(sockets)
    }
} }
#[async_trait] #[async_trait]
@@ -535,3 +776,10 @@ impl NeighProvider for LinuxNeighProvider {
Ok(parse_linux_arp(&contents)) Ok(parse_linux_arp(&contents))
} }
} }
#[async_trait]
impl FlowOwnerProvider for LinuxFlowOwnerProvider {
    // Delegates to the /proc-based resolver. The trait takes FlowTuple by
    // value; the synchronous helper only needs a borrow.
    async fn owner_of(&self, flow: FlowTuple) -> Result<FlowOwnerResult, PlatformError> {
        resolve_flow_owner(&flow)
    }
}

View File

@@ -2,6 +2,7 @@ use async_trait::async_trait;
use network_interface::{Addr, NetworkInterface, NetworkInterfaceConfig}; use network_interface::{Addr, NetworkInterface, NetworkInterfaceConfig};
use regex::Regex; use regex::Regex;
use std::collections::HashMap; use std::collections::HashMap;
use std::net::{IpAddr, SocketAddr};
use sha2::Digest; use sha2::Digest;
use x509_parser::oid_registry::{ use x509_parser::oid_registry::{
OID_KEY_TYPE_DSA, OID_KEY_TYPE_EC_PUBLIC_KEY, OID_KEY_TYPE_GOST_R3410_2012_256, OID_KEY_TYPE_DSA, OID_KEY_TYPE_EC_PUBLIC_KEY, OID_KEY_TYPE_GOST_R3410_2012_256,
@@ -10,8 +11,9 @@ use x509_parser::oid_registry::{
use std::sync::Arc; use std::sync::Arc;
use wtfnet_core::ErrorCode; use wtfnet_core::ErrorCode;
use wtfnet_platform::{ use wtfnet_platform::{
CertProvider, DnsConfigSnapshot, ListenSocket, NeighborEntry, NeighProvider, NetInterface, CertProvider, ConnSocket, DnsConfigSnapshot, FlowOwner, FlowOwnerConfidence, FlowOwnerProvider,
Platform, PlatformError, PortsProvider, RootCert, RouteEntry, SysProvider, FlowOwnerResult, FlowProtocol, FlowTuple, ListenSocket, NeighborEntry, NeighProvider,
NetInterface, Platform, PlatformError, PortsProvider, RootCert, RouteEntry, SysProvider,
}; };
pub fn platform() -> Platform { pub fn platform() -> Platform {
@@ -20,6 +22,7 @@ pub fn platform() -> Platform {
ports: Arc::new(WindowsPortsProvider), ports: Arc::new(WindowsPortsProvider),
cert: Arc::new(WindowsCertProvider), cert: Arc::new(WindowsCertProvider),
neigh: Arc::new(WindowsNeighProvider), neigh: Arc::new(WindowsNeighProvider),
flow_owner: Arc::new(WindowsFlowOwnerProvider),
} }
} }
@@ -27,6 +30,7 @@ struct WindowsSysProvider;
struct WindowsPortsProvider; struct WindowsPortsProvider;
struct WindowsCertProvider; struct WindowsCertProvider;
struct WindowsNeighProvider; struct WindowsNeighProvider;
struct WindowsFlowOwnerProvider;
#[async_trait] #[async_trait]
impl SysProvider for WindowsSysProvider { impl SysProvider for WindowsSysProvider {
@@ -333,6 +337,33 @@ fn parse_windows_listeners() -> Result<Vec<ListenSocket>, PlatformError> {
Ok(sockets) Ok(sockets)
} }
/// Snapshot current TCP connections on Windows by shelling out to
/// `netstat -ano`, then attach process name/path via the PID map.
fn parse_windows_connections() -> Result<Vec<ConnSocket>, PlatformError> {
    let proc_map = load_windows_process_map();
    let output = std::process::Command::new("netstat")
        .arg("-ano")
        .output()
        .map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
    if !output.status.success() {
        return Err(PlatformError::new(ErrorCode::IoError, "netstat -ano failed"));
    }
    let stdout = String::from_utf8_lossy(&output.stdout);
    let mut sockets = Vec::new();
    for raw in stdout.lines() {
        let line = raw.trim();
        // Only TCP rows carry the state column this parser relies on.
        if !line.starts_with("TCP") {
            continue;
        }
        if let Some(mut socket) = parse_netstat_tcp_conn_line(line) {
            enrich_conn_socket(&mut socket, &proc_map);
            sockets.push(socket);
        }
    }
    Ok(sockets)
}
fn parse_netstat_tcp_line(line: &str) -> Option<ListenSocket> { fn parse_netstat_tcp_line(line: &str) -> Option<ListenSocket> {
let parts: Vec<&str> = line.split_whitespace().collect(); let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 5 { if parts.len() < 5 {
@@ -358,6 +389,32 @@ fn parse_netstat_tcp_line(line: &str) -> Option<ListenSocket> {
}) })
} }
/// Parse one `netstat -ano` TCP row (proto, local, remote, state, pid)
/// into a `ConnSocket`. Listening rows and short rows yield None.
fn parse_netstat_tcp_conn_line(line: &str) -> Option<ConnSocket> {
    let fields: Vec<&str> = line.split_whitespace().collect();
    if fields.len() < 5 {
        return None;
    }
    let state = fields[3];
    // Listeners are reported by a separate code path.
    if state == "LISTENING" {
        return None;
    }
    Some(ConnSocket {
        proto: "tcp".to_string(),
        local_addr: fields[1].to_string(),
        remote_addr: fields[2].to_string(),
        state: Some(state.to_string()),
        // A non-numeric PID column simply leaves pid unset.
        pid: fields[4].parse::<u32>().ok(),
        ppid: None,
        process_name: None,
        process_path: None,
    })
}
fn parse_netstat_udp_line(line: &str) -> Option<ListenSocket> { fn parse_netstat_udp_line(line: &str) -> Option<ListenSocket> {
let parts: Vec<&str> = line.split_whitespace().collect(); let parts: Vec<&str> = line.split_whitespace().collect();
if parts.len() < 4 { if parts.len() < 4 {
@@ -429,6 +486,17 @@ fn enrich_socket(socket: &mut ListenSocket, map: &HashMap<u32, ProcInfo>) {
} }
} }
/// Fill in process name/path on a connection socket from the PID map.
/// A missing pid or an unknown pid leaves the socket unchanged.
fn enrich_conn_socket(socket: &mut ConnSocket, map: &HashMap<u32, ProcInfo>) {
    if let Some(info) = socket.pid.and_then(|pid| map.get(&pid)) {
        socket.process_name = info.name.clone();
        socket.process_path = info.path.clone();
    }
}
#[derive(Clone)] #[derive(Clone)]
struct ProcInfo { struct ProcInfo {
name: Option<String>, name: Option<String>,
@@ -515,6 +583,155 @@ fn parse_csv_line(line: &str) -> Vec<String> {
out out
} }
// One `netstat -ano` row reduced to what flow matching needs: protocol,
// endpoints, and the owning pid.
#[derive(Clone)]
struct FlowEntry {
    proto: FlowProtocol,
    local: SocketAddr,
    // Always None for UDP rows; the parser never reads a UDP remote column.
    remote: Option<SocketAddr>,
    pid: u32,
}
// Run `netstat -ano` and parse its TCP and UDP rows into FlowEntry values.
//
// TCP rows in LISTENING state are skipped (only live flows are wanted);
// UDP rows have no state/remote columns, so only local endpoint and pid
// are captured. Rows with unparsable addresses or pids are dropped.
fn parse_netstat_flow_entries() -> Result<Vec<FlowEntry>, PlatformError> {
    let output = std::process::Command::new("netstat")
        .arg("-ano")
        .output()
        .map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
    if !output.status.success() {
        return Err(PlatformError::new(ErrorCode::IoError, "netstat -ano failed"));
    }
    let text = String::from_utf8_lossy(&output.stdout);
    let mut entries = Vec::new();
    for line in text.lines() {
        let trimmed = line.trim();
        if trimmed.starts_with("TCP") {
            // TCP layout: proto, local, remote, state, pid.
            let parts: Vec<&str> = trimmed.split_whitespace().collect();
            if parts.len() < 5 {
                continue;
            }
            let state = parts[3];
            if state == "LISTENING" {
                continue;
            }
            let local = match parse_netstat_addr(parts[1]) {
                Some(addr) => addr,
                None => continue,
            };
            let remote = match parse_netstat_addr(parts[2]) {
                Some(addr) => addr,
                None => continue,
            };
            let pid = match parts[4].parse::<u32>() {
                Ok(pid) => pid,
                Err(_) => continue,
            };
            entries.push(FlowEntry {
                proto: FlowProtocol::Tcp,
                local,
                remote: Some(remote),
                pid,
            });
        } else if trimmed.starts_with("UDP") {
            // UDP layout: proto, local, remote("*:*"), pid — no state column.
            let parts: Vec<&str> = trimmed.split_whitespace().collect();
            if parts.len() < 4 {
                continue;
            }
            let local = match parse_netstat_addr(parts[1]) {
                Some(addr) => addr,
                None => continue,
            };
            let pid = match parts[3].parse::<u32>() {
                Ok(pid) => pid,
                Err(_) => continue,
            };
            entries.push(FlowEntry {
                proto: FlowProtocol::Udp,
                local,
                remote: None,
                pid,
            });
        }
    }
    Ok(entries)
}
/// Parse a netstat address column ("ip:port" or "[ipv6]:port", possibly
/// with a "%zone" scope id) into a `SocketAddr`.
///
/// Returns None for the "*:*" wildcard and for anything malformed. Uses
/// checked slicing throughout: the previous version indexed `rest[end+2..]`
/// after finding `]`, which panicked on a bracketed address with no port
/// (e.g. "[::1]") and silently assumed the byte after `]` was ':'.
fn parse_netstat_addr(value: &str) -> Option<SocketAddr> {
    let value = value.trim();
    if value == "*:*" {
        return None;
    }
    if let Some(rest) = value.strip_prefix('[') {
        // Bracketed IPv6 form: "[addr]:port".
        let end = rest.find(']')?;
        let host = &rest[..end];
        // Everything after "]" must be ":port"; checked access avoids a
        // panic when the port is missing or the separator is wrong.
        let port = rest.get(end + 1..)?.strip_prefix(':')?.parse::<u16>().ok()?;
        // Strip any "%zone" scope id before parsing the address itself.
        let host = host.split('%').next().unwrap_or(host);
        let ip: IpAddr = host.parse().ok()?;
        return Some(SocketAddr::new(ip, port));
    }
    // IPv4 (unbracketed) form: split on the LAST ':' so the host keeps
    // any earlier colons.
    let pos = value.rfind(':')?;
    let host = &value[..pos];
    let port = value[pos + 1..].parse::<u16>().ok()?;
    let ip: IpAddr = host.parse().ok()?;
    Some(SocketAddr::new(ip, port))
}
// Resolve which process owns `flow` on Windows using `netstat -ano` output
// plus the PID → process-info map.
//
// TCP needs both endpoints to match (High confidence); UDP matches on the
// local endpoint only (Medium). Non-matches are reported via the result's
// confidence/failure_reason, not as an Err.
fn resolve_flow_owner(flow: &FlowTuple) -> Result<FlowOwnerResult, PlatformError> {
    let entries = parse_netstat_flow_entries()?;
    let proc_map = load_windows_process_map();
    let mut matched: Option<(u32, FlowOwnerConfidence)> = None;
    for entry in entries {
        if entry.proto != flow.proto {
            continue;
        }
        // Exact local match only — no wildcard/loopback leniency here,
        // since netstat reports concrete bound addresses.
        let local_match = entry.local.ip() == flow.src_ip && entry.local.port() == flow.src_port;
        if !local_match {
            continue;
        }
        match flow.proto {
            FlowProtocol::Tcp => {
                if let Some(remote) = entry.remote {
                    if remote.ip() == flow.dst_ip && remote.port() == flow.dst_port {
                        matched = Some((entry.pid, FlowOwnerConfidence::High));
                        break;
                    }
                }
            }
            FlowProtocol::Udp => {
                matched = Some((entry.pid, FlowOwnerConfidence::Medium));
                break;
            }
        }
    }
    let (pid, confidence) = match matched {
        Some(value) => value,
        None => {
            return Ok(FlowOwnerResult {
                owner: None,
                confidence: FlowOwnerConfidence::None,
                failure_reason: Some("no socket match".to_string()),
            })
        }
    };
    // The owner is reported even when the pid is absent from the process
    // map; name/path are simply left unset in that case.
    let info = proc_map.get(&pid);
    let owner = Some(FlowOwner {
        pid: Some(pid),
        ppid: None,
        process_name: info.and_then(|value| value.name.clone()),
        process_path: info.and_then(|value| value.path.clone()),
    });
    Ok(FlowOwnerResult {
        owner,
        confidence,
        failure_reason: None,
    })
}
fn load_native_roots(store: &str) -> Result<Vec<RootCert>, PlatformError> { fn load_native_roots(store: &str) -> Result<Vec<RootCert>, PlatformError> {
let certs = rustls_native_certs::load_native_certs() let certs = rustls_native_certs::load_native_certs()
.map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?; .map_err(|err| PlatformError::new(ErrorCode::IoError, err.to_string()))?;
@@ -605,6 +822,10 @@ impl PortsProvider for WindowsPortsProvider {
.filter(|socket| extract_port(&socket.local_addr) == Some(port)) .filter(|socket| extract_port(&socket.local_addr) == Some(port))
.collect()) .collect())
} }
    /// Enumerate current TCP connections by shelling out to `netstat -ano`.
    async fn connections(&self) -> Result<Vec<ConnSocket>, PlatformError> {
        parse_windows_connections()
    }
} }
#[async_trait] #[async_trait]
@@ -628,3 +849,10 @@ impl NeighProvider for WindowsNeighProvider {
Ok(parse_arp_output(&text)) Ok(parse_arp_output(&text))
} }
} }
#[async_trait]
impl FlowOwnerProvider for WindowsFlowOwnerProvider {
    // Delegates to the netstat-based resolver. The trait takes FlowTuple
    // by value; the synchronous helper only needs a borrow.
    async fn owner_of(&self, flow: FlowTuple) -> Result<FlowOwnerResult, PlatformError> {
        resolve_flow_owner(&flow)
    }
}

View File

@@ -1,5 +1,6 @@
use async_trait::async_trait; use async_trait::async_trait;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::net::IpAddr;
use std::sync::Arc; use std::sync::Arc;
use wtfnet_core::ErrorCode; use wtfnet_core::ErrorCode;
@@ -46,6 +47,18 @@ pub struct ListenSocket {
pub owner: Option<String>, pub owner: Option<String>,
} }
/// A single non-listening connection as reported by a platform's ports
/// provider, optionally enriched with owning-process details.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConnSocket {
    /// Transport protocol; the visible providers only emit "tcp".
    pub proto: String,
    /// Local endpoint in the platform's string form (e.g. "ip:port").
    pub local_addr: String,
    /// Remote endpoint in the platform's string form.
    pub remote_addr: String,
    /// Connection state name (e.g. "ESTABLISHED") when known.
    pub state: Option<String>,
    /// Owning process id, when it could be determined.
    pub pid: Option<u32>,
    /// Parent process id, when available (Linux only in the visible code).
    pub ppid: Option<u32>,
    /// Owning process name, when available.
    pub process_name: Option<String>,
    /// Owning process image path, when available.
    pub process_path: Option<String>,
}
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RootCert { pub struct RootCert {
pub subject: String, pub subject: String,
@@ -68,6 +81,46 @@ pub struct NeighborEntry {
pub state: Option<String>, pub state: Option<String>,
} }
/// Transport protocol of a flow; serialized lowercase ("udp"/"tcp").
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq)]
#[serde(rename_all = "lowercase")]
pub enum FlowProtocol {
    Udp,
    Tcp,
}
/// How trustworthy a flow-owner match is.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum FlowOwnerConfidence {
    /// Local and remote endpoints both matched a socket entry.
    High,
    /// Only the local endpoint matched (typical for UDP).
    Medium,
    /// A socket matched but its owning process could not be resolved.
    Low,
    /// No matching socket entry was found.
    None,
}
/// Process identity attached to a matched flow; all fields are optional
/// because each platform can resolve a different subset.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlowOwner {
    pub pid: Option<u32>,
    pub ppid: Option<u32>,
    pub process_name: Option<String>,
    pub process_path: Option<String>,
}
/// Outcome of a flow-owner lookup: the owner (if any), the confidence of
/// the match, and a human-readable reason when the lookup failed.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct FlowOwnerResult {
    pub owner: Option<FlowOwner>,
    pub confidence: FlowOwnerConfidence,
    pub failure_reason: Option<String>,
}
/// The 5-tuple identifying one network flow to look up.
#[derive(Debug, Clone)]
pub struct FlowTuple {
    pub proto: FlowProtocol,
    pub src_ip: IpAddr,
    pub src_port: u16,
    pub dst_ip: IpAddr,
    pub dst_port: u16,
}
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct PlatformError { pub struct PlatformError {
pub code: ErrorCode, pub code: ErrorCode,
@@ -98,6 +151,7 @@ pub trait SysProvider: Send + Sync {
pub trait PortsProvider: Send + Sync { pub trait PortsProvider: Send + Sync {
async fn listening(&self) -> Result<Vec<ListenSocket>, PlatformError>; async fn listening(&self) -> Result<Vec<ListenSocket>, PlatformError>;
async fn who_owns(&self, port: u16) -> Result<Vec<ListenSocket>, PlatformError>; async fn who_owns(&self, port: u16) -> Result<Vec<ListenSocket>, PlatformError>;
async fn connections(&self) -> Result<Vec<ConnSocket>, PlatformError>;
} }
#[async_trait] #[async_trait]
@@ -110,9 +164,15 @@ pub trait NeighProvider: Send + Sync {
async fn neighbors(&self) -> Result<Vec<NeighborEntry>, PlatformError>; async fn neighbors(&self) -> Result<Vec<NeighborEntry>, PlatformError>;
} }
/// Resolves which local process owns a given network flow.
#[async_trait]
pub trait FlowOwnerProvider: Send + Sync {
    /// Look up the owner of `flow`. Failures to match are reported inside
    /// the `FlowOwnerResult` rather than as a `PlatformError`.
    async fn owner_of(&self, flow: FlowTuple) -> Result<FlowOwnerResult, PlatformError>;
}
pub struct Platform { pub struct Platform {
pub sys: Arc<dyn SysProvider>, pub sys: Arc<dyn SysProvider>,
pub ports: Arc<dyn PortsProvider>, pub ports: Arc<dyn PortsProvider>,
pub cert: Arc<dyn CertProvider>, pub cert: Arc<dyn CertProvider>,
pub neigh: Arc<dyn NeighProvider>, pub neigh: Arc<dyn NeighProvider>,
pub flow_owner: Arc<dyn FlowOwnerProvider>,
} }

View File

@@ -35,6 +35,9 @@ pub struct TlsCertSummary {
pub not_before: String, pub not_before: String,
pub not_after: String, pub not_after: String,
pub san: Vec<String>, pub san: Vec<String>,
pub signature_algorithm: Option<String>,
pub key_usage: Option<Vec<String>>,
pub extended_key_usage: Option<Vec<String>>,
} }
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -45,6 +48,7 @@ pub struct TlsHandshakeReport {
pub alpn_negotiated: Option<String>, pub alpn_negotiated: Option<String>,
pub tls_version: Option<String>, pub tls_version: Option<String>,
pub cipher: Option<String>, pub cipher: Option<String>,
pub ocsp_stapled: Option<bool>,
pub cert_chain: Vec<TlsCertSummary>, pub cert_chain: Vec<TlsCertSummary>,
} }
@@ -56,6 +60,7 @@ pub struct TlsVerifyReport {
pub alpn_negotiated: Option<String>, pub alpn_negotiated: Option<String>,
pub tls_version: Option<String>, pub tls_version: Option<String>,
pub cipher: Option<String>, pub cipher: Option<String>,
pub ocsp_stapled: Option<bool>,
pub verified: bool, pub verified: bool,
pub error: Option<String>, pub error: Option<String>,
} }
@@ -64,6 +69,7 @@ pub struct TlsVerifyReport {
pub struct TlsCertReport { pub struct TlsCertReport {
pub target: String, pub target: String,
pub sni: Option<String>, pub sni: Option<String>,
pub ocsp_stapled: Option<bool>,
pub cert_chain: Vec<TlsCertSummary>, pub cert_chain: Vec<TlsCertSummary>,
} }
@@ -83,6 +89,8 @@ pub struct TlsOptions {
pub insecure: bool, pub insecure: bool,
pub socks5: Option<String>, pub socks5: Option<String>,
pub prefer_ipv4: bool, pub prefer_ipv4: bool,
pub show_extensions: bool,
pub ocsp: bool,
} }
pub async fn handshake(target: &str, options: TlsOptions) -> Result<TlsHandshakeReport, TlsError> { pub async fn handshake(target: &str, options: TlsOptions) -> Result<TlsHandshakeReport, TlsError> {
@@ -120,7 +128,8 @@ pub async fn handshake(target: &str, options: TlsOptions) -> Result<TlsHandshake
cipher: session cipher: session
.negotiated_cipher_suite() .negotiated_cipher_suite()
.map(|suite| format!("{suite:?}")), .map(|suite| format!("{suite:?}")),
cert_chain: extract_cert_chain(session.peer_certificates())?, ocsp_stapled: ocsp_status(session, options.ocsp),
cert_chain: extract_cert_chain(session.peer_certificates(), options.show_extensions)?,
}) })
} }
@@ -160,6 +169,7 @@ pub async fn verify(target: &str, options: TlsOptions) -> Result<TlsVerifyReport
cipher: session cipher: session
.negotiated_cipher_suite() .negotiated_cipher_suite()
.map(|suite| format!("{suite:?}")), .map(|suite| format!("{suite:?}")),
ocsp_stapled: ocsp_status(session, options.ocsp),
verified: true, verified: true,
error: None, error: None,
}) })
@@ -171,6 +181,7 @@ pub async fn verify(target: &str, options: TlsOptions) -> Result<TlsVerifyReport
alpn_negotiated: None, alpn_negotiated: None,
tls_version: None, tls_version: None,
cipher: None, cipher: None,
ocsp_stapled: None,
verified: false, verified: false,
error: Some(err.to_string()), error: Some(err.to_string()),
}), }),
@@ -203,7 +214,8 @@ pub async fn certs(target: &str, options: TlsOptions) -> Result<TlsCertReport, T
Ok(TlsCertReport { Ok(TlsCertReport {
target: target.to_string(), target: target.to_string(),
sni: options.sni, sni: options.sni,
cert_chain: extract_cert_chain(session.peer_certificates())?, ocsp_stapled: ocsp_status(session, options.ocsp),
cert_chain: extract_cert_chain(session.peer_certificates(), options.show_extensions)?,
}) })
} }
@@ -427,26 +439,41 @@ fn socks5_target_host(proxy: &str, host: &str) -> (String, bool) {
(host.to_string(), remote_dns) (host.to_string(), remote_dns)
} }
fn extract_cert_chain(certs: Option<&[Certificate]>) -> Result<Vec<TlsCertSummary>, TlsError> { fn extract_cert_chain(
certs: Option<&[Certificate]>,
show_extensions: bool,
) -> Result<Vec<TlsCertSummary>, TlsError> {
let mut results = Vec::new(); let mut results = Vec::new();
if let Some(certs) = certs { if let Some(certs) = certs {
for cert in certs { for cert in certs {
let summary = parse_cert(&cert.0)?; let summary = parse_cert(&cert.0, show_extensions)?;
results.push(summary); results.push(summary);
} }
} }
Ok(results) Ok(results)
} }
fn parse_cert(der: &[u8]) -> Result<TlsCertSummary, TlsError> { fn parse_cert(der: &[u8], show_extensions: bool) -> Result<TlsCertSummary, TlsError> {
let (_, cert) = let (_, cert) =
X509Certificate::from_der(der).map_err(|err| TlsError::Parse(err.to_string()))?; X509Certificate::from_der(der).map_err(|err| TlsError::Parse(err.to_string()))?;
let (key_usage, extended_key_usage, signature_algorithm) = if show_extensions {
(
extract_key_usage(&cert),
extract_extended_key_usage(&cert),
Some(cert.signature_algorithm.algorithm.to_string()),
)
} else {
(None, None, None)
};
Ok(TlsCertSummary { Ok(TlsCertSummary {
subject: cert.subject().to_string(), subject: cert.subject().to_string(),
issuer: cert.issuer().to_string(), issuer: cert.issuer().to_string(),
not_before: cert.validity().not_before.to_string(), not_before: cert.validity().not_before.to_string(),
not_after: cert.validity().not_after.to_string(), not_after: cert.validity().not_after.to_string(),
san: extract_san(&cert), san: extract_san(&cert),
signature_algorithm,
key_usage,
extended_key_usage,
}) })
} }
@@ -460,6 +487,85 @@ fn extract_san(cert: &X509Certificate<'_>) -> Vec<String> {
result result
} }
/// Collect the set KeyUsage bits from the certificate as their conventional
/// RFC 5280 extension names (e.g. "digitalSignature", "keyCertSign").
///
/// Returns `None` when the extension is absent, fails to parse, or carries no
/// set bits; otherwise `Some` with the names in canonical bit order.
fn extract_key_usage(cert: &X509Certificate<'_>) -> Option<Vec<String>> {
    let ext = cert.key_usage().ok()??;
    let usage = &ext.value;
    // Table-driven: (bit set?, canonical name) pairs, in RFC 5280 bit order.
    let flags: [(bool, &str); 9] = [
        (usage.digital_signature(), "digitalSignature"),
        (usage.non_repudiation(), "nonRepudiation"),
        (usage.key_encipherment(), "keyEncipherment"),
        (usage.data_encipherment(), "dataEncipherment"),
        (usage.key_agreement(), "keyAgreement"),
        (usage.key_cert_sign(), "keyCertSign"),
        (usage.crl_sign(), "cRLSign"),
        (usage.encipher_only(), "encipherOnly"),
        (usage.decipher_only(), "decipherOnly"),
    ];
    let names: Vec<String> = flags
        .iter()
        .filter(|(is_set, _)| *is_set)
        .map(|(_, name)| (*name).to_string())
        .collect();
    if names.is_empty() {
        None
    } else {
        Some(names)
    }
}
/// Collect the ExtendedKeyUsage purposes from the certificate as their
/// conventional names (e.g. "serverAuth"), followed by the dotted-decimal
/// form of any other OIDs present in the extension.
///
/// Returns `None` when the extension is absent, fails to parse, or lists no
/// purposes at all; otherwise `Some` with the names in declaration order.
fn extract_extended_key_usage(cert: &X509Certificate<'_>) -> Option<Vec<String>> {
    let ext = cert.extended_key_usage().ok()??;
    let eku = &ext.value;
    // Table-driven: (purpose present?, canonical name) pairs.
    let known: [(bool, &str); 7] = [
        (eku.any, "any"),
        (eku.server_auth, "serverAuth"),
        (eku.client_auth, "clientAuth"),
        (eku.code_signing, "codeSigning"),
        (eku.email_protection, "emailProtection"),
        (eku.time_stamping, "timeStamping"),
        (eku.ocsp_signing, "ocspSigning"),
    ];
    let mut names: Vec<String> = known
        .iter()
        .filter(|(present, _)| *present)
        .map(|(_, name)| (*name).to_string())
        .collect();
    // Unrecognized purposes are reported by their raw OID string.
    names.extend(eku.other.iter().map(|oid| oid.to_string()));
    if names.is_empty() {
        None
    } else {
        Some(names)
    }
}
/// Report whether an OCSP response was stapled during the TLS handshake.
///
/// NOTE(review): this is currently a placeholder — the original body was
/// `if enabled { None } else { None }`, i.e. both branches returned `None`,
/// so the conditional was dead code. The session is not inspected at all
/// (`_session` is unused), meaning stapling status is unknown regardless of
/// whether the user requested it via `--ocsp`. The dead branch is removed
/// here; behavior (always `None`) is unchanged.
/// TODO: surface the stapled OCSP response from the session once the
/// underlying API makes it available, and return `Some(true/false)` when
/// `enabled` is set.
fn ocsp_status(_session: &rustls::ClientConnection, enabled: bool) -> Option<bool> {
    // `enabled` is kept for interface stability; detection is unimplemented.
    let _ = enabled;
    None
}
struct NoVerifier; struct NoVerifier;
impl rustls::client::ServerCertVerifier for NoVerifier { impl rustls::client::ServerCertVerifier for NoVerifier {

69
docs/COMMANDS.md Normal file
View File

@@ -0,0 +1,69 @@
# WTFnet Commands
This document lists CLI commands and supported flags. Output defaults to text; use `--json` for structured output.
## Global flags
- `--json` / `--pretty`: emit JSON output (pretty-print if requested)
- `--no-color` / `--quiet`: disable ANSI colors / reduce stdout output
- `-v` / `-vv` / `--verbose`: increase log verbosity
- `--log-level <error|warn|info|debug|trace>`: set log level
- `--log-format <text|json>`: set log format
- `--log-file <path>`: write logs to file
- `NETTOOL_LOG_FILTER` or `RUST_LOG` can override log filters (ex: `maxminddb::decoder=debug`)
## sys
- `sys ifaces`: list network interfaces
- `sys ip` flags: `--all` (include link-local), `--iface <name>` (filter by interface)
- `sys route` flags: `--ipv4`, `--ipv6`, `--to <ip>` (filter by destination)
- `sys dns`: show DNS configuration
## ports
- `ports listen` flags: `--tcp`, `--udp`, `--port <n>` (filter by port)
- `ports who <port>`: find owning processes for a port
- `ports conns` flags: `--top <n>`, `--by-process` (summaries)
## neigh
- `neigh list` flags: `--ipv4`, `--ipv6`, `--iface <name>`
## cert
- `cert roots`: list trusted root certificates
- `cert baseline <path>`: write baseline JSON
- `cert diff <path>`: diff against baseline JSON
## geoip
- `geoip lookup <ip>`: lookup GeoIP
- `geoip status`: show GeoIP database status
## probe
- `probe ping <host>` flags: `--count <n>`, `--timeout-ms <n>`, `--interval-ms <n>`, `--no-geoip`
- `probe tcping <host:port>` flags: `--count <n>`, `--timeout-ms <n>`, `--socks5 <url>`, `--prefer-ipv4`, `--no-geoip`
- `probe trace <host>` flags: `--max-hops <n>`, `--per-hop <n>`, `--timeout-ms <n>`, `--udp`, `--port <n>`, `--rdns`, `--no-geoip`
## dns
- `dns query <domain> <type>` flags: `--server <ip[:port]>`, `--transport <udp|tcp|dot|doh>`, `--tls-name <name>`, `--socks5 <url>`, `--prefer-ipv4`, `--timeout-ms <n>`
- `dns detect <domain>` flags: `--servers <csv>`, `--transport <udp|tcp|dot|doh>`, `--tls-name <name>`, `--socks5 <url>`, `--prefer-ipv4`, `--repeat <n>`, `--timeout-ms <n>`
- `dns watch` flags: `--duration <Ns|Nms>`, `--follow` (run until Ctrl-C), `--iface <name>`, `--filter <pattern>`
- `dns leak status` flags: `--profile <full-tunnel|proxy-stub|split>`, `--policy <path>`
- `dns leak watch` flags: `--duration <Ns|Nms>`, `--follow` (run until Ctrl-C), `--iface <name>`, `--profile <full-tunnel|proxy-stub|split>`, `--policy <path>`, `--privacy <full|redacted|minimal>`, `--out <path>`, `--summary-only`, `--iface-diag` (list capture-capable interfaces)
- `dns leak report` flags: `<path>`, `--privacy <full|redacted|minimal>`
## http
- `http head|get <url>` flags: `--timeout-ms <n>`, `--follow-redirects <n>`, `--show-headers`, `--show-body`, `--max-body-bytes <n>`, `--http1-only`, `--http2-only`, `--http3` (requires feature `http3`), `--http3-only` (requires feature `http3`), `--geoip`, `--socks5 <url>`
## tls
- `tls handshake|cert|verify|alpn <host:port>` flags: `--sni <name>`, `--alpn <csv>`, `--timeout-ms <n>`, `--insecure`, `--socks5 <url>`, `--prefer-ipv4`, `--show-extensions`, `--ocsp`
## discover
- `discover mdns` flags: `--duration <Ns|Nms>`, `--service <type>`
- `discover ssdp` flags: `--duration <Ns|Nms>`
- `discover llmnr` flags: `--duration <Ns|Nms>`, `--name <host>`
- `discover nbns` flags: `--duration <Ns|Nms>`
## diag
- `diag` flags: `--out <path>`, `--bundle <path>`, `--dns-detect <domain>`, `--dns-timeout-ms <n>`, `--dns-repeat <n>`
## calc
- `calc subnet <cidr>|<ip> <mask>`
- `calc contains <a> <b>`
- `calc overlap <a> <b>`
- `calc summarize <cidr...>`

View File

@@ -0,0 +1,181 @@
# DNS Leak Detector - Implementation Guide (v0.4)
This document explains how to implement the DNS leak detector as a new subcrate in WTFnet.
## 1) New crate: `wtfnet-dnsleak`
### 1.1 Module layout
crates/wtfnet-dnsleak/src/
- lib.rs
- policy.rs # safe path constraints + presets
- sensor.rs # passive capture -> normalized TrafficEvent stream
- classify.rs # transport classification + confidence
- route.rs # interface/route classification (tunnel/physical/loopback)
- rules.rs # Leak-A/B/C/D evaluation
- report.rs # LeakEvent + SummaryReport builders
- privacy.rs # full/redacted/minimal redaction logic
## 2) Core data types
### 2.1 TrafficEvent (raw from sensor)
Fields:
- ts: timestamp
- proto: udp/tcp
- src_ip, src_port
- dst_ip, dst_port
- iface_name (capture interface if known)
- payload: optional bytes (only for plaintext DNS parsing)
### 2.2 ClassifiedEvent
Adds:
- transport: udp53/tcp53/dot/doh/unknown
- doh_confidence: HIGH/MEDIUM/LOW (only if doh)
- qname/qtype: nullable
### 2.3 EnrichedEvent
Adds:
- route_class: loopback/tunnel/physical/unknown
- process info: pid/ppid/name (nullable)
- attribution_confidence: HIGH/MEDIUM/LOW/NONE
- attrib_failure_reason: optional string
### 2.4 LeakEvent (final output)
Adds:
- leak_type: A/B/C/D
- severity: P0..P3
- policy_rule_id
- evidence: minimal structured evidence
## 3) Platform integration: Process Attribution Engine (PAE)
### 3.1 Trait addition (wtfnet-platform)
Add:
trait FlowOwnerProvider {
fn owner_of(
&self,
proto: Proto,
src_ip: IpAddr,
src_port: u16,
dst_ip: IpAddr,
dst_port: u16,
) -> FlowOwnerResult;
}
FlowOwnerResult:
- pid, ppid, process_name (optional)
- confidence: HIGH/MEDIUM/LOW/NONE
- failure_reason: optional string
Design rule: attribution is best-effort and never blocks leak detection.
## 4) Transport classification logic
### 4.1 Plain DNS
Match:
- UDP dst port 53 OR TCP dst port 53
Parse QNAME/QTYPE from payload.
### 4.2 DoT
Match:
- TCP dst port 853
### 4.3 DoH (heuristic)
Match candidates:
- TCP dst port 443 AND (one of):
- dst IP in configured DoH resolver list
- dst SNI matches known DoH provider list (if available)
- frequent small HTTPS bursts pattern (weak)
Attach confidence:
- MEDIUM: known endpoint match
- LOW: traffic-shape heuristic only
## 5) Policy model
Policy defines "safe DNS path" constraints:
- allowed interfaces
- allowed destinations (IP/CIDR)
- allowed processes
- allowed ports
A DNS event is a leak if it violates safe-path constraints.
### 5.1 Built-in profiles
full-tunnel:
- allow DNS only via tunnel iface or loopback stub
- any UDP/TCP 53 on physical iface => Leak-A
proxy-stub (default):
- allow DNS only to loopback stub
- allow stub upstream only to proxy destinations
- flag direct DoH/DoT outside proxy path => Leak-C
split:
- allow plaintext DNS only for allowlist
- enforce unknown => proxy resolve (Leak-B)
## 6) Leak rules (A/B/C/D)
Leak-A (plaintext escape):
- transport udp53/tcp53
- route_class != allowed
- dst not in allowed destination set
Leak-B (split policy intent leak):
- qname matches proxy-required set or "unknown"
- query observed going to ISP/domicile resolver or non-tunnel iface
Leak-C (encrypted bypass):
- DoT or DoH flow exists
- not via approved egress path (iface/destination)
Leak-D (mismatch indicator):
- correlate qname to later TCP/TLS flows (optional v0.4 NICE)
## 7) Privacy modes
Because domains and cmdlines are sensitive, support:
- Full: store full qname and cmdline
- Redacted (default): hash qname or keep eTLD+1 only; truncate cmdline
- Minimal: no domains/cmdline; keep leak counts + resolver IPs + process name
Privacy mode applies in report builder, not in sensor.
## 8) CLI integration
Add under `dns` command group:
- `dns leak status`
- `dns leak watch`
- `dns leak report`
`watch` returns:
- summary report (human) by default
- `--json` returns structured report with events list
`--follow` keeps the watch running by resolving the duration to a large
placeholder (one year in milliseconds) and then racing the watch against
`tokio::signal::ctrl_c()`; Ctrl-C returns early with a clean exit code so the
outer loop stops.
## 9) Recommended incremental build plan
Phase 1 (core passive detection):
- sensor: udp/tcp capture
- classify: udp53/tcp53/dot
- parse plaintext qname/qtype
- policy: allowlist + allowed interfaces/dests
- leak rules: Leak-A + Leak-C (DoT)
- report: events + summary
Phase 2 (process attribution + DoH heuristics):
- platform FlowOwnerProvider impls
- DoH heuristic classification + confidence
- privacy modes
Phase 3 (optional correlation / Leak-D):
- flow tracker correlating DNS -> TCP/TLS connect events
- mismatch indicator output

View File

@@ -10,7 +10,7 @@ v0.3.0 focuses on improving diagnostic depth and fidelity of existing commands r
Major upgrades in this release: Major upgrades in this release:
- richer traceroute output and per-hop statistics - richer traceroute output and per-hop statistics
- HTTP timing breakdown accuracy (connect/tls stages) - HTTP timing breakdown accuracy (connect/tls stages)
- optional HTTP/3 support (best-effort) - optional HTTP/3 support (feature-gated; experimental)
- TLS diagnostics upgrades (OCSP stapling indicator, richer certificate parsing) - TLS diagnostics upgrades (OCSP stapling indicator, richer certificate parsing)
- ports connections view and summaries - ports connections view and summaries
- improved cert baseline/diff for system roots - improved cert baseline/diff for system roots
@@ -67,7 +67,7 @@ Acceptance:
- on timeout / failure, partial timing must still be meaningful. - on timeout / failure, partial timing must still be meaningful.
### 3.3 HTTP/3 (optional feature flag) (SHOULD) ### 3.3 HTTP/3 (optional feature flag) (SHOULD)
Current: HTTP/3 not implemented. Current: feature-gated HTTP/3 path exists but is incomplete; keep disabled in default builds.
Target: Target:
- add `--http3` support behind Cargo feature `http3` - add `--http3` support behind Cargo feature `http3`
- behavior: - behavior:
@@ -79,6 +79,7 @@ Target:
Acceptance: Acceptance:
- builds without `http3` feature still work - builds without `http3` feature still work
- with feature enabled, HTTP/3 works on at least one known compatible endpoint - with feature enabled, HTTP/3 works on at least one known compatible endpoint
- documented as experimental until stabilized
### 3.4 TLS extras: OCSP + richer cert parsing (MUST) ### 3.4 TLS extras: OCSP + richer cert parsing (MUST)
Current: `tls handshake/verify/cert/alpn` exists. Current: `tls handshake/verify/cert/alpn` exists.

154
docs/RELEASE_v0.4.0.md Normal file
View File

@@ -0,0 +1,154 @@
# WTFnet v0.4.0 - DNS Leak Detection
v0.4.0 introduces a client-side DNS leak detector aimed at censorship-resistance threat models:
detect when DNS behavior escapes the intended safe path. The detector focuses on evidence:
transport, interface, destination, and (best-effort) process attribution.
This release does NOT include HTTP/3 or OS-native TLS verification.
## 0) Summary
New major capability: `dns leak` command group.
Core idea:
Passive monitor captures outbound DNS-like traffic -> classify (Plain DNS / DoT / DoH) ->
enrich with interface/route/process metadata -> evaluate leak definitions (A/B/C/D) ->
emit events + summary report.
Leak definitions are explicit:
- Leak-A: plaintext DNS outside safe path
- Leak-B: split-policy intent leak (proxy-required domains resolved via ISP/local path)
- Leak-C: encrypted DNS escape/bypass (DoH/DoT outside approved egress)
- Leak-D: mismatch risk indicator (DNS egress differs from TCP/TLS egress)
## 1) Goals
### G1. Detect DNS leaks without needing special test domains
Passive detection must work continuously and produce evidence.
### G2. Support censorship-resistance leak definitions
Include both classic VPN-bypass leaks and split-policy intent leaks.
### G3. Best-effort process attribution
Attach PID/PPID/process name when OS allows; degrade gracefully with confidence.
### G4. Privacy-aware by default
Support privacy modes: Full / Redacted / Minimal.
## 2) Non-goals (v0.4.0)
- No "doctor" / smart one-shot diagnosis command
- No shell completions / man pages
- No HTTP/3 support
- No OS-native TLS verifier integration
- No firewall modification / kill switch management (detection only)
## 3) New crates / architecture changes
### 3.1 New subcrate: `wtfnet-dnsleak`
Responsibilities:
- passive sensor (pcap/pnet feature-gated)
- DNS parser (plaintext only)
- transport classifier: udp53/tcp53/dot/doh (confidence)
- flow tracker + metadata enrichment
- process attribution integration
- leak rules engine (A/B/C/D)
- structured event + summary report builder
### 3.2 `wtfnet-platform` extension: flow ownership lookup
Add a new trait:
- FlowOwnerProvider: map observed traffic 5-tuple -> process info (best-effort)
Return process attribution confidence:
HIGH/MEDIUM/LOW/NONE plus failure reason.
## 4) CLI scope
### 4.1 Commands
New command group:
#### `wtfn dns leak watch`
Start passive monitoring for a bounded duration (default 10s):
- classify transports (udp53/tcp53/dot/doh)
- apply leak rules and emit events + summary
#### `wtfn dns leak status`
Print baseline snapshot:
- interfaces + routes
- system DNS configuration
- active policy summary
#### `wtfn dns leak report`
Parse a saved events file and produce a human summary.
### 4.2 Flags (proposed)
Common:
- `--duration <Ns|Nms>` (default 10s)
- `--iface <name>` (optional capture interface)
- `--policy <path>` (JSON policy file)
- `--profile <full-tunnel|proxy-stub|split>` (built-in presets)
- `--privacy <full|redacted|minimal>` (default redacted)
- `--out <path>` (write JSON report/events)
## 5) Policy model (v0.4.0)
Safe DNS path constraints can be defined by:
- allowed interfaces: loopback/tunnel
- allowed destination set: proxy IPs, internal resolvers
- allowed processes: only local stub/proxy can resolve upstream
- allowed ports: e.g. only 443 to proxy server
A DNS event is a leak if it violates safe-path constraints.
Built-in profiles:
1) full-tunnel VPN style
2) proxy + local stub (default, censorship model)
3) split policy
## 6) Outputs
### 6.1 Leak events (structured)
Each LeakEvent includes:
- timestamp
- transport: udp53/tcp53/dot/doh/unknown
- qname/qtype (nullable)
- interface + route_class
- dst ip:port
- process info (nullable) + attribution confidence
- leak_type: A/B/C/D
- severity: P0..P3
- evidence fields + optional geoip
### 6.2 Summary report
- leak counts by type
- top leaking processes (if available)
- top resolver destinations
- timeline/burst hints
## 7) Deliverables checklist
MUST:
- new `wtfnet-dnsleak` crate integrated into workspace + CLI
- passive capture for UDP/TCP 53 and TCP 853
- DoH heuristic classification (confidence-based)
- policy engine + Leak-A/B/C/D rules
- structured events + human summary
- privacy modes full/redacted/minimal
- best-effort process attribution with confidence and failure reason
SHOULD:
- saved report file support (`--out report.json`)
- route_class inference with policy hints + heuristics
NICE:
- correlation_id (DNS -> subsequent TCP/TLS connection) for Leak-D mismatch indicator
## 8) Definition of Done
- v0.4.0 builds on Linux (Debian/Ubuntu) and Windows
- `wtfn dns leak watch` detects:
- plaintext DNS leaving physical interface (Leak-A)
- DoT traffic leaving outside approved egress (Leak-C)
- DoH-ish encrypted resolver traffic outside policy (Leak-C)
- events include interface + dst + (best-effort) PID/process info
- output remains stable and additive; no breaking change to v0.3 commands

View File

@@ -3,42 +3,43 @@
This is a practical checklist to execute v0.3.0. This is a practical checklist to execute v0.3.0.
## 1) probe/trace upgrades ## 1) probe/trace upgrades
- [ ] add `--per-hop <n>` and store RTT samples per hop - [x] add `--per-hop <n>` and store RTT samples per hop
- [ ] compute loss% per hop - [x] compute loss% per hop
- [ ] add `--rdns` best-effort reverse lookup (cached + time-bounded) - [x] add `--rdns` best-effort reverse lookup (cached + time-bounded)
- [ ] improve hop formatting + JSON schema - [x] improve hop formatting + JSON schema
## 2) http timing improvements ## 2) http timing improvements
- [ ] implement `connect_ms` and `tls_ms` timing - [x] implement `connect_ms` and `tls_ms` timing
- [ ] report `null` + warning when measurement unavailable - [x] report `null` + warning when measurement unavailable
- [ ] keep current `dns_ms` and `ttfb_ms` - [ ] keep current `dns_ms` and `ttfb_ms`
## 3) optional HTTP/3 ## 3) tls extras
- [ ] add `http3` cargo feature + deps - [x] add OCSP stapling presence indicator (if available)
- [ ] implement `--http3` / `--http3-only` - [x] parse SANs and key usage / EKU best-effort
- [ ] define error classification for QUIC failures - [x] add `--show-extensions` and `--ocsp` flags
## 4) tls extras ## 4) ports conns
- [ ] add OCSP stapling presence indicator (if available) - [x] implement `wtfn ports conns`
- [ ] parse SANs and key usage / EKU best-effort - [x] add `--top <n>` and `--by-process`
- [ ] add `--show-extensions` and `--ocsp` flags - [x] best-effort PID mapping with warnings
## 5) ports conns ## 5) cert baseline/diff improvements
- [ ] implement `wtfn ports conns` - [x] baseline schema version
- [ ] add `--top <n>` and `--by-process` - [x] match by SHA256 fingerprint
- [ ] best-effort PID mapping with warnings - [x] diff categories: add/remove/expired/changed
## 6) cert baseline/diff improvements ## 6) optional LLMNR/NBNS
- [ ] baseline schema version - [x] implement `discover llmnr`
- [ ] match by SHA256 fingerprint - [x] implement `discover nbns`
- [ ] diff categories: add/remove/expired/changed - [x] bounded collection, low-noise
## 7) optional LLMNR/NBNS ## 7) docs updates
- [ ] implement `discover llmnr` - [x] update README roadmap
- [ ] implement `discover nbns` - [x] update COMMANDS.md with new flags/commands
- [ ] bounded collection, low-noise - [x] add RELEASE_v0.3.0.md
## 8) docs updates ## 8) optional HTTP/3 (last)
- [ ] update README roadmap - [x] add `http3` cargo feature + deps
- [ ] update COMMANDS.md with new flags/commands - [x] implement `--http3` / `--http3-only`
- [ ] add RELEASE_v0.3.0.md - [x] define error classification for QUIC failures
- [x] keep feature disabled in default builds until stabilized

33
docs/WORK_ITEMS_v0.4.0.md Normal file
View File

@@ -0,0 +1,33 @@
# WTFnet v0.4.0 - Work Items
This is a practical checklist to execute v0.4.0.
## 1) platform flow ownership
- [x] add FlowOwnerProvider trait + data types
- [x] implement Linux best-effort lookup
- [x] implement Windows best-effort lookup
## 2) new wtfnet-dnsleak crate
- [x] crate scaffold + pcap feature
- [x] UDP/TCP 53 capture + classify
- [x] DoT detection (TCP 853)
- [x] policy model + profiles
- [x] leak rules A/B/C (partial)
- [x] privacy modes
- [x] report + summary builder
## 3) CLI wiring
- [x] add `dns leak status`
- [x] add `dns leak watch`
- [x] add `dns leak report`
## 4) docs updates
- [x] add `docs/RELEASE_v0.4.0.md`
- [x] add `docs/DNS_LEAK_DETECTOR_IMPLEMENTATION.md`
- [x] update README roadmap + flags
- [x] update COMMANDS.md
- [x] update status/implementation docs
## 5) follow-ups
- [ ] add DoH heuristic classification (optional)
- [x] add Leak-D mismatch correlation (optional)

View File

@@ -0,0 +1,723 @@
Below is a **high-level (language-agnostic)** design for a **client-side DNS leak detector** aimed at *censorship-resistance threat models*, i.e.:
> “Censor/ISP can observe/log DNS intent or infer proxy usage; we want to detect when DNS behavior escapes the intended protection path.”
I'll cover: **definitions**, **detection standards**, **workflow**, **modules**, **passive+active detection**, **outputs**, and **test methodology**.
---
# 1) Scope and goals
## Goals
Your detector should answer, with evidence:
1. **Did any DNS query leave the device outside the intended safe path?**
2. **Which domains leaked?** (when visible)
3. **Which transport leaked?** (UDP/53, TCP/53, DoT/853, DoH)
4. **Which interface leaked?** (Wi-Fi/Ethernet vs tunnel)
5. **Which process/app triggered it?** (if your OS allows attribution)
And in your censorship model, it should also detect:
6. **Split-policy intent leakage**: “unknown/sensitive domains were resolved using domestic/ISP-facing DNS.”
## Non-goals (be explicit)
* Not a censorship circumvention tool itself
* Not a full firewall manager (can suggest fixes, but detection is the core)
* Not perfect attribution on every OS (process mapping may be partial)
---
# 2) Define “DNS leak” precisely (your program's standard)
You need a **formal definition** because “DNS leak” is overloaded.
## Standard definition A (classic VPN / tunnel bypass)
A leak occurs if:
> **An unencrypted DNS query is sent outside the secure tunnel path**
> This is essentially how popular leak test sites define it (“unencrypted DNS query sent OUTSIDE the established VPN tunnel”). ([IP Leak][1])
Your detector should implement it in a machine-checkable way:
**Leak-A condition**
* DNS over **UDP/53 or TCP/53**
* Destination is **not** a “trusted resolver path” (e.g., not the tunnel interface, not loopback stub, not proxy channel)
* Interface is **not** the intended egress
✅ Strong for censorship: plaintext DNS exposes intent.
---
## Standard definition B (split-policy intent leak)
A leak occurs if:
> **A domain that should be “proxied / remote-resolved” was queried via local/ISP-facing DNS.**
This is the “proxy split rules still leak intent” case.
**Leak-B condition**
* Query name matches either:
* a “proxy-required set” (sensitive list, non-allowlist, unknown), or
* a policy rule (“everything except allowlist must resolve via proxy DNS”)
* And the query was observed going to:
* ISP resolver(s) / domestic resolver(s) / non-tunnel interface
✅ This is the leak most users in censorship settings care about.
---
## Standard definition C (encrypted DNS escape / bypass)
A leak occurs if:
> DNS was encrypted, but escaped the intended channel (e.g., app uses its own DoH directly to the Internet).
This matters because DoH hides the QNAME but still creates **observable behavior** and breaks your “DNS must follow proxy” invariant.
**Leak-C condition**
* DoH (RFC 8484) ([IETF Datatracker][2]) or DoT (RFC 7858) ([IETF Datatracker][3]) flow exists
* And it does **not** go through your approved egress path (tunnel/proxy)
✅ Detects “Firefox/Chrome built-in DoH bypass” style cases.
---
## Standard definition D (mismatch risk indicator)
Not a “leak” by itself, but a **proxy inference amplifier**:
> DNS egress region/path differs from traffic egress region/path.
This is a *censorship-resistance hygiene metric*, not a binary leak.
**Mismatch condition**
* Same domain produces:
* DNS resolution via path X
* TCP/TLS connection via path Y
* Where X ≠ Y (interface, ASN region, etc.)
✅ Helps catch “DNS direct, traffic proxy” or “DNS proxy, traffic direct” weirdness.
---
# 3) High-level architecture
## Core components
1. **Policy & Configuration**
* What counts as “safe DNS path”
* Which interfaces are “protected” (tunnel) vs “physical”
* Allowlist / proxy-required sets (optional)
* Known resolver lists (optional)
* Severity thresholds
2. **Traffic Sensor (Passive Monitor)**
* Captures outbound traffic metadata (and optionally payload for DNS parsing)
* Must cover:
* UDP/53, TCP/53
* TCP/853 (DoT)
* HTTPS flows that look like DoH (see below)
* Emits normalized events into a pipeline
3. **Classifier**
* Recognize DNS protocol types:
* Plain DNS
* DoT
* DoH
* Attach confidence scores (especially for DoH)
4. **DNS Parser (for plaintext DNS only)**
* Extract: QNAME, QTYPE, transaction IDs, response codes (optional)
* Store minimally (privacy-aware)
5. **Flow Tracker**
* Correlate packets into “flows”
* Map flow → interface → destination → process (if possible)
* Track timing correlation: DNS → connection attempts
6. **Leak Detector (Rules Engine)**
* Apply Leak-A/B/C/D definitions
* Produce leak events + severity + evidence chain
7. **Active Prober**
* Generates controlled DNS lookups to test behavior
* Can test fail-closed, bypasses, multi-interface behavior, etc.
8. **Report Generator**
* Human-readable summary
* Machine-readable logs (JSON)
* Recommendations (non-invasive)
---
# 4) Workflow (end-to-end)
## Workflow 0: Setup & baseline
1. Enumerate interfaces and routes
* Identify physical NICs
* Identify tunnel / proxy interface (or “expected egress destinations”)
2. Identify system DNS configuration
* Default resolvers per interface
* Local stub presence (127.0.0.1, etc.)
3. Load policy profile
* Full-tunnel, split-tunnel, or proxy-based
4. Start passive monitor
**Output:** “Current state snapshot” (useful even before testing).
---
## Workflow 1: Passive detection loop (always-on)
Continuously:
1. Capture outbound packets/flows
2. Classify as DNS-like (plain DNS / DoT / DoH / unknown)
3. If plaintext DNS → parse QNAME/QTYPE
4. Assign metadata:
* interface
* dst IP/port
* process (if possible)
* timestamp
5. Evaluate leak rules:
* Leak-A/B/C/D
6. Write event log + optional real-time alert
**Key design point:** passive mode should be able to detect leaks **without requiring any special test domain**.
---
## Workflow 2: Active test suite (on-demand)
Active tests exist because some leaks are intermittent or only happen under stress.
### Active Test A: “No plaintext DNS escape”
* Trigger a set of DNS queries (unique random domains)
* Verify **zero UDP/53 & TCP/53** leaves physical interfaces
### Active Test B: “Fail-closed test”
* Temporarily disrupt the “protected path” (e.g., tunnel down)
* Trigger lookups again
* Expected: DNS fails (no fallback to ISP DNS)
### Active Test C: “App bypass test”
* Launch test scenarios that mimic real apps
* Confirm no direct DoH/DoT flows go to public Internet outside the proxy path
### Active Test D: “Split-policy correctness”
* Query domains that should be:
* direct-allowed
* proxy-required
* unknown
* Confirm resolution path matches policy
---
# 5) How to recognize DNS transports (detection mechanics)
## Plain DNS (strongest signal)
**Match conditions**
* UDP dst port 53 OR TCP dst port 53
* Parse DNS header
* Extract QNAME/QTYPE
**Evidence strength:** high
**Intent visibility:** yes (domain visible)
---
## DoT (port-based, easy)
DoT is defined over TLS, typically port **853**. ([IETF Datatracker][3])
**Match conditions**
* TCP dst port 853
* Optionally confirm TLS handshake exists
**Evidence strength:** high
**Intent visibility:** no (domain hidden)
---
## DoH (harder; heuristic + optional allowlists)
DoH is DNS over HTTPS (RFC 8484). ([IETF Datatracker][2])
**Recognizers (from strongest to weakest):**
1. HTTP request with `Content-Type: application/dns-message`
2. Path/pattern common to DoH endpoints (optional list)
3. SNI matches known DoH providers (optional list)
4. Traffic resembles frequent small HTTPS POST/GET bursts typical of DoH (weak)
**Evidence strength:** medium
**Intent visibility:** no (domain hidden)
**Important for your use-case:** you may not need to *prove* it's DoH; you mostly need to detect “DNS-like encrypted resolver traffic bypassing the proxy channel.”
---
# 6) Policy model: define “safe DNS path”
You need a simple abstraction users can configure:
### Safe DNS path can be defined by one or more of:
* **Allowed interfaces**
* loopback (local stub)
* tunnel interface
* **Allowed destination set**
* proxy server IP(s)
* internal resolver IP(s)
* **Allowed process**
* only your local stub + proxy allowed to resolve externally
* **Allowed port set**
* maybe only permit 443 to proxy server (if DNS rides inside it)
Then implement:
**A DNS event is a “leak” if it violates safe-path constraints.**
---
# 7) Leak severity model (useful for real-world debugging)
### Severity P0 (critical)
* Plaintext DNS (UDP/TCP 53) on physical interface to ISP/public resolver
* Especially if QNAME matches proxy-required/sensitive list
### Severity P1 (high)
* DoH/DoT bypassing proxy channel directly to public Internet
### Severity P2 (medium)
* Policy mismatch: domain resolved locally but connection later proxied (or vice versa)
### Severity P3 (low / info)
* Authoritative-side “resolver egress exposure” (less relevant for client-side leak detector)
* CDN performance mismatch indicators
---
# 8) Outputs and reporting
## Real-time console output (for debugging)
* “DNS leak detected: Plain DNS”
* domain (if visible)
* destination resolver IP
* interface
* process name (if available)
* policy rule violated
* suggested fix category (e.g., “force stub + block port 53”)
## Forensics log (machine-readable)
A single **LeakEvent** record could include:
* timestamp
* leak_type (A/B/C/D)
* transport (UDP53, TCP53, DoT, DoH)
* qname/qtype (nullable)
* src_iface / dst_ip / dst_port
* process_id/process_name (nullable)
* correlation_id (link DNS → subsequent connection attempt)
* confidence score (esp. DoH)
* raw evidence pointers (pcap offsets / event IDs)
## Summary report
* Leak counts by type
* Top leaking processes
* Top leaking resolver destinations
* Timeline view (bursts often indicate OS fallback behavior)
* “Pass/Fail” per policy definition
---
# 9) Validation strategy (“how do I know my detector is correct?”)
## Ground truth tests
1. **Known-leak scenario**
* intentionally set OS DNS to ISP DNS, no tunnel
* detector must catch plaintext DNS
2. **Known-safe scenario**
* local stub only + blocked outbound 53/853
* detector should show zero leaks
3. **Bypass scenario**
* enable browser built-in DoH directly
* detector should catch encrypted resolver bypass (Leak-C)
4. **Split-policy scenario**
* allowlist CN direct, everything else proxy-resolve
* detector should show:
* allowlist resolved direct
* unknown resolved via proxy path
---
# 10) Recommended “profiles” (makes tool usable)
Provide built-in presets:
### Profile 1: Full-tunnel VPN
* allow DNS only via tunnel interface or loopback stub
* any UDP/TCP 53 on physical NIC = leak
### Profile 2: Proxy + local stub (your case)
* allow DNS only to loopback stub
* allow stub upstream only via proxy server destinations
* flag any direct DoH/DoT to public endpoints
### Profile 3: Split tunnel (geoip + allowlist)
* allow plaintext DNS **only** for allowlisted domains (if user accepts risk)
* enforce “unknown → proxy-resolve”
* emphasize Leak-B correctness
---
Below is an updated **high-level design** (still language-agnostic) that integrates **process attribution** cleanly, including how it fits into the workflow and what to log.
---
# 1) New component: Process Attribution Engine (PAE)
## Purpose
When a DNS-like event is observed, the PAE tries to attach:
* **PID**
* **PPID**
* **process name**
* *(optional but extremely useful)* full command line, executable path, user, container/app package, etc.
This lets your logs answer:
> “Which program generated the leaked DNS request?”
> “Was it a browser, OS service, updater, antivirus, proxy itself, or some library?”
## Position in the pipeline
It sits between **Traffic Sensor** and **Leak Detector** as an “event enricher”:
**Traffic Event → (Classifier) → (Process Attribution) → Enriched Event → Leak Rules → Report**
---
# 2) Updated architecture (with process attribution)
### Existing modules (from earlier design)
1. Policy & Configuration
2. Traffic Sensor (packet/flow monitor)
3. Classifier (Plain DNS / DoT / DoH / Unknown)
4. DNS Parser (plaintext only)
5. Flow Tracker
6. Leak Detector (rules engine)
7. Active Prober
8. Report Generator
### New module
9. **Process Attribution Engine (PAE)**
* resolves “who owns this flow / packet”
* emits PID/PPID/name
* handles platform-specific differences and fallbacks
---
# 3) Workflow changes (what happens when a potential leak is seen)
## Passive detection loop (updated)
1. Capture outbound traffic event
2. Classify transport type:
* UDP/53, TCP/53 → plaintext DNS
* TCP/853 → DoT
* HTTPS patterns → DoH (heuristic)
3. Extract the **5-tuple**
* src IP:port, dst IP:port, protocol
4. **PAE lookup**
* resolve the owner process for this traffic
* attach PID/PPID/name (+ optional metadata)
5. Apply leak rules (A/B/C/D)
6. Emit:
* realtime log line (human readable)
* structured record (JSON/event log)
---
# 4) Process attribution: what to detect and how (high-level)
Process attribution always works on one core concept:
> **Map observed traffic (socket/flow) → owning process**
### Inputs PAE needs
* protocol (UDP/TCP)
* local src port
* local address
* timestamp
* optionally: connection state / flow ID
### Output from PAE
* `pid`, `ppid`, `process_name`
* optional enrichment:
* `exe_path`
* `cmdline`
* `user`
* “process tree chain” (for debugging: parent → child → …)
---
# 5) Platform support strategy (without implementation detail)
Process attribution is **OS-specific**, so structure it as:
## “Attribution Provider” interface
* Provider A: “kernel-level flow owner”
* Provider B: “socket table owner lookup”
* Provider C: “event tracing feed”
* Provider D: fallback “unknown / not supported”
Your main design goal is:
### Design rule
**Attribution must be best-effort + gracefully degrading**, never blocking detection.
So you always log the leak even if PID is unavailable:
* `pid=null, attribution_confidence=LOW`
---
# 6) Attribution confidence + race handling (important!)
Attribution can be tricky because:
* a process may exit quickly (“short-lived resolver helper”)
* ports can be reused
* NAT or local proxies may obscure the real origin
So log **confidence**:
* **HIGH**: direct mapping from kernel/socket owner at time of event
* **MEDIUM**: mapping by lookup shortly after event (possible race)
* **LOW**: inferred / uncertain
* **NONE**: not resolved
Also record *why* attribution failed:
* “permission denied”
* “flow already gone”
* “unsupported transport”
* “ambiguous mapping”
This makes debugging much easier.
---
# 7) What PID/PPID adds to your leak definitions
### Leak-A (plaintext DNS outside safe path)
Now you can say:
> “`svchost.exe (PID 1234)` sent UDP/53 to ISP resolver on Wi-Fi interface”
### Leak-B (split-policy intent leak)
You can catch:
* “game launcher looked up blocked domain”
* “system service triggered a sensitive name unexpectedly”
* “your proxy itself isn't actually resolving via its own channel”
### Leak-C (encrypted DNS bypass)
This becomes *very actionable*:
> “`firefox.exe` started direct DoH to resolver outside tunnel”
### Leak-D (mismatch indicator)
You can also correlate:
* DNS resolved by one process
* connection made by another process
(e.g., local stub vs app)
---
# 8) Reporting / realtime logging format (updated)
## Realtime log line (human readable)
Example (conceptual):
* **[P0][Leak-A] Plain DNS leaked**
* Domain: `example-sensitive.com` (A)
* From: `Wi-Fi` → To: `1.2.3.4:53`
* Process: `browser.exe` **PID=4321 PPID=1200**
* Policy violated: “No UDP/53 on physical NIC”
## Structured event (JSON-style fields)
Minimum recommended fields:
### Event identity
* `event_id`
* `timestamp`
### DNS identity
* `transport` (udp53/tcp53/dot/doh/unknown)
* `qname` (nullable)
* `qtype` (nullable)
### Network path
* `interface_name`
* `src_ip`, `src_port`
* `dst_ip`, `dst_port`
* `route_class` (tunnel / physical / loopback)
### Process identity (your requested additions)
* `pid`
* `ppid`
* `process_name`
* optional:
* `exe_path`
* `cmdline`
* `user`
### Detection result
* `leak_type` (A/B/C/D)
* `severity` (P0..P3)
* `policy_rule_id`
* `attribution_confidence`
---
# 9) Privacy and safety notes (important in a DNS tool)
Because you're logging **domains** and **process command lines**, this becomes sensitive.
Add a “privacy mode” policy:
* **Full**: store full domain + cmdline
* **Redacted**: hash domain; keep TLD only; truncate cmdline
* **Minimal**: only keep leak counts + resolver IPs + process name
Also allow “capture window” (rotate logs, avoid giant histories).
---
# 10) UX feature: “Show me the process tree”
When a leak happens, a good debugger view is:
* `PID: foo (pid 1000)`
* `PPID: bar (pid 900)`
* `Grandparent: systemd/svchost/etc`
This is extremely useful to identify:
* browsers spawning helpers
* OS DNS services
* containerized processes
* update agents / telemetry daemons
So your report generator should support:
**Process chain rendering** (where possible)
---
# 11) Practical edge cases you should detect (with PID helping)
1. **Local stub is fine, upstream isn't**
* Your local resolver process leaks upstream plaintext DNS
2. **Browser uses its own DoH**
* process attribution immediately reveals it
3. **Multiple interfaces**
* a leak only happens on Wi-Fi but not Ethernet
4. **Kill-switch failure**
* when tunnel drops, PID shows which app starts leaking first
---

View File

@@ -0,0 +1,55 @@
# DNS Leak Detection - Implementation Status
This document tracks the current DNS leak detector implementation against the design in
`docs/dns_leak_detection_design.md` and `docs/requirement_docs_v0.4.md`.
## Implemented
- New `wtfnet-dnsleak` crate with passive capture (pcap feature).
- Transport classification:
- Plain DNS (UDP/53, TCP/53) with qname/qtype parsing.
- DoT (TCP/853) detection.
- DoH detection is not implemented (skipped for now).
- Leak rules:
- Leak-A (plaintext DNS outside safe path).
- Leak-B (split-policy intent leak based on proxy-required/allowlist domains).
- Leak-C (encrypted DNS bypass for DoT).
- Leak-D (basic mismatch: DNS response IP -> outbound TCP SYN on different route).
- Policy profiles: `full-tunnel`, `proxy-stub`, `split`.
- Privacy modes: full/redacted/minimal (redacts qname).
- Process attribution:
- Best-effort `FlowOwnerProvider` with Linux `/proc` and Windows `netstat` lookups.
- Confidence levels and failure reasons exposed in events.
- CLI commands:
- `dns leak status`
- `dns leak watch`
- `dns leak report`
- `dns leak watch --iface-diag` (diagnostics for capture-capable interfaces).
- `dns leak watch --follow` runs until Ctrl-C by combining a long duration with
a `tokio::signal::ctrl_c()` early-exit path.
- Interface selection:
- per-interface open timeout to avoid capture hangs
- ordered scan prefers non-loopback + named ethernet/wlan and interfaces with IPs
- verbose logging of interface selection attempts + failures (use `-v` / `-vv`)
- overall watch timeout accounts for worst-case interface scan time
- Capture loop:
- receiver runs in a worker thread; main loop polls with a short timeout to avoid blocking
## Partially implemented
- Route/interface classification: heuristic only (loopback/tunnel/physical by iface name).
- Safe path matching: allowed ifaces/dests/ports/processes; no route-based policy.
## Not implemented (v0.4 backlog)
- DoH heuristic detection (SNI/endpoint list/traffic shape).
- GeoIP enrichment of leak events.
- Process tree reporting (PPID chain).
## Known limitations
- On Windows, pcap capture may require selecting a specific NPF interface; use
`dns leak watch --iface-diag` to list interfaces that can be opened.
- Leak-D test attempts on Windows did not fire; see test notes below.
## Test notes
- `dns leak watch --duration 8s --summary-only --iface <NPF>` captured UDP/53 and produced Leak-A.
- `dns leak watch --duration 15s --iface <NPF>` with scripted DNS query + TCP connect:
- UDP/53 query/response captured (Leak-A).
- TCP SYNs observed, but did not match cached DNS response IPs, so Leak-D did not trigger.

View File

@@ -9,21 +9,22 @@ This document tracks current implementation status against the original design i
- GeoIP: local GeoLite2 Country + ASN support. - GeoIP: local GeoLite2 Country + ASN support.
- Probe: ping/tcping/trace with GeoIP enrichment. - Probe: ping/tcping/trace with GeoIP enrichment.
- DNS: Hickory-based query/detect with best-effort heuristics. - DNS: Hickory-based query/detect with best-effort heuristics.
- HTTP: head/get via reqwest. - DNS leak detection: new passive monitor with policy and privacy modes (best-effort).
- HTTP: head/get via reqwest with best-effort timing breakdown and optional HTTP/3 (feature-gated).
- TLS: rustls-based handshake/verify/cert/alpn. - TLS: rustls-based handshake/verify/cert/alpn.
- Discover: mDNS/SSDP bounded collection. - Discover: mDNS/SSDP bounded collection plus LLMNR/NBNS (best-effort).
- Diag: bundle export in zip. - Diag: bundle export in zip.
## Deviations or refinements ## Deviations or refinements
- DNS adds DoT/DoH and SOCKS5 proxy support. - DNS adds DoT/DoH and SOCKS5 proxy support.
- HTTP/TLS/TCP ping include SOCKS5 proxy support. - HTTP/TLS/TCP ping include SOCKS5 proxy support.
- HTTP timing breakdown is best-effort: `dns_ms` and `ttfb_ms` are captured; `connect_ms`/`tls_ms` remain placeholders. - HTTP timing breakdown is best-effort: `dns_ms`/`ttfb_ms` are captured; `connect_ms`/`tls_ms` are measured via a separate probe and can be `null` with warnings.
- DNS watch uses `pnet` and is feature-gated as best-effort. - DNS watch uses `pnet` and is feature-gated as best-effort.
## Gaps vs design (as of now) ## Gaps vs design (as of now)
- HTTP/3 not implemented. - HTTP/3 is feature-gated and best-effort; not enabled in default builds.
- TLS verification is rustls-based (no OS-native verifier). - TLS verification is rustls-based (no OS-native verifier).
- Discover does not include LLMNR/NBNS. - DNS leak DoH detection is heuristic and currently optional.
## Current stage summary ## Current stage summary
- v0.1 scope is complete. - v0.1 scope is complete.

View File

@@ -0,0 +1,484 @@
Yep — your DNS leak detector design **fits your current architecture cleanly**, and **making it a dedicated subcrate is the right move**.
Your existing workspace already follows “thin CLI + feature crates + platform traits + OS dispatch”, so the detector drops in naturally as **one new feature crate** plus a small extension to `wtfnet-platform` for **best-effort process attribution**. This matches the designs pipeline idea: **Traffic Sensor → Classifier → (Process Attribution) → Leak Rules → Report**
Below Ill do two things:
1. **Evaluate fit + integration plan** (what needs changing / what can be reused)
2. Provide **repo-ready v0.4.0 documents**, focused on implementing this detector.
---
# 1) Can this design fit your current architecture?
## ✅ Yes — it matches your crate boundaries almost 1:1
Your detector design already breaks down into modules that map directly into a new subcrate:
* Policy & Configuration
* Passive Traffic Sensor (UDP53/TCP53/853/“DoH-ish”)
* Classifier (DNS / DoT / DoH + confidence)
* DNS Parser (plaintext only)
* Flow Tracker (interface/route/process correlation)
* Leak Detector rules A/B/C/D
* Report generator + structured events
So: **new crate = `wtfnet-dnsleak`**, and keep `wtfnet-dns` for **active query/detect/watch** (already exists).
## ✅ What you can reuse immediately
### Reuse from your current codebase
* `wtfnet-dns watch` capture plumbing (you already have passive-ish capture)
* existing DNS parsing logic (for UDP/TCP 53)
* existing GeoIP enrichment pipeline (optional)
* `wtfnet-platform` interface snapshot + routing info (for “which interface leaked?”)
* your JSON output envelope + logging style
### Reuse from the detector design directly
* Leak definitions A/B/C/D (this is already precise and CLI-tool friendly)
* DoH recognition levels + confidence model (strong → weak)
* “safe DNS path” abstraction (interfaces/dests/process/ports)
* process attribution confidence levels and failure reasons
* privacy modes (Full/Redacted/Minimal)
## ✅ What youll need to add (small + contained)
### 1) New platform trait for flow → process owner (best effort)
Your design explicitly wants PID/PPID/process name to show “who leaked DNS” , and it recommends an OS-specific provider interface .
So extend `wtfnet-platform` with something like:
* `FlowOwnerProvider::owner_of(tuple, timestamp) -> ProcessInfo + confidence`
This stays consistent with your existing “platform traits + OS dispatch” architecture.
### 2) Route/interface classification (“tunnel vs physical vs loopback”)
Your event schema wants `route_class` (tunnel/physical/loopback) .
Implement this via **policy-configured interface labels** + a small heuristic fallback.
### 3) DoH detection will be heuristic (and that's OK)
Your design already plans for this: DoH classification is “hard”, confidence-based .
In v0.4, keep it simple:
* DoT = TCP/853
* Plain DNS = UDP/TCP 53 (+ parse QNAME)
* “DoH-ish” = TCP/443 to known resolver host/IP OR “small HTTPS bursts” pattern
…and attach confidence.
---
# 2) v0.4.0 documents (repo-ready)
Below are two docs you can drop into `docs/`.
---
## `docs/RELEASE_v0.4.0.md`
```markdown
# WTFnet v0.4.0 — DNS Leak Detection
v0.4.0 introduces a client-side DNS leak detector aimed at censorship-resistance threat models:
detect when DNS behavior escapes the intended safe path. The detector focuses on evidence:
transport, interface, destination, and (best-effort) process attribution.
This release does NOT include HTTP/3 or OS-native TLS verification.
---
## 0) Summary
New major capability: `dns leak` command group.
Core idea:
Passive monitor captures outbound DNS-like traffic → classify (Plain DNS / DoT / DoH) →
enrich with interface/route/process metadata → evaluate leak definitions (A/B/C/D) →
emit events + summary report.
Leak definitions are explicit:
- Leak-A: plaintext DNS outside safe path
- Leak-B: split-policy intent leak (proxy-required domains resolved via ISP/local path)
- Leak-C: encrypted DNS escape/bypass (DoH/DoT outside approved egress)
- Leak-D: mismatch risk indicator (DNS egress differs from TCP/TLS egress)
---
## 1) Goals
### G1. Detect DNS leaks without needing special test domains
Passive detection must work continuously and produce evidence.
### G2. Support censorship-resistance leak definitions
Include both classic VPN-bypass leaks and split-policy intent leaks.
### G3. Best-effort process attribution
Attach PID/PPID/process name when OS allows; degrade gracefully with confidence.
### G4. Privacy-aware by default
Support privacy modes: Full / Redacted / Minimal.
---
## 2) Non-goals (v0.4.0)
- No "doctor" / smart one-shot diagnosis command
- No shell completions / man pages
- No HTTP/3 support
- No OS-native TLS verifier integration
- No firewall modification / "kill switch" management (detection only)
---
## 3) New crates / architecture changes
### 3.1 New subcrate: `wtfnet-dnsleak`
Responsibilities:
- passive sensor (pcap/pnet feature-gated)
- DNS parser (plaintext only)
- transport classifier: udp53/tcp53/dot/doh (confidence)
- flow tracker + metadata enrichment
- process attribution integration
- leak rules engine (A/B/C/D)
- structured event + summary report builder
### 3.2 `wtfnet-platform` extension: flow ownership lookup
Add a new trait:
- FlowOwnerProvider: map observed traffic 5-tuple → process info (best-effort)
Return process attribution confidence:
HIGH/MEDIUM/LOW/NONE plus failure reason.
---
## 4) CLI scope
### 4.1 Commands
New command group:
#### `wtfn dns leak watch`
Start passive monitoring for a bounded duration (default 10s):
- classify transports (udp53/tcp53/dot/doh)
- apply leak rules and emit events + summary
#### `wtfn dns leak status`
Print baseline snapshot:
- interfaces + routes
- system DNS configuration
- active policy summary
#### `wtfn dns leak report`
Parse a saved events file and produce a human summary.
### 4.2 Flags (proposed)
Common:
- `--duration <Ns|Nms>` (default 10s)
- `--iface <name>` (optional capture interface)
- `--policy <path>` (JSON policy file)
- `--profile <full-tunnel|proxy-stub|split>` (built-in presets)
- `--privacy <full|redacted|minimal>` (default redacted)
- `--geoip` (include GeoIP in event outputs)
- `--out <path>` (write JSON report/events)
---
## 5) Policy model (v0.4.0)
Safe DNS path constraints can be defined by:
- allowed interfaces: loopback/tunnel
- allowed destination set: proxy IPs, internal resolvers
- allowed processes: only local stub/proxy can resolve upstream
- allowed ports: e.g. only 443 to proxy server
A DNS event is a leak if it violates safe-path constraints.
Built-in profiles:
1) full-tunnel VPN style
2) proxy + local stub (default, censorship model)
3) split policy
---
## 6) Outputs
### 6.1 Leak events (structured)
Each LeakEvent includes:
- timestamp
- transport: udp53/tcp53/dot/doh/unknown
- qname/qtype (nullable)
- interface + route_class
- dst ip:port
- process info (nullable) + attribution confidence
- leak_type: A/B/C/D
- severity: P0..P3
- evidence fields + optional geoip
### 6.2 Summary report
- leak counts by type
- top leaking processes (if available)
- top resolver destinations
- timeline/burst hints
---
## 7) Deliverables checklist
MUST:
- new `wtfnet-dnsleak` crate integrated into workspace + CLI
- passive capture for UDP/TCP 53 and TCP 853
- DoH heuristic classification (confidence-based)
- policy engine + Leak-A/B/C/D rules
- structured events + human summary
- privacy modes full/redacted/minimal
- best-effort process attribution with confidence and failure reason
SHOULD:
- saved report file support (`--out report.json`)
- route_class inference with policy hints + heuristics
NICE:
- correlation_id (DNS → subsequent TCP/TLS connection) for Leak-D mismatch indicator
---
## 8) Definition of Done
- v0.4.0 builds on Linux (Debian/Ubuntu) and Windows
- `wtfn dns leak watch` detects:
- plaintext DNS leaving physical interface (Leak-A)
- DoT traffic leaving outside approved egress (Leak-C)
- DoH-ish encrypted resolver traffic outside policy (Leak-C)
- events include interface + dst + (best-effort) PID/process info
- output remains stable and additive; no breaking change to v0.3 commands
```
---
## `docs/DNS_LEAK_DETECTOR_IMPLEMENTATION.md`
```markdown
# DNS Leak Detector — Implementation Guide (v0.4)
This document explains how to implement the DNS leak detector as a new subcrate in WTFnet.
---
## 1) New crate: `wtfnet-dnsleak`
### 1.1 Module layout
crates/wtfnet-dnsleak/src/
- lib.rs
- policy.rs # safe path constraints + presets
- sensor.rs # passive capture -> normalized TrafficEvent stream
- classify.rs # transport classification + confidence
- parse_dns.rs # plaintext DNS parser: qname/qtype
- attrib.rs # process attribution integration (platform provider)
- route.rs # interface/route classification (tunnel/physical/loopback)
- rules.rs # Leak-A/B/C/D evaluation
- report.rs # LeakEvent + SummaryReport builders
- privacy.rs # full/redacted/minimal redaction logic
---
## 2) Core data types
### 2.1 TrafficEvent (raw from sensor)
Fields:
- ts: timestamp
- proto: udp/tcp
- src_ip, src_port
- dst_ip, dst_port
- iface_name (capture interface if known)
- payload: optional bytes (only for plaintext DNS parsing)
### 2.2 ClassifiedEvent
Adds:
- transport: udp53/tcp53/dot/doh/unknown
- doh_confidence: HIGH/MEDIUM/LOW (only if doh)
- qname/qtype: nullable
### 2.3 EnrichedEvent
Adds:
- route_class: loopback/tunnel/physical/unknown
- process info: pid/ppid/name (nullable)
- attribution_confidence: HIGH/MEDIUM/LOW/NONE
- attrib_failure_reason: optional string
- geoip: optional
### 2.4 LeakEvent (final output)
Adds:
- leak_type: A/B/C/D
- severity: P0..P3
- policy_rule_id
- evidence: minimal structured evidence
---
## 3) Platform integration: Process Attribution Engine (PAE)
### 3.1 Trait addition (wtfnet-platform)
Add:
trait FlowOwnerProvider {
fn owner_of(
&self,
proto: Proto,
src_ip: IpAddr,
src_port: u16,
dst_ip: IpAddr,
dst_port: u16,
ts: SystemTime,
) -> FlowOwnerResult;
}
FlowOwnerResult:
- pid, ppid, process_name (optional)
- confidence: HIGH/MEDIUM/LOW/NONE
- failure_reason: optional string
Design rule: attribution is best-effort and never blocks leak detection.
---
## 4) Transport classification logic
### 4.1 Plain DNS
Match:
- UDP dst port 53 OR TCP dst port 53
Parse QNAME/QTYPE from payload.
### 4.2 DoT
Match:
- TCP dst port 853
### 4.3 DoH (heuristic)
Match candidates:
- TCP dst port 443 AND (one of):
- dst IP in configured DoH resolver list
- dst SNI matches known DoH provider list (if available)
- frequent small HTTPS bursts pattern (weak)
Attach confidence:
- MEDIUM: known endpoint match
- LOW: traffic-shape heuristic only
Important: you mostly need to detect encrypted resolver traffic bypassing the proxy channel,
not to fully prove DoH with payload inspection.
---
## 5) Policy model
Policy defines "safe DNS path" constraints:
- allowed interfaces
- allowed destinations (IP/CIDR)
- allowed processes
- allowed ports
A DNS event is a leak if it violates safe-path constraints.
### 5.1 Built-in profiles
full-tunnel:
- allow DNS only via tunnel iface or loopback stub
- any UDP/TCP 53 on physical iface => Leak-A
proxy-stub (default):
- allow DNS only to loopback stub
- allow stub upstream only to proxy destinations
- flag direct DoH/DoT outside proxy path => Leak-C
split:
- allow plaintext DNS only for allowlist
- enforce unknown => proxy resolve (Leak-B)
---
## 6) Leak rules (A/B/C/D)
Leak-A (plaintext escape):
- transport udp53/tcp53
- route_class != allowed
- dst not in allowed destination set
Leak-B (split policy intent leak):
- qname matches proxy-required set or "unknown"
- query observed going to ISP/domicile resolver or non-tunnel iface
Leak-C (encrypted bypass):
- DoT or DoH flow exists
- not via approved egress path (iface/destination)
Leak-D (mismatch indicator):
- correlate qname to later TCP/TLS flows (optional v0.4 NICE)
---
## 7) Privacy modes
Because domains and cmdlines are sensitive, support:
- Full: store full qname and cmdline
- Redacted (default): hash qname or keep eTLD+1 only; truncate cmdline
- Minimal: no domains/cmdline; keep leak counts + resolver IPs + process name
Privacy mode applies in report builder, not in sensor.
---
## 8) CLI integration
Add under `dns` command group:
- `dns leak status`
- `dns leak watch`
- `dns leak report`
`watch` returns:
- summary report (human) by default
- `--json` returns structured report with events list
---
## 9) Recommended incremental build plan
Phase 1 (core passive detection):
- sensor: udp/tcp capture
- classify: udp53/tcp53/dot
- parse plaintext qname/qtype
- policy: allowlist + allowed interfaces/dests
- leak rules: Leak-A + Leak-C (DoT)
- report: events + summary
Phase 2 (process attribution + DoH heuristics):
- platform FlowOwnerProvider impls
- DoH heuristic classification + confidence
- privacy modes
Phase 3 (optional correlation / Leak-D):
- flow tracker correlating DNS -> TCP/TLS connect events
- mismatch indicator output
```
---
# Small note about *where* it lives in your architecture
This design slots in with **minimal churn**:
* ✅ Add `crates/wtfnet-dnsleak`
* ✅ Add `dns leak ...` subcommands in CLI
* ✅ Add a **single** new platform trait for process attribution (best-effort)
* ✅ Reuse your existing `dns watch` capture approach as the sensor
…which is exactly what your design describes: passive monitoring + classification + rules + evidence output and the PAE “event enricher” location in the pipeline .
**If it's too hard to detect DoH traffic, skip it.**
---

View File

@@ -26,8 +26,13 @@ This document tracks the planned roadmap alongside the current implementation st
- TLS extras: OCSP stapling indicator, richer cert parsing - TLS extras: OCSP stapling indicator, richer cert parsing
- ports conns improvements (top talkers / summary) - ports conns improvements (top talkers / summary)
- better baseline/diff for system roots - better baseline/diff for system roots
- optional HTTP/3 (feature-gated)
- optional LLMNR/NBNS discovery - optional LLMNR/NBNS discovery
- optional HTTP/3 (feature-gated; experimental, incomplete)
### v0.4 (dns leak detection)
- dns leak detector (passive watch + report)
- process attribution (best-effort)
- policy profiles + privacy modes
## Current stage ## Current stage
@@ -63,13 +68,26 @@ This document tracks the planned roadmap alongside the current implementation st
- HTTP crate with head/get support, timing breakdown, optional GeoIP, and SOCKS5 proxy. - HTTP crate with head/get support, timing breakdown, optional GeoIP, and SOCKS5 proxy.
- TLS crate with handshake/verify/cert/alpn support in CLI (SOCKS5 proxy supported). - TLS crate with handshake/verify/cert/alpn support in CLI (SOCKS5 proxy supported).
- TCP ping supports SOCKS5 proxy. - TCP ping supports SOCKS5 proxy.
- v0.3: probe trace per-hop stats + rdns support.
- v0.3: http connect/tls timing best-effort with warnings.
- v0.3: ports conns (active TCP connections + summaries).
- v0.3: TLS extras (OCSP flag + richer cert parsing).
- v0.3: cert baseline/diff improvements.
- v0.3: HTTP/3 request path (feature-gated; experimental, incomplete).
- v0.3: HTTP/3 error classification (feature-gated).
- v0.4: platform flow-owner lookup (best-effort).
- v0.4: dns leak detector crate + CLI commands (status/watch/report).
- Discover crate with mdns/ssdp commands. - Discover crate with mdns/ssdp commands.
- Discover llmnr/nbns (best-effort).
- Diag crate with report and bundle export. - Diag crate with report and bundle export.
- Basic unit tests for calc and TLS parsing. - Basic unit tests for calc and TLS parsing.
### In progress ### In progress
- v0.3: probe trace upgrades (per-hop stats + rdns). - v0.4: DoH heuristic classification (optional).
- v0.4: Leak-D mismatch correlation (optional).
- v0.3: optional HTTP/3 (feature-gated; keep disabled until stabilized).
### Next ### Next
- Complete v0.3 trace upgrades and update CLI output. - Update docs/README/COMMANDS for v0.4.
- Add v0.2 tests (dns detect, basic http/tls smoke). - Add v0.2 tests (dns detect, basic http/tls smoke).
- Track DNS leak design status in `docs/dns_leak_implementation_status.md`.

13
justfile Normal file
View File

@@ -0,0 +1,13 @@
# justfile (cross-platform, no bash)

# Interpreter for helper scripts; override via PYTHON=... in the environment.
python := env_var_or_default("PYTHON", if os() == "windows" { "python" } else { "python3" })

# Output directory for release archives.
dist_dir := "dist"

# Scratch directory where release contents are staged before archiving.
stage_root := "target/release-package"

# Default recipe: show the list of available recipes.
default:
    @just --list

# Build and package a binary via scripts/release_meta.py; both args optional.
release bin='' target='':
    {{python}} scripts/release_meta.py --bin "{{bin}}" --target "{{target}}" --dist-dir "{{dist_dir}}" --stage-root "{{stage_root}}"

# Remove packaging outputs (dist/ and the staging tree).
clean-dist:
    {{python}} -c "import shutil; shutil.rmtree('dist', ignore_errors=True); shutil.rmtree('target/release-package', ignore_errors=True)"

175
scripts/release_meta.py Normal file
View File

@@ -0,0 +1,175 @@
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import json
import os
import platform
import shutil
import subprocess
import sys
import tarfile
import zipfile
from pathlib import Path
from typing import Any
def run(cmd: list[str], *, capture: bool = False) -> str:
    """Run a command, raising on non-zero exit.

    When capture is True, return the command's stripped stdout; otherwise
    stream output to the console and return an empty string.
    """
    if not capture:
        subprocess.check_call(cmd)
        return ""
    output = subprocess.check_output(cmd, text=True)
    return output.strip()
def cargo_metadata() -> dict[str, Any]:
    """Return the parsed JSON from `cargo metadata --no-deps` for this workspace."""
    raw = run(["cargo", "metadata", "--no-deps", "--format-version", "1"], capture=True)
    return json.loads(raw)
def rustc_host_triple() -> str:
    """Extract the host target triple from `rustc -vV` verbose version output."""
    prefix = "host: "
    info = run(["rustc", "-vV"], capture=True)
    for line in info.splitlines():
        if line.startswith(prefix):
            return line[len(prefix):].strip()
    raise RuntimeError("Could not determine host target triple from `rustc -vV`")
def is_windows_host() -> bool:
    """Best-effort Windows host detection.

    Covers native Windows Python (os.name == "nt") as well as most
    MSYS/Cygwin Python builds, which report differing platform strings.
    """
    if os.name == "nt":
        return True
    sys_platform = sys.platform.lower()
    if sys_platform.startswith(("win", "cygwin", "msys")):
        return True
    system_name = platform.system().lower()
    return any(token in system_name for token in ("windows", "cygwin", "msys"))
def exe_suffix_for_target(target_triple: str) -> str:
    """Return the executable suffix for a target triple: ".exe" on Windows, else ""."""
    if "windows" in target_triple:
        return ".exe"
    return ""
def find_bin_targets(meta: dict[str, Any]) -> list[tuple[str, str, str]]:
    """List all (package name, package version, bin target name) triples.

    Scans cargo metadata for targets whose kind includes "bin" and returns
    them sorted by (package, target, version) for a deterministic default.
    """
    found = [
        (pkg["name"], pkg["version"], tgt["name"])
        for pkg in meta.get("packages", [])
        for tgt in pkg.get("targets", [])
        if "bin" in tgt.get("kind", [])
    ]
    # Stable deterministic ordering so "first bin" is reproducible.
    found.sort(key=lambda item: (item[0], item[2], item[1]))
    return found
def find_owner_package_for_bin(meta: dict[str, Any], bin_name: str) -> tuple[str, str]:
    """Return (package name, version) for the package that defines bin_name.

    Raises RuntimeError when no package in the metadata provides that bin target.
    """
    for pkg in meta.get("packages", []):
        for tgt in pkg.get("targets", []):
            if "bin" in tgt.get("kind", []) and tgt.get("name") == bin_name:
                return pkg["name"], pkg["version"]
    raise RuntimeError(f"Could not find a package providing bin '{bin_name}'")
def stage_and_archive(
    *,
    pkg_name: str,
    pkg_version: str,
    bin_path: Path,
    data_dir: Path,
    dist_dir: Path,
    stage_root: Path,
    target_triple_for_name: str,
) -> Path:
    """Stage the built binary plus any data/*.mmdb files and archive them.

    Layout: <stage_root>/<pkg>-v<ver>-<triple>/{binary, data/*.mmdb}.
    Produces a .zip on Windows hosts and a .tar.gz elsewhere, written to
    dist_dir, and returns the archive path.
    """
    pkg_base = f"{pkg_name}-v{pkg_version}-{target_triple_for_name}"
    stage_dir = stage_root / pkg_base
    stage_data_dir = stage_dir / "data"

    # Rebuild the staging tree from scratch so stale files never leak in.
    if stage_root.exists():
        shutil.rmtree(stage_root)
    stage_data_dir.mkdir(parents=True, exist_ok=True)
    dist_dir.mkdir(parents=True, exist_ok=True)

    shutil.copy2(bin_path, stage_dir / bin_path.name)

    mmdb_files = sorted(data_dir.glob("*.mmdb")) if data_dir.exists() else []
    if not mmdb_files:
        print("WARN: no ./data/*.mmdb found; packaging binary only.", file=sys.stderr)
    else:
        for mmdb in mmdb_files:
            shutil.copy2(mmdb, stage_data_dir / mmdb.name)

    if is_windows_host():
        archive = dist_dir / f"{pkg_base}.zip"
        with zipfile.ZipFile(archive, "w", compression=zipfile.ZIP_DEFLATED) as zf:
            for entry in stage_dir.rglob("*"):
                if entry.is_file():
                    # Archive members live under <pkg_base>/ just like the tarball.
                    member = Path(pkg_base) / entry.relative_to(stage_dir)
                    zf.write(entry, arcname=str(member))
        return archive

    archive = dist_dir / f"{pkg_base}.tar.gz"
    with tarfile.open(archive, "w:gz") as tf:
        tf.add(stage_dir, arcname=pkg_base)
    return archive
def main() -> int:
    """CLI entry point: build the requested (or default) bin target, then
    package it together with data/*.mmdb into a dist archive.

    Returns 0 on success, 2 when no bin targets exist, 3 when the built
    binary cannot be located.
    """
    ap = argparse.ArgumentParser(description="Build and package Rust binary + data/*.mmdb")
    ap.add_argument("--bin", default="", help="Binary target name (optional)")
    ap.add_argument("--target", default="", help="Cargo target triple (optional)")
    ap.add_argument("--dist-dir", default="dist", help="Output directory for archives")
    ap.add_argument("--stage-root", default="target/release-package", help="Staging directory root")
    ap.add_argument("--data-dir", default="data", help="Directory containing .mmdb files")
    args = ap.parse_args()
    meta = cargo_metadata()
    bins = find_bin_targets(meta)
    if not bins:
        print("ERROR: no binary targets found in workspace.", file=sys.stderr)
        return 2
    bin_name = args.bin.strip()
    if not bin_name:
        # find_bin_targets() sorts its result, so bins[0] is deterministic.
        _, _, bin_name = bins[0]
        print(f"INFO: --bin not provided; defaulting to '{bin_name}'", file=sys.stderr)
    pkg_name, pkg_version = find_owner_package_for_bin(meta, bin_name)
    host_triple = rustc_host_triple()
    # Normalize the target triple once instead of re-stripping it everywhere.
    target = args.target.strip()
    target_triple_for_name = target or host_triple
    # Build only the owning package
    build_cmd = ["cargo", "build", "-p", pkg_name, "--release"]
    if target:
        build_cmd += ["--target", target]
    run(build_cmd)
    # Locate binary: cargo writes to target/<triple>/release when --target
    # is given, and to target/release otherwise.
    exe_suffix = exe_suffix_for_target(target_triple_for_name)
    if target:
        bin_dir = Path("target") / target / "release"
    else:
        bin_dir = Path("target") / "release"
    bin_path = bin_dir / f"{bin_name}{exe_suffix}"
    if not bin_path.exists():
        print(f"ERROR: built binary not found: {bin_path}", file=sys.stderr)
        print("Hint: pass the correct bin target name: just release bin=<name>", file=sys.stderr)
        return 3
    out = stage_and_archive(
        pkg_name=pkg_name,
        pkg_version=pkg_version,
        bin_path=bin_path,
        data_dir=Path(args.data_dir),
        dist_dir=Path(args.dist_dir),
        stage_root=Path(args.stage_root),
        target_triple_for_name=target_triple_for_name,
    )
    print(f"Created: {out}")
    return 0
if __name__ == "__main__":
    # Propagate main()'s integer status code to the shell.
    sys.exit(main())