diff --git a/Cargo.lock b/Cargo.lock
index 1040625..ae584a1 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -78,6 +78,12 @@ version = "0.3.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280"
 
+[[package]]
+name = "hex"
+version = "0.4.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
+
 [[package]]
 name = "ipnetwork"
 version = "0.20.0"
@@ -87,6 +93,12 @@ dependencies = [
  "serde",
 ]
 
+[[package]]
+name = "itoa"
+version = "1.0.16"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7ee5b5339afb4c41626dde77b7a611bd4f2c202b897852b4bcf5d03eddc61010"
+
 [[package]]
 name = "libc"
 version = "0.2.178"
@@ -129,10 +141,13 @@ version = "0.1.0"
 dependencies = [
  "anyhow",
  "bytes",
+ "hex",
  "pcap",
  "pnet",
  "sawp",
  "sawp-modbus",
+ "serde",
+ "serde_json",
 ]
 
 [[package]]
@@ -341,6 +356,12 @@ version = "0.8.8"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58"
 
+[[package]]
+name = "ryu"
+version = "1.0.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "62049b2877bf12821e8f9ad256ee38fdc31db7387ec2d3b3f403024de2034aea"
+
 [[package]]
 name = "sawp"
 version = "0.13.1"
@@ -390,6 +411,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e"
 dependencies = [
  "serde_core",
+ "serde_derive",
 ]
 
 [[package]]
@@ -412,6 +434,19 @@ dependencies = [
  "syn 2.0.111",
 ]
 
+[[package]]
+name = "serde_json"
+version = "1.0.145"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c"
+dependencies = [
+ "itoa",
+ "memchr",
+ "ryu",
+ "serde",
+ "serde_core",
+]
+
 [[package]]
 name = "shlex"
 version = "1.3.0"
diff --git a/network/Cargo.toml b/network/Cargo.toml
index d919f41..1301a27 100644
--- a/network/Cargo.toml
+++ b/network/Cargo.toml
@@ -12,3 +12,6 @@ pnet = "0.35.0"
 anyhow = "1.0.100"
 bytes = "1.11.0"
 
+serde = { version = "1", features = ["derive"] }
+serde_json = "1"
+hex = "0.4"
diff --git a/network/src/types/modbus.rs b/network/src/types/modbus.rs
index 43ca1ac..677c1d6 100644
--- a/network/src/types/modbus.rs
+++ b/network/src/types/modbus.rs
@@ -1,45 +1,101 @@
 use anyhow::{Context, Error, Result, anyhow, bail};
-use bytes::Bytes;
+use bytes::{Bytes, BytesMut};
 use sawp::error::Error as SawpError;
 use sawp::error::ErrorKind;
 use sawp::parser::{Direction, Parse};
-use sawp_modbus::{Message, Modbus};
+use sawp_modbus::{Data, Message, Modbus};
+use serde::Deserialize;
+use serde_json::{Value, json};
+use std::convert::TryInto;
 use pcap::Capture;
 use pcap::Offline;
 use pnet::packet::Packet;
 use pnet::packet::ethernet::{EtherTypes, EthernetPacket};
+use pnet::packet::ip::IpNextHeaderProtocols;
 use pnet::packet::ipv4::Ipv4Packet;
 use pnet::packet::ipv6::Ipv6Packet;
 use pnet::packet::tcp::TcpPacket;
 use std::collections::{HashMap, HashSet};
 
-fn parse_bytes(input: &[u8]) -> Result<&[u8]> {
+#[derive(Debug, Deserialize)]
+pub struct Config {
+    pub functions: Vec<FunctionDescriptor>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct FunctionDescriptor {
+    pub function_code: u8,
+    pub name: Option<String>,
+    pub request: Option<Vec<FieldDescriptor>>,
+    pub response: Option<Vec<FieldDescriptor>>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct FieldDescriptor {
+    pub name: String,
+    #[serde(rename = "type")]
"type")] + pub ty: FieldType, + #[serde(default)] + pub length: Option, + #[serde(default)] + pub length_from: Option, + #[serde(default)] + pub scale: Option, + #[serde(default)] + pub enum_map: Option>, +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "lowercase")] +pub enum FieldType { + U8, + U16, + U32, + I16, + Bytes, + Rest, +} + +// Build lookup map +type FuncMap = HashMap; + +fn parse_bytes_zero_copy(mut buf: BytesMut) -> Result { let modbus = Modbus::default(); - let mut bytes = input; - while bytes.len() > 0 { - // If we know that this is a request or response, change the Direction - // for a more accurate parsing - match modbus.parse(bytes, Direction::Unknown) { - // The parser succeeded and returned the remaining bytes and the parsed modbus message + + while !buf.is_empty() { + match modbus.parse(buf.as_ref(), Direction::Unknown) { Ok((rest, Some(message))) => { println!("Modbus message: {:?}", message); - bytes = rest; + // rest is a subslice of buf.as_ref(); compute how many bytes consumed + let consumed = unsafe { + // pointer difference: rest.as_ptr() - buf.as_ptr() + rest.as_ptr().offset_from(buf.as_ptr()) as usize + }; + // consume the prefix up to consumed + let _ = buf.split_to(consumed); + // continue with the remainder in buf (zero-copy) + } + Ok((rest, None)) => { + // parser needs more bytes; return remaining buffer as-is + // We want to keep the exact remaining bytes (rest points into buf) + // Compute offset of rest and split off the prefix, returning the suffix. + let offset = unsafe { rest.as_ptr().offset_from(buf.as_ptr()) as usize }; + // split off prefix, keep suffix in `suffix` + let _prefix = buf.split_to(offset); + return Ok(buf); } - // The parser recognized that this might be modbus and made some progress, - // but more bytes are needed - Ok((rest, None)) => return Ok(rest), - // The parser was unable to determine whether this was modbus or not and more - // bytes are needed Err(SawpError { kind: ErrorKind::Incomplete(_), - }) => return Ok(bytes), - // The parser determined that this was not modbus + }) => { + // need more bytes; return current buffer unchanged + return Ok(buf); + } Err(e) => return Err(anyhow::Error::new(e)).context("failed to parse modbus"), } } - Ok(bytes) + Ok(buf) } fn get_tcp_data_v4(buf: Bytes) -> Result { @@ -67,3 +123,254 @@ fn get_tcp_data_v6(buf: Bytes) -> Result { ..(tcp_payload.as_ptr() as usize - buf.as_ptr() as usize + tcp_payload.len()), )) } + +pub fn get_tcp_payload_from_eth(eth: &EthernetPacket) -> Result { + let pkt = eth.packet(); // returns &[u8] + match eth.get_ethertype() { + EtherTypes::Ipv4 => { + let ipv4 = Ipv4Packet::new(pkt).ok_or_else(|| anyhow!("failed to parse ipv4"))?; + if ipv4.get_next_level_protocol() != IpNextHeaderProtocols::Tcp { + return Err(anyhow!("not a TCP packet")); + } + let ipv4_payload = ipv4.payload(); + let tcp = TcpPacket::new(ipv4_payload).ok_or_else(|| anyhow!("failed to parse tcp"))?; + let tcp_payload = tcp.payload(); + + // compute byte offsets relative to the original packet slice + let base = pkt.as_ptr() as usize; + let start = tcp_payload.as_ptr() as usize - base; + let end = start + tcp_payload.len(); + Ok(Bytes::copy_from_slice(&pkt[start..end]).slice(0..tcp_payload.len())) + } + + EtherTypes::Ipv6 => { + let ipv6 = Ipv6Packet::new(pkt).ok_or_else(|| anyhow!("failed to parse ipv6"))?; + if ipv6.get_next_header() != IpNextHeaderProtocols::Tcp { + return Err(anyhow!("not a TCP packet")); + } + let ipv6_payload = ipv6.payload(); + let tcp = 
+            let tcp = TcpPacket::new(ipv6.payload())
+                .ok_or_else(|| anyhow!("failed to parse tcp"))?;
+            Ok(Bytes::copy_from_slice(tcp.payload()))
+        }
+
+        _ => Err(anyhow!("not IPv4/IPv6")),
+    }
+}
+
+/// Top-level parse entry: takes a sawp_modbus::Message plus the descriptor map
+/// and returns a JSON object with the parsed fields.
+pub fn parse_sawp_message(
+    msg: &Message,
+    map: &FuncMap,
+    is_response: bool,
+) -> Result<Value, String> {
+    // Obtain the function code and the exception flag from msg.function.
+    let mut fn_code = msg.function.raw;
+    let is_exception_fn = (fn_code & 0x80) != 0;
+    if is_exception_fn {
+        fn_code &= 0x7F;
+    }
+
+    // Extract a byte slice to feed the generic PDU parser.
+    // Different Data variants carry their bytes differently.
+    let pdu_bytes = match &msg.data {
+        Data::Exception(exc) => {
+            // Exception carries the exception code; if the struct exposes it
+            // through an accessor instead of a raw byte field, adapt accordingly.
+            let code_byte = exc.raw;
+            vec![code_byte]
+        }
+        Data::Diagnostic { data, .. } => data.clone(),
+        Data::MEI { data, .. } => data.clone(),
+        Data::Read(read) => {
+            // Adjust to the Read struct of the sawp_modbus version in use;
+            // a common shape is Read { byte_count, data: Vec<u8> }.
+            if let Some(bytes) = try_extract_read_bytes(read) {
+                bytes
+            } else {
+                return Err("unsupported Read variant layout; adapt extraction".into());
+            }
+        }
+        Data::Write(write) => {
+            if let Some(bytes) = try_extract_write_bytes(write) {
+                bytes
+            } else {
+                return Err("unsupported Write variant layout; adapt extraction".into());
+            }
+        }
+        Data::ReadWrite { read, write } => {
+            // For ReadWrite, pick a side based on direction: responses carry
+            // the read bytes, requests carry the write bytes.
+            if is_response {
+                if let Some(bytes) = try_extract_read_bytes(read) {
+                    bytes
+                } else {
+                    return Err("unsupported Read variant in ReadWrite".into());
+                }
+            } else if let Some(bytes) = try_extract_write_bytes(write) {
+                bytes
+            } else {
+                return Err("unsupported Write variant in ReadWrite".into());
+            }
+        }
+        Data::ByteVec(v) => v.clone(),
+        Data::Empty => vec![],
+    };
+
+    // Exception responses are reported directly instead of going through a descriptor.
+    if is_exception_fn {
+        if pdu_bytes.is_empty() {
+            return Err("exception message missing exception code".into());
+        }
+        return Ok(json!({
+            "unit": msg.unit_id,
+            "function": msg.function.raw,
+            "exception": pdu_bytes[0],
+        }));
+    }
+
+    // Look up the descriptor and parse with the generic byte-slice parser.
+    let desc = map
+        .get(&fn_code)
+        .ok_or_else(|| format!("unknown function code: {}", fn_code))?;
+    let fields = if is_response {
+        desc.response.as_ref()
+    } else {
+        desc.request.as_ref()
+    }
+    .ok_or_else(|| "no descriptor for the requested direction".to_string())?;
+
+    parse_with_descriptor(&pdu_bytes, msg.unit_id, msg.function.raw, fields)
+}
+
+/// Generic parser: walk the PDU bytes according to a FieldDescriptor sequence.
+fn parse_with_descriptor(
+    pdu: &[u8],
+    unit: u8,
+    function: u8,
+    fields: &[FieldDescriptor],
+) -> Result<Value, String> {
+    let mut cursor = 0usize;
+    let mut out = serde_json::Map::new();
+
+    for fd in fields {
+        match fd.ty {
+            FieldType::U8 => {
+                if cursor + 1 > pdu.len() {
+                    return Err(format!("field {} out of bounds", fd.name));
+                }
+                let v = pdu[cursor] as u64;
+                cursor += 1;
+                insert_mapped(&mut out, fd, v)?;
+            }
+            FieldType::U16 => {
+                if cursor + 2 > pdu.len() {
+                    return Err(format!("field {} out of bounds", fd.name));
+                }
+                let be = &pdu[cursor..cursor + 2];
+                let v = u16::from_be_bytes(be.try_into().unwrap()) as u64;
+                cursor += 2;
+                insert_mapped(&mut out, fd, v)?;
+            }
+            FieldType::U32 => {
+                if cursor + 4 > pdu.len() {
+                    return Err(format!("field {} out of bounds", fd.name));
+                }
+                let be = &pdu[cursor..cursor + 4];
+                let v = u32::from_be_bytes(be.try_into().unwrap()) as u64;
+                cursor += 4;
+                insert_mapped(&mut out, fd, v)?;
+            }
+            FieldType::I16 => {
+                if cursor + 2 > pdu.len() {
+                    return Err(format!("field {} out of bounds", fd.name));
+                }
+                let be = &pdu[cursor..cursor + 2];
+                let v = i16::from_be_bytes(be.try_into().unwrap()) as i64;
+                cursor += 2;
+                if let Some(scale) = fd.scale {
+                    out.insert(fd.name.clone(), json!((v as f64) * scale));
+                } else {
+                    out.insert(fd.name.clone(), json!(v));
+                }
+            }
+            FieldType::Bytes => {
+                let len = if let Some(ref lf) = fd.length_from {
+                    let ref_val = out
+                        .get(lf)
+                        .ok_or_else(|| format!("length_from {} not parsed yet", lf))?;
+                    ref_val
+                        .as_u64()
+                        .ok_or_else(|| format!("length_from {} is not an integer", lf))?
+                        as usize
+                } else if let Some(l) = fd.length {
+                    l
+                } else {
+                    return Err(format!("bytes field {} missing length", fd.name));
+                };
+                if cursor + len > pdu.len() {
+                    return Err(format!("field {} out of bounds", fd.name));
+                }
+                let slice = &pdu[cursor..cursor + len];
+                cursor += len;
+                out.insert(fd.name.clone(), json!(hex::encode(slice)));
+            }
+            FieldType::Rest => {
+                let slice = &pdu[cursor..];
+                cursor = pdu.len();
+                out.insert(fd.name.clone(), json!(hex::encode(slice)));
+            }
+        }
+    }
+
+    // Surface any bytes the descriptor did not account for.
+    if cursor != pdu.len() {
+        out.insert("_trailing".to_string(), json!(hex::encode(&pdu[cursor..])));
+    }
+
+    let mut root = serde_json::Map::new();
+    root.insert("unit".to_string(), json!(unit));
+    root.insert("function".to_string(), json!(function));
+    root.insert("fields".to_string(), Value::Object(out));
+    Ok(Value::Object(root))
+}
+
+fn insert_mapped(
+    map: &mut serde_json::Map<String, Value>,
+    fd: &FieldDescriptor,
+    raw: u64,
+) -> Result<(), String> {
+    // Prefer an enum label when one is configured for this raw value.
+    if let Some(ref enum_map) = fd.enum_map {
+        if let Some(name) = enum_map.get(&raw) {
+            map.insert(fd.name.clone(), json!(name));
+            return Ok(());
+        }
+    }
+    if let Some(scale) = fd.scale {
+        map.insert(fd.name.clone(), json!((raw as f64) * scale));
+    } else {
+        map.insert(fd.name.clone(), json!(raw));
+    }
+    Ok(())
+}
+
+/// Helper extraction functions: adapt these to the Read/Write structs of the
+/// sawp_modbus version in use, replacing the bodies with direct field access
+/// (for example `Some(read.data.clone())` if the struct exposes a data field).
+fn try_extract_read_bytes<T>(_read: &T) -> Option<Vec<u8>> {
+    None
+}
+fn try_extract_write_bytes<T>(_write: &T) -> Option<Vec<u8>> {
+    None
+}
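A minimal sketch of how the descriptor-driven parser in network/src/types/modbus.rs can be exercised, assuming it is dropped into the same module as a unit test. The JSON layout follows the serde derives above; the function-code 3 (read holding registers) descriptor, the test module name, and the sample PDU bytes are illustrative and not part of the patch.

```rust
#[cfg(test)]
mod descriptor_tests {
    use super::*;

    #[test]
    fn parses_read_holding_registers_response_body() {
        // Hypothetical descriptor for function code 3: a byte count followed by
        // that many bytes of register data.
        let config: Config = serde_json::from_str(
            r#"{
                "functions": [{
                    "function_code": 3,
                    "name": "read_holding_registers",
                    "response": [
                        { "name": "byte_count", "type": "u8" },
                        { "name": "registers", "type": "bytes", "length_from": "byte_count" }
                    ]
                }]
            }"#,
        )
        .expect("descriptor JSON should deserialize");

        // Build the function-code -> descriptor lookup map.
        let map: FuncMap = config
            .functions
            .into_iter()
            .map(|f| (f.function_code, f))
            .collect();

        // Response body after the function code: byte count 4, then two registers.
        let pdu = [0x04, 0x00, 0x0A, 0x00, 0x0B];
        let fields = map[&3u8].response.as_ref().unwrap();
        let parsed = parse_with_descriptor(&pdu, 1, 3, fields).unwrap();

        assert_eq!(parsed["fields"]["byte_count"], 4);
        assert_eq!(parsed["fields"]["registers"], "000a000b");
    }
}
```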
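And a sketch of how the pieces are meant to fit together over an offline capture: each TCP payload is appended to a reassembly buffer and parse_bytes_zero_copy consumes complete messages, returning the leftover tail for the next segment. The scan_capture function is hypothetical, flow tracking is deliberately omitted, and Capture::from_file / next_packet assume a recent pcap crate version.

```rust
// Hypothetical driver, assumed to live next to the functions in this patch.
fn scan_capture(path: &str) -> anyhow::Result<()> {
    let mut cap: Capture<Offline> = Capture::from_file(path)?;
    // Single reassembly buffer: fine for single-connection test captures;
    // real use would key buffers per TCP flow.
    let mut pending = BytesMut::new();

    while let Ok(packet) = cap.next_packet() {
        let Some(eth) = EthernetPacket::new(packet.data) else {
            continue;
        };
        // Non-TCP frames and empty segments are simply skipped.
        if let Ok(payload) = get_tcp_payload_from_eth(&eth) {
            if payload.is_empty() {
                continue;
            }
            pending.extend_from_slice(&payload);
            // parse_bytes_zero_copy prints each complete message and returns
            // whatever tail could not be parsed yet.
            pending = parse_bytes_zero_copy(pending)?;
        }
    }
    Ok(())
}
```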