forked from manbo/internal-docs
Fix: figure misplacement; remove the "Lower is better" column from the table
@@ -98,12 +98,12 @@ We formalize each training instance as a fixed-length window of length
 
 A key empirical and methodological tension in ICS synthesis is that temporal realism and marginal/distributional realism can compete when optimized monolithically: sequence models trained primarily for regression often over-smooth heavy tails and intermittent bursts, while purely distribution-matching objectives can erode long-range structure. Diffusion models provide a principled route to rich distribution modeling through iterative denoising, but they do not, by themselves, resolve (i) the need for a stable low-frequency temporal scaffold, nor (ii) the discrete legality constraints for supervisory variables \cite{ho2020denoising,song2021score}. Recent time-series diffusion work further suggests that separating coarse structure from stochastic refinement can be an effective inductive bias for long-horizon realism \cite{kollovieh2023tsdiff,sikder2023transfusion}.
 
-\begin{figure}[htbp]
+\begin{figure*}[t]
 \centering
-\includegraphics[width=0.8\textwidth]{fig-design-v2.png}
+\includegraphics[width=\textwidth]{fig-design-v2.png}
 % \caption{Description of the figure.}
 \label{fig:design}
-\end{figure}
+\end{figure*}
 
 Motivated by these considerations, we propose Mask-DDPM, organized in the following order:
 \begin{enumerate}
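The context paragraph in the hunk above appeals to DDPM-style iterative denoising \cite{ho2020denoising}. For readers unfamiliar with the mechanism, the following is a minimal, generic ancestral-sampling loop in the style of Ho et al. (2020). It is an illustrative sketch only, not the paper's Mask-DDPM: the `eps_model` noise predictor, the linear beta schedule, and the tensor shapes are all assumptions.

```python
# Generic DDPM ancestral sampling (Ho et al., 2020) -- illustrative sketch only,
# not the paper's Mask-DDPM. `eps_model(x_t, t)` is an assumed trained noise
# predictor; shapes and the linear beta schedule are placeholders.
import torch

T = 1000
betas = torch.linspace(1e-4, 0.02, T)          # linear noise schedule
alphas = 1.0 - betas
alpha_bars = torch.cumprod(alphas, dim=0)      # \bar{alpha}_t = prod_{s<=t} alpha_s

@torch.no_grad()
def sample(eps_model, shape):
    """Draw a synthetic window; shape = (batch, window_len, channels)."""
    x = torch.randn(shape)                                     # x_T ~ N(0, I)
    for t in reversed(range(T)):
        z = torch.randn_like(x) if t > 0 else torch.zeros_like(x)
        t_batch = torch.full((shape[0],), t, dtype=torch.long)
        eps = eps_model(x, t_batch)                            # predicted noise
        # posterior mean of x_{t-1} given x_t (Eq. 11 in Ho et al., 2020)
        x = (x - betas[t] / torch.sqrt(1.0 - alpha_bars[t]) * eps) / torch.sqrt(alphas[t])
        x = x + torch.sqrt(betas[t]) * z                       # sampling noise
    return x
```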
@@ -248,24 +248,24 @@ For continuous channels, we measure distributional alignment using the Kolmogorov
 \subsection{Quantitative results}
 \label{sec:benchmark-quant}
 Across all runs, the mean continuous KS is 0.3311 (std 0.0079) and the mean discrete JSD is 0.0284 (std 0.0073), indicating that the generator preserves both continuous marginals and discrete semantic distributions at the feature level. Temporal consistency is similarly stable across runs, with a mean lag-1 autocorrelation difference of 0.2684 (std 0.0027), suggesting that the synthesized windows retain short-horizon dynamical structure \cite{ni2021sigwasserstein} instead of collapsing to marginal matching alone. The best-performing instance (by mean KS) attains 0.3224, and the small inter-seed variance shows that the reported fidelity is reproducible rather than driven by a single favorable initialization.
-\begin{figure}[htbp]
+\begin{figure*}[t]
 \centering
-\includegraphics[width=0.8\textwidth]{fig-overall-benchmark-v1.png}
+\includegraphics[width=\textwidth]{fig-overall-benchmark-v1.png}
 % \caption{Description of the figure.}
 \label{fig:benchmark}
-\end{figure}
+\end{figure*}
 
 \begin{table}[htbp]
 \centering
 \caption{Summary of benchmark metrics. Lower values indicate better performance.}
 \label{tab:metrics}
-\begin{tabular}{@{}l l c c@{}}
+\begin{tabular}{@{}l l c@{}}
 \toprule
-\textbf{Metric} & \textbf{Aggregation} & \textbf{Lower is better} & \textbf{Mean $\pm$ Std} \\
+\textbf{Metric} & \textbf{Aggregation} & \textbf{Mean $\pm$ Std} \\
 \midrule
-KS (continuous) & mean over continuous features & \checkmark & 0.3311 $\pm$ 0.0079 \\
-JSD (discrete) & mean over discrete features & \checkmark & 0.0284 $\pm$ 0.0073 \\
-Abs $\Delta$ lag-1 autocorr & mean over features & \checkmark & 0.2684 $\pm$ 0.0027 \\
+KS (continuous) & mean over continuous features & 0.3311 $\pm$ 0.0079 \\
+JSD (discrete) & mean over discrete features & 0.0284 $\pm$ 0.0073 \\
+Abs $\Delta$ lag-1 autocorr & mean over features & 0.2684 $\pm$ 0.0027 \\
 \bottomrule
 \end{tabular}
 \end{table}
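The benchmark paragraph and table in the hunk above report three aggregates: mean KS over continuous features, mean JSD over discrete features, and the mean absolute lag-1 autocorrelation gap. The diff does not show how these are computed; the sketch below is one common way to implement such metrics, assuming real and synthetic windows stored as arrays of shape (num_windows, window_len, num_features) and known continuous/discrete feature index lists. All names and the array layout are illustrative assumptions, not the paper's code.

```python
# Hedged sketch of the three reported benchmark metrics (illustrative only).
import numpy as np
from scipy.stats import ks_2samp
from scipy.spatial.distance import jensenshannon

def mean_ks(real, synth, cont_idx):
    # KS statistic per continuous feature, pooled over windows and time steps
    return float(np.mean([
        ks_2samp(real[..., i].ravel(), synth[..., i].ravel()).statistic
        for i in cont_idx
    ]))

def mean_jsd(real, synth, disc_idx):
    # JSD between categorical value frequencies (assumes integer-coded categories)
    vals = []
    for i in disc_idx:
        cats = np.union1d(real[..., i].ravel(), synth[..., i].ravel())
        p = np.array([(real[..., i] == c).mean() for c in cats])
        q = np.array([(synth[..., i] == c).mean() for c in cats])
        vals.append(jensenshannon(p, q, base=2) ** 2)  # square distance -> divergence
    return float(np.mean(vals))

def mean_abs_lag1_diff(real, synth):
    # |lag-1 autocorr(real) - lag-1 autocorr(synth)|, averaged over features
    def lag1(x):  # x: (num_windows, window_len, num_features)
        a, b = x[:, :-1, :], x[:, 1:, :]
        a = a - a.mean(axis=(0, 1))
        b = b - b.mean(axis=(0, 1))
        return (a * b).mean(axis=(0, 1)) / (a.std(axis=(0, 1)) * b.std(axis=(0, 1)) + 1e-8)
    return float(np.mean(np.abs(lag1(real) - lag1(synth))))
```

Per-seed values of these three aggregates would then be averaged across runs to obtain the mean and standard deviation figures shown in the table.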
@@ -1,4 +1,4 @@
 Reference for Methodology Part
 @inproceedings{vaswani2017attention,
 title={Attention Is All You Need},
 author={Vaswani, Ashish and Shazeer, Noam and Parmar, Niki and Uszkoreit, Jakob and Jones, Llion and Gomez, Aidan N and Kaiser, {\L}ukasz and Polosukhin, Illia},
@@ -290,7 +290,7 @@ series = {SIGCOMM '06}
 ISSN={0167-4048},
 url={http://dx.doi.org/10.1016/j.cose.2018.12.012},
 DOI={10.1016/j.cose.2018.12.012},
-journal={Computers & Security},
+journal={Computers \& Security},
 publisher={Elsevier BV},
 author={Ring, Markus and Schlör, Daniel and Landes, Dieter and Hotho, Andreas},
 year={2019},
@@ -472,7 +472,7 @@ series = {CySWATER '17}
 }
 
 @misc{godefroid2017learnfuzzmachinelearninginput,
-title={Learn&Fuzz: Machine Learning for Input Fuzzing},
+title={Learn\&Fuzz: Machine Learning for Input Fuzzing},
 author={Patrice Godefroid and Hila Peleg and Rishabh Singh},
 year={2017},
 eprint={1701.07232},
@@ -603,3 +603,7 @@ Reference for Benchmark
 year={2001},
 publisher={Elsevier}
 }
+
+
+
+