Fix arXiv ref and the compile problem

This commit is contained in:
2026-04-20 18:06:41 +08:00
parent 98a67a25a7
commit 6e406bf6f8
8 changed files with 1680 additions and 334 deletions

View File

@@ -0,0 +1,164 @@
\relax
\citation{10.1007/s10844-022-00753-1,Nankya2023-gp}
\@writefile{toc}{\contentsline {title}{Mask-DDPM: Transformer-Conditioned Mixed-Type Diffusion for Semantically Valid ICS Telemetry Synthesis}{1}{}\protected@file@percent }
\@writefile{toc}{\authcount {4}}
\@writefile{toc}{\contentsline {author}{Zhenglan Chen \and Mingzhe Yang \and Hongyu Yan \and Huan Yang}{1}{}\protected@file@percent }
\@writefile{toc}{\contentsline {section}{\numberline {1}Introduction}{1}{}\protected@file@percent }
\newlabel{sec:intro}{{1}{1}{}{section.1}{}}
\citation{shin}
\citation{info16100910}
\citation{pmlr-v202-kotelnikov23a,rasul2021autoregressivedenoisingdiffusionmodels}
\citation{jiang2023netdiffusionnetworkdataaugmentation}
\citation{pmlr-v202-kotelnikov23a}
\citation{10.1145/1151659.1159928}
\citation{Ring_2019}
\citation{10.1145/3544216.3544251}
\citation{Lin_2020}
\citation{7469060,10.1145/3055366.3055375}
\citation{NEURIPS2020_4c5bcfec}
\citation{song2021scorebasedgenerativemodelingstochastic}
\citation{rasul2021autoregressivedenoisingdiffusionmodels}
\citation{tashiro2021csdiconditionalscorebaseddiffusion}
\citation{wen2024diffstgprobabilisticspatiotemporalgraph}
\citation{liu2023pristiconditionaldiffusionframework}
\citation{kong2021diffwaveversatilediffusionmodel}
\citation{11087622}
\@writefile{toc}{\contentsline {section}{\numberline {2}Related Work}{3}{}\protected@file@percent }
\newlabel{sec:related}{{2}{3}{}{section.2}{}}
\citation{austin2023structureddenoisingdiffusionmodels}
\citation{Lin_2020}
\citation{hoogeboom2021argmaxflowsmultinomialdiffusion}
\citation{li2022diffusionlmimprovescontrollabletext}
\citation{meng2025aflnetyearslatercoverageguided,godefroid2017learnfuzzmachinelearninginput,she2019neuzzefficientfuzzingneural}
\citation{dai2019transformerxlattentivelanguagemodels}
\citation{zhou2021informerefficienttransformerlong}
\citation{wu2022autoformerdecompositiontransformersautocorrelation}
\citation{zhou2022fedformerfrequencyenhanceddecomposed}
\citation{nie2023patchtst}
\citation{rasul2021autoregressivedenoisingdiffusionmodels,tashiro2021csdiconditionalscorebaseddiffusion,wen2024diffstgprobabilisticspatiotemporalgraph,liu2023pristiconditionaldiffusionframework,kong2021diffwaveversatilediffusionmodel,11087622}
\citation{nist2023sp80082}
\citation{ho2020denoising,song2021score}
\citation{kollovieh2023tsdiff,sikder2023transfusion}
\@writefile{toc}{\contentsline {section}{\numberline {3}Methodology}{5}{}\protected@file@percent }
\newlabel{sec:method}{{3}{5}{}{section.3}{}}
\citation{vaswani2017attention}
\citation{ho2020denoising,kollovieh2023tsdiff}
\citation{austin2021structured,shi2024simplified}
\citation{yuan2025ctu,sha2026ddpm}
\citation{vaswani2017attention}
\citation{vaswani2017attention,nist2023sp80082}
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces Masked-DDPM: Unified Synthesis for ICS traffic}}{6}{}\protected@file@percent }
\newlabel{fig:design}{{1}{6}{}{figure.1}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.1}Transformer trend module for continuous dynamics}{6}{}\protected@file@percent }
\newlabel{sec:method-trans}{{3.1}{6}{}{subsection.3.1}{}}
\citation{kollovieh2023tsdiff,sikder2023transfusion}
\citation{vaswani2017attention,kollovieh2023tsdiff,yuan2025ctu}
\citation{ho2020denoising}
\citation{ho2020denoising,song2021score}
\citation{kollovieh2023tsdiff,sikder2023transfusion}
\newlabel{eq:additive_decomp}{{1}{7}{}{equation.1}{}}
\newlabel{eq:trend_prediction}{{2}{7}{}{equation.2}{}}
\newlabel{eq:trend_loss}{{3}{7}{}{equation.3}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.2}DDPM for continuous residual generation}{7}{}\protected@file@percent }
\newlabel{sec:method-ddpm}{{3.2}{7}{}{subsection.3.2}{}}
\citation{ho2020denoising,sikder2023transfusion}
\citation{hang2023efficient}
\citation{yuan2025ctu,sha2026ddpm}
\citation{austin2021structured,shi2024simplified}
\citation{nist2023sp80082}
\citation{shi2024simplified}
\newlabel{eq:forward_corruption}{{4}{8}{}{equation.4}{}}
\newlabel{eq:forward_corruption_eq}{{5}{8}{}{equation.5}{}}
\newlabel{eq:reverse_process}{{6}{8}{}{equation.6}{}}
\newlabel{eq:ddpm_loss}{{7}{8}{}{equation.7}{}}
\newlabel{eq:snr_loss}{{8}{8}{}{equation.8}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.3}Masked diffusion for discrete ICS variables}{8}{}\protected@file@percent }
\newlabel{sec:method-discrete}{{3.3}{8}{}{subsection.3.3}{}}
\citation{nist2023sp80082}
\citation{shi2024simplified,yuan2025ctu}
\citation{nist2023sp80082}
\newlabel{eq:masking_process}{{9}{9}{}{equation.9}{}}
\newlabel{eq:discrete_denoising}{{10}{9}{}{equation.10}{}}
\newlabel{eq:discrete_loss}{{11}{9}{}{equation.11}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.4}Type-aware decomposition as factorization and routing layer}{9}{}\protected@file@percent }
\newlabel{sec:method-types}{{3.4}{9}{}{subsection.3.4}{}}
\citation{shi2025tabdiff,yuan2025ctu,nist2023sp80082}
\citation{kollovieh2023tsdiff,sikder2023transfusion}
\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces Type assignment and six-type taxonomy.}}{11}{}\protected@file@percent }
\newlabel{fig:type_taxonomy}{{2}{11}{}{figure.2}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {3.5}Joint optimization and end-to-end sampling}{11}{}\protected@file@percent }
\newlabel{sec:method-joint}{{3.5}{11}{}{subsection.3.5}{}}
\citation{ho2020denoising,shi2024simplified,yuan2025ctu,nist2023sp80082}
\citation{coletta2023constrained,yang2001interlock,stenger2024survey}
\citation{lin1991divergence,yoon2019timegan}
\@writefile{toc}{\contentsline {section}{\numberline {4}Benchmark}{12}{}\protected@file@percent }
\newlabel{sec:benchmark}{{4}{12}{}{section.4}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.1}Core fidelity, legality, and reproducibility}{12}{}\protected@file@percent }
\newlabel{sec:benchmark-quant}{{4.1}{12}{}{subsection.4.1}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Benchmark evidence chain.}}{13}{}\protected@file@percent }
\newlabel{fig:benchmark_story}{{3}{13}{}{figure.3}{}}
\@writefile{lot}{\contentsline {table}{\numberline {1}{\ignorespaces Core benchmark summary. Lower is better except for validity rate.}}{13}{}\protected@file@percent }
\newlabel{tab:core_metrics}{{1}{13}{}{table.1}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.2}Type-aware diagnostics}{14}{}\protected@file@percent }
\newlabel{sec:benchmark-typed}{{4.2}{14}{}{subsection.4.2}{}}
\@writefile{lot}{\contentsline {table}{\numberline {2}{\ignorespaces Type-aware diagnostic summary. Lower values indicate better alignment.}}{14}{}\protected@file@percent }
\newlabel{tab:typed_diagnostics}{{2}{14}{}{table.2}{}}
\@writefile{toc}{\contentsline {subsection}{\numberline {4.3}Ablation study}{14}{}\protected@file@percent }
\newlabel{sec:benchmark-ablation}{{4.3}{14}{}{subsection.4.3}{}}
\@writefile{lof}{\contentsline {figure}{\numberline {4}{\ignorespaces Ablation impact.}}{15}{}\protected@file@percent }
\newlabel{fig:ablation_impact}{{4}{15}{}{figure.4}{}}
\@writefile{lot}{\contentsline {table}{\numberline {3}{\ignorespaces Ablation study. Lower is better except for anomaly AUPRC.}}{15}{}\protected@file@percent }
\newlabel{tab:ablation}{{3}{15}{}{table.3}{}}
\bibstyle{splncs04}
\bibdata{references}
\bibcite{10.1145/3055366.3055375}{1}
\bibcite{info16100910}{2}
\bibcite{austin2023structureddenoisingdiffusionmodels}{3}
\@writefile{toc}{\contentsline {section}{\numberline {5}Conclusion and Future Work}{16}{}\protected@file@percent }
\newlabel{sec:conclusion}{{5}{16}{}{section.5}{}}
\bibcite{austin2021structured}{4}
\bibcite{coletta2023constrained}{5}
\bibcite{dai2019transformerxlattentivelanguagemodels}{6}
\bibcite{godefroid2017learnfuzzmachinelearninginput}{7}
\bibcite{hang2023efficient}{8}
\bibcite{NEURIPS2020_4c5bcfec}{9}
\bibcite{ho2020denoising}{10}
\bibcite{hoogeboom2021argmaxflowsmultinomialdiffusion}{11}
\bibcite{jiang2023netdiffusionnetworkdataaugmentation}{12}
\bibcite{10.1007/s10844-022-00753-1}{13}
\bibcite{kollovieh2023tsdiff}{14}
\bibcite{kong2021diffwaveversatilediffusionmodel}{15}
\bibcite{pmlr-v202-kotelnikov23a}{16}
\bibcite{li2022diffusionlmimprovescontrollabletext}{17}
\bibcite{lin1991divergence}{18}
\bibcite{Lin_2020}{19}
\bibcite{liu2023pristiconditionaldiffusionframework}{20}
\bibcite{11087622}{21}
\bibcite{7469060}{22}
\bibcite{meng2025aflnetyearslatercoverageguided}{23}
\bibcite{Nankya2023-gp}{24}
\bibcite{nist2023sp80082}{25}
\bibcite{nie2023patchtst}{26}
\bibcite{rasul2021autoregressivedenoisingdiffusionmodels}{27}
\bibcite{Ring_2019}{28}
\bibcite{sha2026ddpm}{29}
\bibcite{she2019neuzzefficientfuzzingneural}{30}
\bibcite{shi2024simplified}{31}
\bibcite{shi2025tabdiff}{32}
\bibcite{shin}{33}
\bibcite{sikder2023transfusion}{34}
\bibcite{song2021scorebasedgenerativemodelingstochastic}{35}
\bibcite{song2021score}{36}
\bibcite{stenger2024survey}{37}
\bibcite{tashiro2021csdiconditionalscorebaseddiffusion}{38}
\bibcite{vaswani2017attention}{39}
\bibcite{10.1145/1151659.1159928}{40}
\bibcite{wen2024diffstgprobabilisticspatiotemporalgraph}{41}
\bibcite{wu2022autoformerdecompositiontransformersautocorrelation}{42}
\bibcite{yang2001interlock}{43}
\bibcite{10.1145/3544216.3544251}{44}
\bibcite{yoon2019timegan}{45}
\bibcite{yuan2025ctu}{46}
\bibcite{zhou2021informerefficienttransformerlong}{47}
\bibcite{zhou2022fedformerfrequencyenhanceddecomposed}{48}
\gdef \@abspage@last{21}

View File

@@ -0,0 +1,328 @@
\begin{thebibliography}{10}
\providecommand{\url}[1]{\texttt{#1}}
\providecommand{\urlprefix}{URL }
\providecommand{\doi}[1]{https://doi.org/#1}
\bibitem{10.1145/3055366.3055375}
Ahmed, C.M., Palleti, V.R., Mathur, A.P.: Wadi: a water distribution testbed
for research in the design of secure cyber physical systems. In: Proceedings
of the 3rd International Workshop on Cyber-Physical Systems for Smart Water
Networks. pp. 25--28. CySWATER '17, Association for Computing Machinery, New
York, NY, USA (2017). \doi{10.1145/3055366.3055375},
\url{https://doi.org/10.1145/3055366.3055375}
\bibitem{info16100910}
Ali, J., Ali, S., Al~Balushi, T., Nadir, Z.: Intrusion detection in industrial
control systems using transfer learning guided by reinforcement learning.
Information \textbf{16}(10) (2025). \doi{10.3390/info16100910},
\url{https://www.mdpi.com/2078-2489/16/10/910}
\bibitem{austin2023structureddenoisingdiffusionmodels}
Austin, J., Johnson, D.D., Ho, J., Tarlow, D., van~den Berg, R.: Structured
denoising diffusion models in discrete state-spaces. In: Ranzato, M.,
Beygelzimer, A., Dauphin, Y., Liang, P., Vaughan, J.W. (eds.) Advances in
Neural Information Processing Systems. vol.~34, pp. 17981--17993. Curran
Associates, Inc. (2021),
\url{https://proceedings.neurips.cc/paper_files/paper/2021/file/958c530554f78bcd8e97125b70e6973d-Paper.pdf}
\bibitem{austin2021structured}
Austin, J., Johnson, D.D., Ho, J., Tarlow, D., van~den Berg, R.: Structured
denoising diffusion models in discrete state-spaces. In: Ranzato, M.,
Beygelzimer, A., Dauphin, Y., Liang, P., Vaughan, J.W. (eds.) Advances in
Neural Information Processing Systems. vol.~34, pp. 17981--17993. Curran
Associates, Inc. (2021),
\url{https://proceedings.neurips.cc/paper_files/paper/2021/file/958c530554f78bcd8e97125b70e6973d-Paper.pdf}
\bibitem{coletta2023constrained}
Coletta, A., Gopalakrishnan, S., Borrajo, D., Vyetrenko, S.: On the constrained
time-series generation problem. In: Oh, A., Naumann, T., Globerson, A.,
Saenko, K., Hardt, M., Levine, S. (eds.) Advances in Neural Information
Processing Systems. vol.~36, pp. 61048--61059. Curran Associates, Inc.
(2023),
\url{https://proceedings.neurips.cc/paper_files/paper/2023/file/bfb6a69c0d9e2bc596e1cd31f16fcdde-Paper-Conference.pdf}
\bibitem{dai2019transformerxlattentivelanguagemodels}
Dai, Z., Yang, Z., Yang, Y., Carbonell, J., Le, Q., Salakhutdinov, R.:
Transformer-{XL}: Attentive language models beyond a fixed-length context.
In: Korhonen, A., Traum, D., M{\`a}rquez, L. (eds.) Proceedings of the 57th
Annual Meeting of the Association for Computational Linguistics. pp.
2978--2988. Association for Computational Linguistics, Florence, Italy (Jul
2019). \doi{10.18653/v1/P19-1285}, \url{https://aclanthology.org/P19-1285/}
\bibitem{godefroid2017learnfuzzmachinelearninginput}
Godefroid, P., Peleg, H., Singh, R.: Learn\&fuzz: Machine learning for input
fuzzing. In: 2017 32nd IEEE/ACM International Conference on Automated
Software Engineering (ASE). pp. 50--59 (2017). \doi{10.1109/ASE.2017.8115618}
\bibitem{hang2023efficient}
Hang, T., Gu, S., Li, C., Bao, J., Chen, D., Hu, H., Geng, X., Guo, B.:
Efficient diffusion training via min-snr weighting strategy. In: Proceedings
of the IEEE/CVF International Conference on Computer Vision (ICCV). pp.
7441--7451 (October 2023)
\bibitem{NEURIPS2020_4c5bcfec}
Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. In:
Larochelle, H., Ranzato, M., Hadsell, R., Balcan, M., Lin, H. (eds.) Advances
in Neural Information Processing Systems. vol.~33, pp. 6840--6851. Curran
Associates, Inc. (2020),
\url{https://proceedings.neurips.cc/paper_files/paper/2020/file/4c5bcfec8584af0d967f1ab10179ca4b-Paper.pdf}
\bibitem{ho2020denoising}
Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. In:
Larochelle, H., Ranzato, M., Hadsell, R., Balcan, M., Lin, H. (eds.) Advances
in Neural Information Processing Systems. vol.~33, pp. 6840--6851. Curran
Associates, Inc. (2020),
\url{https://proceedings.neurips.cc/paper_files/paper/2020/file/4c5bcfec8584af0d967f1ab10179ca4b-Paper.pdf}
\bibitem{hoogeboom2021argmaxflowsmultinomialdiffusion}
Hoogeboom, E., Nielsen, D., Jaini, P., Forr\'{e}, P., Welling, M.: Argmax flows
and multinomial diffusion: Learning categorical distributions. In: Ranzato,
M., Beygelzimer, A., Dauphin, Y., Liang, P., Vaughan, J.W. (eds.) Advances in
Neural Information Processing Systems. vol.~34, pp. 12454--12465. Curran
Associates, Inc. (2021),
\url{https://proceedings.neurips.cc/paper_files/paper/2021/file/67d96d458abdef21792e6d8e590244e7-Paper.pdf}
\bibitem{jiang2023netdiffusionnetworkdataaugmentation}
Jiang, X., Liu, S., Gember-Jacobson, A., Bhagoji, A.N., Schmitt, P., Bronzino,
F., Feamster, N.: Netdiffusion: Network data augmentation through
protocol-constrained traffic generation. Proc. ACM Meas. Anal. Comput. Syst.
\textbf{8}(1) (Feb 2024). \doi{10.1145/3639037},
\url{https://doi.org/10.1145/3639037}
\bibitem{10.1007/s10844-022-00753-1}
Koay, A.M., Ko, R.K.L., Hettema, H., Radke, K.: Machine learning in industrial
control system (ics) security: current landscape, opportunities and
challenges. Journal of Intelligent Information Systems \textbf{60}(2),
377--405 (2023)
\bibitem{kollovieh2023tsdiff}
Kollovieh, M., Ansari, A.F., Bohlke-Schneider, M., Zschiegner, J., Wang, H.,
Wang, Y.B.: Predict, refine, synthesize: Self-guiding diffusion models for
probabilistic time series forecasting. In: Oh, A., Naumann, T., Globerson,
A., Saenko, K., Hardt, M., Levine, S. (eds.) Advances in Neural Information
Processing Systems. vol.~36, pp. 28341--28364. Curran Associates, Inc.
(2023),
\url{https://proceedings.neurips.cc/paper_files/paper/2023/file/5a1a10c2c2c9b9af1514687bc24b8f3d-Paper-Conference.pdf}
\bibitem{kong2021diffwaveversatilediffusionmodel}
Kong, Z., Ping, W., Huang, J., Zhao, K., Catanzaro, B.: Diffwave: A versatile
diffusion model for audio synthesis (2021),
\url{https://arxiv.org/abs/2009.09761}
\bibitem{pmlr-v202-kotelnikov23a}
Kotelnikov, A., Baranchuk, D., Rubachev, I., Babenko, A.: {T}ab{DDPM}:
Modelling tabular data with diffusion models. In: Krause, A., Brunskill, E.,
Cho, K., Engelhardt, B., Sabato, S., Scarlett, J. (eds.) Proceedings of the
40th International Conference on Machine Learning. Proceedings of Machine
Learning Research, vol.~202, pp. 17564--17579. PMLR (23--29 Jul 2023),
\url{https://proceedings.mlr.press/v202/kotelnikov23a.html}
\bibitem{li2022diffusionlmimprovescontrollabletext}
Li, X., Thickstun, J., Gulrajani, I., Liang, P.S., Hashimoto, T.B.:
Diffusion-lm improves controllable text generation. In: Koyejo, S., Mohamed,
S., Agarwal, A., Belgrave, D., Cho, K., Oh, A. (eds.) Advances in Neural
Information Processing Systems. vol.~35, pp. 4328--4343. Curran Associates,
Inc. (2022),
\url{https://proceedings.neurips.cc/paper_files/paper/2022/file/1be5bc25d50895ee656b8c2d9eb89d6a-Paper-Conference.pdf}
\bibitem{lin1991divergence}
Lin, J.: Divergence measures based on the shannon entropy. IEEE Transactions on
Information Theory \textbf{37}(1), 145--151 (1991). \doi{10.1109/18.61115}
\bibitem{Lin_2020}
Lin, Z., Jain, A., Wang, C., Fanti, G., Sekar, V.: Using gans for sharing
networked time series data: Challenges, initial promise, and open questions.
In: Proceedings of the ACM Internet Measurement Conference. pp. 464--483. IMC
'20, Association for Computing Machinery, New York, NY, USA (2020).
\doi{10.1145/3419394.3423643}, \url{https://doi.org/10.1145/3419394.3423643}
\bibitem{liu2023pristiconditionaldiffusionframework}
Liu, M., Huang, H., Feng, H., Sun, L., Du, B., Fu, Y.: Pristi: A conditional
diffusion framework for spatiotemporal imputation. In: 2023 IEEE 39th
International Conference on Data Engineering (ICDE). pp. 1927--1939 (2023).
\doi{10.1109/ICDE55515.2023.00150}
\bibitem{11087622}
Liu, X., Xu, X., Liu, Z., Li, Z., Wu, K.: Spatio-temporal diffusion model for
cellular traffic generation. IEEE Transactions on Mobile Computing
\textbf{25}(1), 257--271 (2026). \doi{10.1109/TMC.2025.3591183}
\bibitem{7469060}
Mathur, A.P., Tippenhauer, N.O.: Swat: a water treatment testbed for research
and training on ics security. In: 2016 International Workshop on
Cyber-physical Systems for Smart Water Networks (CySWater). pp. 31--36
(2016). \doi{10.1109/CySWater.2016.7469060}
\bibitem{meng2025aflnetyearslatercoverageguided}
Meng, R., Pham, V.T., Böhme, M., Roychoudhury, A.: Aflnet five years later: On
coverage-guided protocol fuzzing. IEEE Transactions on Software Engineering
\textbf{51}(4), 960--974 (2025). \doi{10.1109/TSE.2025.3535925}
\bibitem{Nankya2023-gp}
Nankya, M., Chataut, R., Akl, R.: Securing industrial control systems:
Components, cyber threats, and machine learning-driven defense strategies.
Sensors \textbf{23}(21) (2023). \doi{10.3390/s23218840},
\url{https://www.mdpi.com/1424-8220/23/21/8840}
\bibitem{nist2023sp80082}
{National Institute of Standards and Technology}: Guide to operational
technology (ot) security. Special Publication 800-82 Rev. 3, NIST (sep 2023).
\doi{10.6028/NIST.SP.800-82r3},
\url{https://csrc.nist.gov/pubs/sp/800/82/r3/final}
\bibitem{nie2023patchtst}
Nie, Y., Nguyen, N.H., Sinthong, P., Kalagnanam, J.: A time series is worth 64
words: Long-term forecasting with transformers. In: International Conference
on Learning Representations (ICLR) (2023),
\url{https://arxiv.org/abs/2211.14730}
\bibitem{rasul2021autoregressivedenoisingdiffusionmodels}
Rasul, K., Seward, C., Schuster, I., Vollgraf, R.: Autoregressive denoising
diffusion models for multivariate probabilistic time series forecasting. In:
Meila, M., Zhang, T. (eds.) Proceedings of the 38th International Conference
on Machine Learning. Proceedings of Machine Learning Research, vol.~139, pp.
8857--8868. PMLR (18--24 Jul 2021),
\url{https://proceedings.mlr.press/v139/rasul21a.html}
\bibitem{Ring_2019}
Ring, M., Schlör, D., Landes, D., Hotho, A.: Flow-based network traffic
generation using generative adversarial networks. Computers \& Security
\textbf{82}, 156--172 (2019).
\doi{https://doi.org/10.1016/j.cose.2018.12.012},
\url{https://www.sciencedirect.com/science/article/pii/S0167404818308393}
\bibitem{sha2026ddpm}
Sha, Y., Yuan, Y., Wu, Y., Zhao, H.: Ddpm fusing mamba and adaptive attention:
An augmentation method for industrial control systems anomaly data (jan
2026). \doi{10.2139/ssrn.6055903},
\url{https://papers.ssrn.com/sol3/papers.cfm?abstract_id=6055903}, SSRN
Electronic Journal
\bibitem{she2019neuzzefficientfuzzingneural}
She, D., Pei, K., Epstein, D., Yang, J., Ray, B., Jana, S.: Neuzz: Efficient
fuzzing with neural program smoothing. In: 2019 IEEE Symposium on Security
and Privacy (SP). pp. 803--817 (2019). \doi{10.1109/SP.2019.00052}
\bibitem{shi2024simplified}
Shi, J., Han, K., Wang, Z., Doucet, A., Titsias, M.: Simplified and generalized
masked diffusion for discrete data. In: Globerson, A., Mackey, L., Belgrave,
D., Fan, A., Paquet, U., Tomczak, J., Zhang, C. (eds.) Advances in Neural
Information Processing Systems. vol.~37, pp. 103131--103167. Curran
Associates, Inc. (2024). \doi{10.52202/079017-3277},
\url{https://proceedings.neurips.cc/paper_files/paper/2024/file/bad233b9849f019aead5e5cc60cef70f-Paper-Conference.pdf}
\bibitem{shi2025tabdiff}
Shi, J., Xu, M., Hua, H., Zhang, H., Ermon, S., Leskovec, J.: Tabdiff: a
mixed-type diffusion model for tabular data generation (2025),
\url{https://arxiv.org/abs/2410.20626}
\bibitem{shin}
Shin, H.K., Lee, W., Choi, S., Yun, J.H., Min, B.G., Kim, H.: Hai security
dataset (2023). \doi{10.34740/kaggle/dsv/5821622},
\url{https://www.kaggle.com/dsv/5821622}
\bibitem{sikder2023transfusion}
Sikder, M.F., Ramachandranpillai, R., Heintz, F.: Transfusion: Generating long,
high fidelity time series using diffusion models with transformers. Machine
Learning with Applications \textbf{20}, 100652 (2025).
\doi{https://doi.org/10.1016/j.mlwa.2025.100652},
\url{https://www.sciencedirect.com/science/article/pii/S2666827025000350}
\bibitem{song2021scorebasedgenerativemodelingstochastic}
Song, Y., Sohl-Dickstein, J., Kingma, D.P., Kumar, A., Ermon, S., Poole, B.:
Score-based generative modeling through stochastic differential equations
(2021), \url{https://arxiv.org/abs/2011.13456}
\bibitem{song2021score}
Song, Y., Sohl-Dickstein, J., Kingma, D.P., Kumar, A., Ermon, S., Poole, B.:
Score-based generative modeling through stochastic differential equations
(2021), \url{https://arxiv.org/abs/2011.13456}
\bibitem{stenger2024survey}
Stenger, M., Leppich, R., Foster, I.T., Kounev, S., Bauer, A.: Evaluation is
key: a survey on evaluation measures for synthetic time series. Journal of
Big Data \textbf{11}(1), ~66 (2024)
\bibitem{tashiro2021csdiconditionalscorebaseddiffusion}
Tashiro, Y., Song, J., Song, Y., Ermon, S.: Csdi: Conditional score-based
diffusion models for probabilistic time series imputation. In: Ranzato, M.,
Beygelzimer, A., Dauphin, Y., Liang, P., Vaughan, J.W. (eds.) Advances in
Neural Information Processing Systems. vol.~34, pp. 24804--24816. Curran
Associates, Inc. (2021),
\url{https://proceedings.neurips.cc/paper_files/paper/2021/file/cfe8504bda37b575c70ee1a8276f3486-Paper.pdf}
\bibitem{vaswani2017attention}
Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N.,
Kaiser, L.u., Polosukhin, I.: Attention is all you need. In: Guyon, I.,
Luxburg, U.V., Bengio, S., Wallach, H., Fergus, R., Vishwanathan, S.,
Garnett, R. (eds.) Advances in Neural Information Processing Systems.
vol.~30. Curran Associates, Inc. (2017),
\url{https://proceedings.neurips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf}
\bibitem{10.1145/1151659.1159928}
Vishwanath, K.V., Vahdat, A.: Realistic and responsive network traffic
generation. SIGCOMM Comput. Commun. Rev. \textbf{36}(4), 111--122 (Aug
2006). \doi{10.1145/1151659.1159928},
\url{https://doi.org/10.1145/1151659.1159928}
\bibitem{wen2024diffstgprobabilisticspatiotemporalgraph}
Wen, H., Lin, Y., Xia, Y., Wan, H., Wen, Q., Zimmermann, R., Liang, Y.:
Diffstg: Probabilistic spatio-temporal graph forecasting with denoising
diffusion models. In: Proceedings of the 31st ACM International Conference on
Advances in Geographic Information Systems. SIGSPATIAL '23, Association for
Computing Machinery, New York, NY, USA (2023). \doi{10.1145/3589132.3625614},
\url{https://doi.org/10.1145/3589132.3625614}
\bibitem{wu2022autoformerdecompositiontransformersautocorrelation}
Wu, H., Xu, J., Wang, J., Long, M.: Autoformer: Decomposition transformers with
auto-correlation for long-term series forecasting. In: Ranzato, M.,
Beygelzimer, A., Dauphin, Y., Liang, P., Vaughan, J.W. (eds.) Advances in
Neural Information Processing Systems. vol.~34, pp. 22419--22430. Curran
Associates, Inc. (2021),
\url{https://proceedings.neurips.cc/paper_files/paper/2021/file/bcc0d400288793e8bdcd7c19a8ac0c2b-Paper.pdf}
\bibitem{yang2001interlock}
Yang, S., Tan, L., He, C.: Automatic verification of safety interlock systems
for industrial processes. Journal of Loss Prevention in the Process
Industries \textbf{14}(5), 379--386 (2001).
\doi{https://doi.org/10.1016/S0950-4230(01)00014-6},
\url{https://www.sciencedirect.com/science/article/pii/S0950423001000146}
\bibitem{10.1145/3544216.3544251}
Yin, Y., Lin, Z., Jin, M., Fanti, G., Sekar, V.: Practical gan-based synthetic
ip header trace generation using netshare. In: Proceedings of the ACM SIGCOMM
2022 Conference. pp. 458--472. SIGCOMM '22, Association for Computing
Machinery, New York, NY, USA (2022). \doi{10.1145/3544216.3544251},
\url{https://doi.org/10.1145/3544216.3544251}
\bibitem{yoon2019timegan}
Yoon, J., Jarrett, D., van~der Schaar, M.: Time-series generative adversarial
networks. In: Wallach, H., Larochelle, H., Beygelzimer, A., d\textquotesingle
Alch\'{e}-Buc, F., Fox, E., Garnett, R. (eds.) Advances in Neural Information
Processing Systems. vol.~32. Curran Associates, Inc. (2019),
\url{https://proceedings.neurips.cc/paper_files/paper/2019/file/c9efe5f26cd17ba6216bbe2a7d26d490-Paper.pdf}
\bibitem{yuan2025ctu}
Yuan, Y., Sha, Y., Zhao, H.: Ctu-ddpm: Generating industrial control system
time-series data with a cnn-transformer hybrid diffusion model. In:
Proceedings of the 2025 International Symposium on Artificial Intelligence
and Computational Social Sciences. pp. 547--552. AICSS '25, Association for
Computing Machinery, New York, NY, USA (2025). \doi{10.1145/3776759.3776845},
\url{https://doi.org/10.1145/3776759.3776845}
\bibitem{zhou2021informerefficienttransformerlong}
Zhou, H., Zhang, S., Peng, J., Zhang, S., Li, J., Xiong, H., Zhang, W.:
Informer: Beyond efficient transformer for long sequence time-series
forecasting. Proceedings of the AAAI Conference on Artificial Intelligence
\textbf{35}(12), 11106--11115 (May 2021). \doi{10.1609/aaai.v35i12.17325},
\url{https://ojs.aaai.org/index.php/AAAI/article/view/17325}
\bibitem{zhou2022fedformerfrequencyenhanceddecomposed}
Zhou, T., Ma, Z., Wen, Q., Wang, X., Sun, L., Jin, R.: {FED}former: Frequency
enhanced decomposed transformer for long-term series forecasting. In:
Chaudhuri, K., Jegelka, S., Song, L., Szepesvari, C., Niu, G., Sabato, S.
(eds.) Proceedings of the 39th International Conference on Machine Learning.
Proceedings of Machine Learning Research, vol.~162, pp. 27268--27286. PMLR
(17--23 Jul 2022), \url{https://proceedings.mlr.press/v162/zhou22g.html}
\end{thebibliography}

View File

@@ -0,0 +1,48 @@
This is BibTeX, Version 0.99e
Capacity: max_strings=200000, hash_size=200000, hash_prime=170003
The top-level auxiliary file: main.aux
Reallocating 'name_of_file' (item size: 1) to 9 items.
The style file: splncs04.bst
Reallocating 'name_of_file' (item size: 1) to 11 items.
Database file #1: references.bib
You've used 48 entries,
2850 wiz_defined-function locations,
923 strings with 18565 characters,
and the built_in function-call counts, 36804 in all, are:
= -- 2972
> -- 1473
< -- 64
+ -- 575
- -- 526
* -- 2581
:= -- 4611
add.period$ -- 115
call.type$ -- 48
change.case$ -- 419
chr.to.int$ -- 0
cite$ -- 48
duplicate$ -- 3146
empty$ -- 3007
format.name$ -- 589
if$ -- 8031
int.to.chr$ -- 0
int.to.str$ -- 48
missing$ -- 693
newline$ -- 150
num.names$ -- 130
pop$ -- 1246
preamble$ -- 1
purify$ -- 314
quote$ -- 0
skip$ -- 960
stack$ -- 0
substring$ -- 1960
swap$ -- 1955
text.length$ -- 64
text.prefix$ -- 0
top$ -- 0
type$ -- 192
warning$ -- 0
while$ -- 241
width$ -- 50
write$ -- 595

View File

@@ -0,0 +1,752 @@
This is pdfTeX, Version 3.141592653-2.6-1.40.28 (MiKTeX 25.12) (preloaded format=pdflatex 2026.4.14) 20 APR 2026 18:03
entering extended mode
restricted \write18 enabled.
%&-line parsing enabled.
**./main.tex
(main.tex
LaTeX2e <2025-11-01>
L3 programming layer <2025-12-24>
(llncs.cls
Document Class: llncs 2024/01/29 v2.24
LaTeX document class for Lecture Notes in Computer Science
(D:\MikTex\tex/latex/base\article.cls
Document Class: article 2025/01/22 v1.4n Standard LaTeX document class
(D:\MikTex\tex/latex/base\size10.clo
File: size10.clo 2025/01/22 v1.4n Standard LaTeX file (size option)
)
\c@part=\count275
\c@section=\count276
\c@subsection=\count277
\c@subsubsection=\count278
\c@paragraph=\count279
\c@subparagraph=\count280
\c@figure=\count281
\c@table=\count282
\abovecaptionskip=\skip49
\belowcaptionskip=\skip50
\bibindent=\dimen148
) (D:\MikTex\tex/latex/tools\multicol.sty
Package: multicol 2025/10/21 v2.0b multicolumn formatting (FMi)
\c@tracingmulticols=\count283
\mult@box=\box53
\multicol@leftmargin=\dimen149
\c@unbalance=\count284
\c@collectmore=\count285
\doublecol@number=\count286
\multicoltolerance=\count287
\multicolpretolerance=\count288
\full@width=\dimen150
\page@free=\dimen151
\premulticols=\dimen152
\postmulticols=\dimen153
\multicolsep=\skip51
\multicolbaselineskip=\skip52
\partial@page=\box54
\last@line=\box55
\mc@boxedresult=\box56
\maxbalancingoverflow=\dimen154
\mult@rightbox=\box57
\mult@grightbox=\box58
\mult@firstbox=\box59
\mult@gfirstbox=\box60
\@tempa=\box61
\@tempa=\box62
\@tempa=\box63
\@tempa=\box64
\@tempa=\box65
\@tempa=\box66
\@tempa=\box67
\@tempa=\box68
\@tempa=\box69
\@tempa=\box70
\@tempa=\box71
\@tempa=\box72
\@tempa=\box73
\@tempa=\box74
\@tempa=\box75
\@tempa=\box76
\@tempa=\box77
\@tempa=\box78
\@tempa=\box79
\@tempa=\box80
\@tempa=\box81
\@tempa=\box82
\@tempa=\box83
\@tempa=\box84
\@tempa=\box85
\@tempa=\box86
\@tempa=\box87
\@tempa=\box88
\@tempa=\box89
\@tempa=\box90
\@tempa=\box91
\@tempa=\box92
\@tempa=\box93
\@tempa=\box94
\@tempa=\box95
\@tempa=\box96
\c@minrows=\count289
\c@columnbadness=\count290
\c@finalcolumnbadness=\count291
\last@try=\dimen155
\multicolovershoot=\dimen156
\multicolundershoot=\dimen157
\mult@nat@firstbox=\box97
\colbreak@box=\box98
\mc@col@check@num=\count292
\g__mc_curr_col_int=\count293
) (D:\MikTex\tex/latex/oberdiek\aliascnt.sty
Package: aliascnt 2018/09/07 v1.5 Alias counters (HO)
)
\c@chapter=\count294
LaTeX Font Info: Redeclaring math symbol \Gamma on input line 379.
LaTeX Font Info: Redeclaring math symbol \Delta on input line 380.
LaTeX Font Info: Redeclaring math symbol \Theta on input line 381.
LaTeX Font Info: Redeclaring math symbol \Lambda on input line 382.
LaTeX Font Info: Redeclaring math symbol \Xi on input line 383.
LaTeX Font Info: Redeclaring math symbol \Pi on input line 384.
LaTeX Font Info: Redeclaring math symbol \Sigma on input line 385.
LaTeX Font Info: Redeclaring math symbol \Upsilon on input line 386.
LaTeX Font Info: Redeclaring math symbol \Phi on input line 387.
LaTeX Font Info: Redeclaring math symbol \Psi on input line 388.
LaTeX Font Info: Redeclaring math symbol \Omega on input line 389.
LaTeX Info: Redefining \vec on input line 394.
\tocchpnum=\dimen158
\tocsecnum=\dimen159
\tocsectotal=\dimen160
\tocsubsecnum=\dimen161
\tocsubsectotal=\dimen162
\tocsubsubsecnum=\dimen163
\tocsubsubsectotal=\dimen164
\tocparanum=\dimen165
\tocparatotal=\dimen166
\tocsubparanum=\dimen167
\@tempcntc=\count295
\fnindent=\dimen168
\c@@inst=\count296
\c@@auth=\count297
\c@auco=\count298
\instindent=\dimen169
\authrun=\box99
\authorrunning=\toks17
\tocauthor=\toks18
\titrun=\box100
\titlerunning=\toks19
\toctitle=\toks20
\c@theorem=\count299
\c@case=\count300
\c@conjecture=\count301
\c@corollary=\count302
\c@definition=\count303
\c@example=\count304
\c@exercise=\count305
\c@lemma=\count306
\c@note=\count307
\c@problem=\count308
\c@property=\count309
\c@proposition=\count310
\c@question=\count311
\c@solution=\count312
\c@remark=\count313
\headlineindent=\dimen170
)
(D:\MikTex\tex/latex/base\fontenc.sty
Package: fontenc 2025/07/18 v2.1d Standard LaTeX package
) (D:\MikTex\tex/latex/lm\lmodern.sty
Package: lmodern 2015/05/01 v1.6.1 Latin Modern Fonts
LaTeX Font Info: Overwriting symbol font `operators' in version `normal'
(Font) OT1/cmr/m/n --> OT1/lmr/m/n on input line 22.
LaTeX Font Info: Overwriting symbol font `letters' in version `normal'
(Font) OML/cmm/m/it --> OML/lmm/m/it on input line 23.
LaTeX Font Info: Overwriting symbol font `symbols' in version `normal'
(Font) OMS/cmsy/m/n --> OMS/lmsy/m/n on input line 24.
LaTeX Font Info: Overwriting symbol font `largesymbols' in version `normal'
(Font) OMX/cmex/m/n --> OMX/lmex/m/n on input line 25.
LaTeX Font Info: Overwriting symbol font `operators' in version `bold'
(Font) OT1/cmr/bx/n --> OT1/lmr/bx/n on input line 26.
LaTeX Font Info: Overwriting symbol font `letters' in version `bold'
(Font) OML/cmm/b/it --> OML/lmm/b/it on input line 27.
LaTeX Font Info: Overwriting symbol font `symbols' in version `bold'
(Font) OMS/cmsy/b/n --> OMS/lmsy/b/n on input line 28.
LaTeX Font Info: Overwriting symbol font `largesymbols' in version `bold'
(Font) OMX/cmex/m/n --> OMX/lmex/m/n on input line 29.
LaTeX Font Info: Overwriting math alphabet `\mathbf' in version `normal'
(Font) OT1/cmr/bx/n --> OT1/lmr/bx/n on input line 31.
LaTeX Font Info: Overwriting math alphabet `\mathsf' in version `normal'
(Font) OT1/cmss/m/n --> OT1/lmss/m/n on input line 32.
LaTeX Font Info: Overwriting math alphabet `\mathit' in version `normal'
(Font) OT1/cmr/m/it --> OT1/lmr/m/it on input line 33.
LaTeX Font Info: Overwriting math alphabet `\mathtt' in version `normal'
(Font) OT1/cmtt/m/n --> OT1/lmtt/m/n on input line 34.
LaTeX Font Info: Overwriting math alphabet `\mathbf' in version `bold'
(Font) OT1/cmr/bx/n --> OT1/lmr/bx/n on input line 35.
LaTeX Font Info: Overwriting math alphabet `\mathsf' in version `bold'
(Font) OT1/cmss/bx/n --> OT1/lmss/bx/n on input line 36.
LaTeX Font Info: Overwriting math alphabet `\mathit' in version `bold'
(Font) OT1/cmr/bx/it --> OT1/lmr/bx/it on input line 37.
LaTeX Font Info: Overwriting math alphabet `\mathtt' in version `bold'
(Font) OT1/cmtt/m/n --> OT1/lmtt/m/n on input line 38.
)
(D:\MikTex\tex/latex/graphics\graphicx.sty
Package: graphicx 2024/12/31 v1.2e Enhanced LaTeX Graphics (DPC,SPQR)
(D:\MikTex\tex/latex/graphics\keyval.sty
Package: keyval 2022/05/29 v1.15 key=value parser (DPC)
\KV@toks@=\toks21
)
(D:\MikTex\tex/latex/graphics\graphics.sty
Package: graphics 2024/08/06 v1.4g Standard LaTeX Graphics (DPC,SPQR)
(D:\MikTex\tex/latex/graphics\trig.sty
Package: trig 2023/12/02 v1.11 sin cos tan (DPC)
)
(D:\MikTex\tex/latex/graphics-cfg\graphics.cfg
File: graphics.cfg 2016/06/04 v1.11 sample graphics configuration
)
Package graphics Info: Driver file: pdftex.def on input line 106.
(D:\MikTex\tex/latex/graphics-def\pdftex.def
File: pdftex.def 2025/09/29 v1.2d Graphics/color driver for pdftex
))
\Gin@req@height=\dimen171
\Gin@req@width=\dimen172
)
(D:\MikTex\tex/latex/amsmath\amsmath.sty
Package: amsmath 2025/07/09 v2.17z AMS math features
\@mathmargin=\skip53
For additional information on amsmath, use the `?' option.
(D:\MikTex\tex/latex/amsmath\amstext.sty
Package: amstext 2024/11/17 v2.01 AMS text
(D:\MikTex\tex/latex/amsmath\amsgen.sty
File: amsgen.sty 1999/11/30 v2.0 generic functions
\@emptytoks=\toks22
\ex@=\dimen173
))
(D:\MikTex\tex/latex/amsmath\amsbsy.sty
Package: amsbsy 1999/11/29 v1.2d Bold Symbols
\pmbraise@=\dimen174
)
(D:\MikTex\tex/latex/amsmath\amsopn.sty
Package: amsopn 2022/04/08 v2.04 operator names
)
\inf@bad=\count314
LaTeX Info: Redefining \frac on input line 233.
\uproot@=\count315
\leftroot@=\count316
LaTeX Info: Redefining \overline on input line 398.
LaTeX Info: Redefining \colon on input line 409.
\classnum@=\count317
\DOTSCASE@=\count318
LaTeX Info: Redefining \ldots on input line 495.
LaTeX Info: Redefining \dots on input line 498.
LaTeX Info: Redefining \cdots on input line 619.
\Mathstrutbox@=\box101
\strutbox@=\box102
LaTeX Info: Redefining \big on input line 721.
LaTeX Info: Redefining \Big on input line 722.
LaTeX Info: Redefining \bigg on input line 723.
LaTeX Info: Redefining \Bigg on input line 724.
\big@size=\dimen175
LaTeX Font Info: Redeclaring font encoding OML on input line 742.
LaTeX Font Info: Redeclaring font encoding OMS on input line 743.
Package amsmath Warning: Unable to redefine math accent \vec.
\macc@depth=\count319
LaTeX Info: Redefining \bmod on input line 904.
LaTeX Info: Redefining \pmod on input line 909.
LaTeX Info: Redefining \smash on input line 939.
LaTeX Info: Redefining \relbar on input line 969.
LaTeX Info: Redefining \Relbar on input line 970.
\c@MaxMatrixCols=\count320
\dotsspace@=\muskip17
\c@parentequation=\count321
\dspbrk@lvl=\count322
\tag@help=\toks23
\row@=\count323
\column@=\count324
\maxfields@=\count325
\andhelp@=\toks24
\eqnshift@=\dimen176
\alignsep@=\dimen177
\tagshift@=\dimen178
\tagwidth@=\dimen179
\totwidth@=\dimen180
\lineht@=\dimen181
\@envbody=\toks25
\multlinegap=\skip54
\multlinetaggap=\skip55
\mathdisplay@stack=\toks26
LaTeX Info: Redefining \[ on input line 2950.
LaTeX Info: Redefining \] on input line 2951.
) (D:\MikTex\tex/latex/amsfonts\amssymb.sty
Package: amssymb 2013/01/14 v3.01 AMS font symbols
(D:\MikTex\tex/latex/amsfonts\amsfonts.sty
Package: amsfonts 2013/01/14 v3.01 Basic AMSFonts support
\symAMSa=\mathgroup4
\symAMSb=\mathgroup5
LaTeX Font Info: Redeclaring math symbol \hbar on input line 98.
LaTeX Font Info: Overwriting math alphabet `\mathfrak' in version `bold'
(Font) U/euf/m/n --> U/euf/b/n on input line 106.
)) (D:\MikTex\tex/latex/tools\bm.sty
Package: bm 2025/10/21 v1.2g Bold Symbol Support (DPC/FMi)
\symboldoperators=\mathgroup6
\symboldletters=\mathgroup7
\symboldsymbols=\mathgroup8
Package bm Info: No bold for \OMX/lmex/m/n, using \pmb.
Package bm Info: No bold for \U/msa/m/n, using \pmb.
Package bm Info: No bold for \U/msb/m/n, using \pmb.
LaTeX Font Info: Redeclaring math alphabet \mathbf on input line 149.
) (D:\MikTex\tex/latex/tools\array.sty
Package: array 2025/09/25 v2.6n Tabular extension package (FMi)
\col@sep=\dimen182
\ar@mcellbox=\box103
\extrarowheight=\dimen183
\NC@list=\toks27
\extratabsurround=\skip56
\backup@length=\skip57
\ar@cellbox=\box104
)
(C:\Users\Markyan04\AppData\Roaming\MiKTeX\tex/latex/booktabs\booktabs.sty
Package: booktabs 2020/01/12 v1.61803398 Publication quality tables
\heavyrulewidth=\dimen184
\lightrulewidth=\dimen185
\cmidrulewidth=\dimen186
\belowrulesep=\dimen187
\belowbottomsep=\dimen188
\aboverulesep=\dimen189
\abovetopsep=\dimen190
\cmidrulesep=\dimen191
\cmidrulekern=\dimen192
\defaultaddspace=\dimen193
\@cmidla=\count326
\@cmidlb=\count327
\@aboverulesep=\dimen194
\@belowrulesep=\dimen195
\@thisruleclass=\count328
\@lastruleclass=\count329
\@thisrulewidth=\dimen196
)
(C:\Users\Markyan04\AppData\Roaming\MiKTeX\tex/latex/microtype\microtype.sty
Package: microtype 2026/03/01 v3.2d Micro-typographical refinements (RS)
(D:\MikTex\tex/latex/etoolbox\etoolbox.sty
Package: etoolbox 2025/10/02 v2.5m e-TeX tools for LaTeX (JAW)
\etb@tempcnta=\count330
)
\MT@toks=\toks28
\MT@tempbox=\box105
\MT@count=\count331
LaTeX Info: Redefining \noprotrusionifhmode on input line 1084.
LaTeX Info: Redefining \leftprotrusion on input line 1085.
\MT@prot@toks=\toks29
LaTeX Info: Redefining \rightprotrusion on input line 1104.
LaTeX Info: Redefining \textls on input line 1449.
\MT@outer@kern=\dimen197
LaTeX Info: Redefining \microtypecontext on input line 2053.
LaTeX Info: Redefining \textmicrotypecontext on input line 2070.
\MT@listname@count=\count332
(C:\Users\Markyan04\AppData\Roaming\MiKTeX\tex/latex/microtype\microtype-pdftex
.def
File: microtype-pdftex.def 2026/03/01 v3.2d Definitions specific to pdftex (RS)
LaTeX Info: Redefining \lsstyle on input line 944.
LaTeX Info: Redefining \lslig on input line 944.
\MT@outer@space=\skip58
)
Package microtype Info: Loading configuration file microtype.cfg.
(C:\Users\Markyan04\AppData\Roaming\MiKTeX\tex/latex/microtype\microtype.cfg
File: microtype.cfg 2026/03/01 v3.2d microtype main configuration file (RS)
)
LaTeX Info: Redefining \microtypesetup on input line 3065.
)
(C:\Users\Markyan04\AppData\Roaming\MiKTeX\tex/latex/float\float.sty
Package: float 2001/11/08 v1.3d Float enhancements (AL)
\c@float@type=\count333
\float@exts=\toks30
\float@box=\box106
\@float@everytoks=\toks31
\@floatcapt=\box107
)
(D:\MikTex\tex/latex/url\url.sty
\Urlmuskip=\muskip18
Package: url 2013/09/16 ver 3.4 Verb mode for urls, etc.
)
LaTeX Font Info: Trying to load font information for T1+lmr on input line 30
.
(D:\MikTex\tex/latex/lm\t1lmr.fd
File: t1lmr.fd 2015/05/01 v1.6.1 Font defs for Latin Modern
)
(D:\MikTex\tex/latex/l3backend\l3backend-pdftex.def
File: l3backend-pdftex.def 2025-10-09 L3 backend support: PDF output (pdfTeX)
\l__color_backend_stack_int=\count334
) (main.aux)
\openout1 = `main.aux'.
LaTeX Font Info: Checking defaults for OML/cmm/m/it on input line 30.
LaTeX Font Info: ... okay on input line 30.
LaTeX Font Info: Checking defaults for OMS/cmsy/m/n on input line 30.
LaTeX Font Info: ... okay on input line 30.
LaTeX Font Info: Checking defaults for OT1/cmr/m/n on input line 30.
LaTeX Font Info: ... okay on input line 30.
LaTeX Font Info: Checking defaults for T1/cmr/m/n on input line 30.
LaTeX Font Info: ... okay on input line 30.
LaTeX Font Info: Checking defaults for TS1/cmr/m/n on input line 30.
LaTeX Font Info: ... okay on input line 30.
LaTeX Font Info: Checking defaults for OMX/cmex/m/n on input line 30.
LaTeX Font Info: ... okay on input line 30.
LaTeX Font Info: Checking defaults for U/cmr/m/n on input line 30.
LaTeX Font Info: ... okay on input line 30.
(D:\MikTex\tex/context/base/mkii\supp-pdf.mkii
[Loading MPS to PDF converter (version 2006.09.02).]
\scratchcounter=\count335
\scratchdimen=\dimen198
\scratchbox=\box108
\nofMPsegments=\count336
\nofMParguments=\count337
\everyMPshowfont=\toks32
\MPscratchCnt=\count338
\MPscratchDim=\dimen199
\MPnumerator=\count339
\makeMPintoPDFobject=\count340
\everyMPtoPDFconversion=\toks33
) (D:\MikTex\tex/latex/epstopdf-pkg\epstopdf-base.sty
Package: epstopdf-base 2020-01-24 v2.11 Base part for package epstopdf
Package epstopdf-base Info: Redefining graphics rule for `.eps' on input line 4
85.
(D:\MikTex\tex/latex/00miktex\epstopdf-sys.cfg
File: epstopdf-sys.cfg 2021/03/18 v2.0 Configuration of epstopdf for MiKTeX
))
LaTeX Info: Redefining \microtypecontext on input line 30.
Package microtype Info: Applying patch `item' on input line 30.
Package microtype Info: Applying patch `toc' on input line 30.
Package microtype Info: Applying patch `eqnum' on input line 30.
Package microtype Info: Applying patch `footnote' on input line 30.
Package microtype Info: Applying patch `verbatim' on input line 30.
LaTeX Info: Redefining \microtypesetup on input line 30.
Package microtype Info: Generating PDF output.
Package microtype Info: Character protrusion enabled (level 2).
Package microtype Info: Using default protrusion set `alltext'.
Package microtype Info: No font expansion.
Package microtype Info: No adjustment of tracking.
Package microtype Info: No adjustment of interword spacing.
Package microtype Info: No adjustment of character kerning.
(C:\Users\Markyan04\AppData\Roaming\MiKTeX\tex/latex/microtype\mt-cmr.cfg
File: mt-cmr.cfg 2013/05/19 v2.2 microtype config. file: Computer Modern Roman
(RS)
)
LaTeX Font Info: Trying to load font information for OT1+lmr on input line 3
1.
(D:\MikTex\tex/latex/lm\ot1lmr.fd
File: ot1lmr.fd 2015/05/01 v1.6.1 Font defs for Latin Modern
)
LaTeX Font Info: Trying to load font information for OML+lmm on input line 3
1.
(D:\MikTex\tex/latex/lm\omllmm.fd
File: omllmm.fd 2015/05/01 v1.6.1 Font defs for Latin Modern
)
LaTeX Font Info: Trying to load font information for OMS+lmsy on input line
31.
(D:\MikTex\tex/latex/lm\omslmsy.fd
File: omslmsy.fd 2015/05/01 v1.6.1 Font defs for Latin Modern
)
LaTeX Font Info: Trying to load font information for OMX+lmex on input line
31.
(D:\MikTex\tex/latex/lm\omxlmex.fd
File: omxlmex.fd 2015/05/01 v1.6.1 Font defs for Latin Modern
)
LaTeX Font Info: External font `lmex10' loaded for size
(Font) <10> on input line 31.
LaTeX Font Info: External font `lmex10' loaded for size
(Font) <7> on input line 31.
LaTeX Font Info: External font `lmex10' loaded for size
(Font) <5> on input line 31.
LaTeX Font Info: Trying to load font information for U+msa on input line 31.
(D:\MikTex\tex/latex/amsfonts\umsa.fd
File: umsa.fd 2013/01/14 v3.01 AMS symbols A
)
(C:\Users\Markyan04\AppData\Roaming\MiKTeX\tex/latex/microtype\mt-msa.cfg
File: mt-msa.cfg 2006/02/04 v1.1 microtype config. file: AMS symbols (a) (RS)
)
LaTeX Font Info: Trying to load font information for U+msb on input line 31.
(D:\MikTex\tex/latex/amsfonts\umsb.fd
File: umsb.fd 2013/01/14 v3.01 AMS symbols B
)
(C:\Users\Markyan04\AppData\Roaming\MiKTeX\tex/latex/microtype\mt-msb.cfg
File: mt-msb.cfg 2005/06/01 v1.0 microtype config. file: AMS symbols (b) (RS)
)
LaTeX Font Info: Trying to load font information for T1+lmtt on input line 3
1.
(D:\MikTex\tex/latex/lm\t1lmtt.fd
File: t1lmtt.fd 2015/05/01 v1.6.1 Font defs for Latin Modern
)
Package microtype Info: Loading generic protrusion settings for font family
(microtype) `lmtt' (encoding: T1).
(microtype) For optimal results, create family-specific settings.
(microtype) See the microtype manual for details.
LaTeX Font Info: External font `lmex10' loaded for size
(Font) <9> on input line 31.
LaTeX Font Info: External font `lmex10' loaded for size
(Font) <6> on input line 31.
LaTeX Font Info: Trying to load font information for TS1+lmr on input line 3
6.
(D:\MikTex\tex/latex/lm\ts1lmr.fd
File: ts1lmr.fd 2015/05/01 v1.6.1 Font defs for Latin Modern
) [1
{C:/Users/Markyan04/AppData/Local/MiKTeX/fonts/map/pdftex/pdftex.map}{D:/MikTex
/fonts/enc/dvips/lm/lm-ec.enc}{D:/MikTex/fonts/enc/dvips/lm/lm-rm.enc}{D:/MikTe
x/fonts/enc/dvips/lm/lm-mathit.enc}{D:/MikTex/fonts/enc/dvips/lm/lm-mathsy.enc}
{D:/MikTex/fonts/enc/dvips/lm/lm-ts1.enc}]
Underfull \vbox (badness 5105) has occurred while \output is active []
[2]
[3]
Underfull \vbox (badness 10000) has occurred while \output is active []
[4]
<fig-design-v4-from-user-svg-cropped.pdf, id=26, 616.3025pt x 172.645pt>
File: fig-design-v4-from-user-svg-cropped.pdf Graphic file (type pdf)
<use fig-design-v4-from-user-svg-cropped.pdf>
Package pdftex.def Info: fig-design-v4-from-user-svg-cropped.pdf used on input
line 71.
(pdftex.def) Requested size: 277.69987pt x 77.79225pt.
[5] [6 <./fig-design-v4-from-user-svg-cropped.pdf>] [7{D:/MikTex/fonts/enc/dvip
s/lm/lm-mathex.enc}]
Overfull \hbox (8.34737pt too wide) in paragraph at lines 137--139
\T1/lmr/m/n/10 Because dif-fu-sion op-ti-miza-tion can ex-hibit timestep im-bal
-ance (i.e., some timesteps
[]
[8] [9]
<typeclass-cropped.pdf, id=141, 616.3025pt x 221.82875pt>
File: typeclass-cropped.pdf Graphic file (type pdf)
<use typeclass-cropped.pdf>
Package pdftex.def Info: typeclass-cropped.pdf used on input line 195.
(pdftex.def) Requested size: 340.17958pt x 122.4462pt.
Underfull \vbox (badness 10000) has occurred while \output is active []
[10]
[11 <./typeclass-cropped.pdf>] [12]
<fig-benchmark-story-v2.png, id=172, 1089.6345pt x 360.036pt>
File: fig-benchmark-story-v2.png Graphic file (type png)
<use fig-benchmark-story-v2.png>
Package pdftex.def Info: fig-benchmark-story-v2.png used on input line 225.
(pdftex.def) Requested size: 347.12354pt x 114.69197pt.
Underfull \vbox (badness 10000) has occurred while \output is active []
[13 <./fig-benchmark-story-v2.png>]
<fig-benchmark-ablations-v1.png, id=179, 727.299pt x 328.5pt>
File: fig-benchmark-ablations-v1.png Graphic file (type png)
<use fig-benchmark-ablations-v1.png>
Package pdftex.def Info: fig-benchmark-ablations-v1.png used on input line 279
.
(pdftex.def) Requested size: 347.12354pt x 156.78598pt.
[14] [15 <./fig-benchmark-ablations-v1.png>]
Overfull \hbox (5.31874pt too wide) in paragraph at lines 321--322
\T1/lmr/m/n/10 This pa-per ad-dresses the data scarcity and share-abil-ity bar-
ri-ers that limit machine-
[]
Overfull \hbox (2.54008pt too wide) in paragraph at lines 323--324
[]\T1/lmr/m/n/10 Overall, Mask-DDPM pro-vides a re-pro-ducible foun-da-tion for
gen-er-at-ing share-
[]
(main.bbl [16]
Underfull \hbox (badness 1609) in paragraph at lines 37--43
[]\T1/lmr/m/n/9 Coletta, A., Gopalakr-ish-nan, S., Bor-rajo, D., Vyetrenko, S.:
On the con-
[]
[17]
Underfull \hbox (badness 2653) in paragraph at lines 99--106
[]\T1/lmr/m/n/9 Kollovieh, M., Ansari, A.F., Bohlke-Schneider, M., Zschieg-ner,
J., Wang,
[]
Underfull \hbox (badness 2662) in paragraph at lines 99--106
\T1/lmr/m/n/9 H., Wang, Y.B.: Pre-dict, re-fine, syn-the-size: Self-guiding dif
-fu-sion mod-els
[]
Underfull \hbox (badness 2626) in paragraph at lines 121--127
\T1/lmr/m/n/9 wal, A., Bel-grave, D., Cho, K., Oh, A. (eds.) Ad-vances in Neu-r
al In-
[]
Underfull \hbox (badness 1946) in paragraph at lines 121--127
\T1/lmr/m/n/9 for-ma-tion Pro-cess-ing Sys-tems. vol. 35, pp. 4328--4343. Cur-r
an As-so-ciates,
[]
Underfull \hbox (badness 1603) in paragraph at lines 133--138
[]\T1/lmr/m/n/9 Lin, Z., Jain, A., Wang, C., Fanti, G., Sekar, V.: Us-ing gans
for shar-
[]
Underfull \hbox (badness 2310) in paragraph at lines 133--138
\T1/lmr/m/n/9 tions. In: Pro-ceed-ings of the ACM In-ter-net Mea-sure-ment Con-
fer-ence. p.
[]
Underfull \hbox (badness 1394) in paragraph at lines 133--138
\T1/lmr/m/n/9 464^^U483. IMC '20, As-so-ci-a-tion for Com-put-ing Ma-chin-ery,
New York, NY,
[]
Underfull \hbox (badness 5316) in paragraph at lines 133--138
\T1/lmr/m/n/9 USA (2020). https://doi.org/10.1145/3419394.3423643, $\T1/lmtt/m/
n/9 https : / / doi . org / 10 .
[]
Underfull \hbox (badness 1043) in paragraph at lines 140--144
[]\T1/lmr/m/n/9 Liu, M., Huang, H., Feng, H., Sun, L., Du, B., Fu, Y.: Pristi:
A con-di-
[]
[18]
Underfull \hbox (badness 5592) in paragraph at lines 168--172
[]\T1/lmr/m/n/9 National In-sti-tute of Stan-dards and Tech-nol-ogy: Guide to o
p-er-a-tional
[]
Underfull \hbox (badness 3514) in paragraph at lines 168--172
\T1/lmr/m/n/9 tech-nol-ogy (ot) se-cu-rity. Spe-cial Pub-li-ca-tion 800-82 Rev.
3, NIST (sep
[]
Underfull \hbox (badness 1253) in paragraph at lines 195--200
\T1/lmr/m/n/9 data (jan 2026). https://doi.org/10.2139/ssrn.6055903, $\T1/lmtt/
m/n/9 https : / / papers . ssrn .
[]
Underfull \hbox (badness 1226) in paragraph at lines 207--213
[]\T1/lmr/m/n/9 Shi, J., Han, K., Wang, Z., Doucet, A., Tit-sias, M.: Sim-pli-f
ied and gen-
[]
Underfull \hbox (badness 3954) in paragraph at lines 207--213
\T1/lmr/m/n/9 er-al-ized masked dif-fu-sion for dis-crete data. In: Glober-son,
A., Mackey,
[]
Underfull \hbox (badness 3701) in paragraph at lines 207--213
\T1/lmr/m/n/9 vances in Neu-ral In-for-ma-tion Pro-cess-ing Sys-tems. vol. 37,
pp. 103131--
[]
Underfull \hbox (badness 10000) in paragraph at lines 207--213
\T1/lmr/m/n/9 103167. Cur-ran As-so-ciates, Inc. (2024). https://doi.org/10.522
02/079017-
[]
Underfull \hbox (badness 10000) in paragraph at lines 207--213
\T1/lmr/m/n/9 3277, $\T1/lmtt/m/n/9 https : / / proceedings . neurips . cc / pa
per _ files / paper / 2024 / file /
[]
Underfull \hbox (badness 10000) in paragraph at lines 225--230
[]\T1/lmr/m/n/9 Sikder, M.F., Ra-machan-dran-pil-lai, R., Heintz, F.: Trans-fu-
sion: Gen-
[]
Underfull \hbox (badness 10000) in paragraph at lines 225--230
\T1/lmr/m/n/9 er-at-ing long, high fi-delity time se-ries us-ing dif-fu-sion mo
d-els with
[]
Underfull \hbox (badness 10000) in paragraph at lines 225--230
\T1/lmr/m/n/9 trans-form-ers. Ma-chine Learn-ing with Ap-pli-ca-tions \T1/lmr/b
x/n/9 20\T1/lmr/m/n/9 , 100652 (2025).
[]
Underfull \hbox (badness 10000) in paragraph at lines 225--230
\T1/lmr/m/n/9 https://doi.org/https://doi.org/10.1016/j.mlwa.2025.100652, $\T1/
lmtt/m/n/9 https : / / www .
[]
[19]
Underfull \hbox (badness 2229) in paragraph at lines 263--267
\T1/lmr/m/n/9 er-a-tion. SIG-COMM Com-put. Com-mun. Rev. \T1/lmr/bx/n/9 36\T1/l
mr/m/n/9 (4), 111^^U122 (Aug 2006).
[]
Underfull \hbox (badness 10000) in paragraph at lines 263--267
\T1/lmr/m/n/9 https://doi.org/10.1145/1151659.1159928, $\T1/lmtt/m/n/9 https :
/ / doi . org / 10 . 1145 / 1151659 .
[]
[20]) [21] (main.aux)
***********
LaTeX2e <2025-11-01>
L3 programming layer <2025-12-24>
***********
)
Here is how much of TeX's memory you used:
6343 strings out of 467871
97437 string characters out of 5435199
552261 words of memory out of 5000000
35107 multiletter control sequences out of 15000+600000
706871 words of font info for 99 fonts, out of 8000000 for 9000
1141 hyphenation exceptions out of 8191
57i,9n,65p,2477b,352s stack positions out of 10000i,1000n,20000p,200000b,200000s
<D:/MikTex/fonts/type1/public/lm/lmbx10.pfb><D:/MikTex/fonts/type1/public/lm/
lmbx12.pfb><D:/MikTex/fonts/type1/public/lm/lmbx9.pfb><D:/MikTex/fonts/type1/pu
blic/lm/lmex10.pfb><D:/MikTex/fonts/type1/public/lm/lmmi10.pfb><D:/MikTex/fonts
/type1/public/lm/lmmi5.pfb><D:/MikTex/fonts/type1/public/lm/lmmi7.pfb><D:/MikTe
x/fonts/type1/public/lm/lmmi9.pfb><D:/MikTex/fonts/type1/public/lm/lmmib10.pfb>
<D:/MikTex/fonts/type1/public/lm/lmmib7.pfb><D:/MikTex/fonts/type1/public/lm/lm
r10.pfb><D:/MikTex/fonts/type1/public/lm/lmr5.pfb><D:/MikTex/fonts/type1/public
/lm/lmr6.pfb><D:/MikTex/fonts/type1/public/lm/lmr7.pfb><D:/MikTex/fonts/type1/p
ublic/lm/lmr9.pfb><D:/MikTex/fonts/type1/public/lm/lmri10.pfb><D:/MikTex/fonts/
type1/public/lm/lmri9.pfb><D:/MikTex/fonts/type1/public/lm/lmsy10.pfb><D:/MikTe
x/fonts/type1/public/lm/lmsy7.pfb><D:/MikTex/fonts/type1/public/lm/lmsy9.pfb><D
:/MikTex/fonts/type1/public/lm/lmtt10.pfb><D:/MikTex/fonts/type1/public/lm/lmtt
9.pfb><D:/MikTex/fonts/type1/public/amsfonts/symbols/msbm10.pfb>
Output written on main.pdf (21 pages, 1116707 bytes).
PDF statistics:
317 PDF objects out of 1000 (max. 8388607)
0 named destinations out of 1000 (max. 500000)
13845 words of extra memory for PDF output out of 14400 (max. 10000000)

View File

@@ -1,5 +1,6 @@
\documentclass[runningheads]{llncs}
\usepackage[T1]{fontenc}
\usepackage{lmodern}
\usepackage{graphicx}
\usepackage{amsmath}
\usepackage{amssymb}
@@ -7,7 +8,7 @@
\usepackage{bm}
\usepackage{array}
\usepackage{booktabs}
\usepackage{microtype}
\usepackage[expansion=false]{microtype}
\usepackage{float}
\usepackage{url}
@@ -48,7 +49,7 @@ Despite these advances, most existing work either focuses on packet-level genera
% 2. Related Work
\section{Related Work}
\label{sec:related}
Early generation of network data oriented towards "realism" mostly remained at the packet/flow header level, either through replay or statistical synthesis based on single-point observations. Swing, in a closed-loop, network-responsive manner, extracts user/application/network distributions from single-point observations to reproduce burstiness and correlation across multiple time scales \citep{10.1145/1151659.1159928,10.1145/1159913.1159928}. Subsequently, a series of works advanced header synthesis to learning-based generation: the WGAN-based method added explicit verification of protocol field consistency to NetFlow/IPFIX \citep{Ring_2019}, NetShare reconstructed header modeling as flow-level time series and improved fidelity and scalability through domain encoding and parallel fine-tuning \citep{10.1145/3544216.3544251}, and DoppelGANger preserved the long-range structure and downstream sorting consistency of networked time series by decoupling attributes from sequences \citep{Lin_2020}. However, in industrial control system (ICS) scenarios, the original PCAP is usually not shareable, and public testbeds (such as SWaT, WADI) mostly provide process/monitoring telemetry and protocol interactions for security assessment, but public datasets emphasize operational variables rather than packet-level traces \citep{7469060,10.1145/3055366.3055375}. This makes "synthesis at the feature/telemetry level, aware of protocol and semantics" more feasible and necessary in practice: we are more concerned with reproducing high-level distributions and multi-scale temporal patterns according to operational semantics and physical constraints without relying on the original packets. From this perspective, the generation paradigm naturally shifts from "packet syntax reproduction" to "modeling of high-level spatio-temporal distributions and uncertainties", requiring stable training, strong distribution fitting, and interpretable uncertainty characterization.
Early generation of network data oriented towards "realism" mostly remained at the packet/flow header level, either through replay or statistical synthesis based on single-point observations. Swing, in a closed-loop, network-responsive manner, extracts user/application/network distributions from single-point observations to reproduce burstiness and correlation across multiple time scales \citep{10.1145/1151659.1159928}. Subsequently, a series of works advanced header synthesis to learning-based generation: the WGAN-based method added explicit verification of protocol field consistency to NetFlow/IPFIX \citep{Ring_2019}, NetShare reconstructed header modeling as flow-level time series and improved fidelity and scalability through domain encoding and parallel fine-tuning \citep{10.1145/3544216.3544251}, and DoppelGANger preserved the long-range structure and downstream sorting consistency of networked time series by decoupling attributes from sequences \citep{Lin_2020}. However, in industrial control system (ICS) scenarios, the original PCAP is usually not shareable, and public testbeds (such as SWaT, WADI) mostly provide process/monitoring telemetry and protocol interactions for security assessment, but public datasets emphasize operational variables rather than packet-level traces \citep{7469060,10.1145/3055366.3055375}. This makes "synthesis at the feature/telemetry level, aware of protocol and semantics" more feasible and necessary in practice: we are more concerned with reproducing high-level distributions and multi-scale temporal patterns according to operational semantics and physical constraints without relying on the original packets. From this perspective, the generation paradigm naturally shifts from "packet syntax reproduction" to "modeling of high-level spatio-temporal distributions and uncertainties", requiring stable training, strong distribution fitting, and interpretable uncertainty characterization.
Diffusion models exhibit good fit along this path: DDPM achieves high-quality sampling and stable optimization through efficient $\epsilon$ parameterization and weighted variational objectives \citep{NEURIPS2020_4c5bcfec}, the SDE perspective unifies score-based and diffusion, providing likelihood evaluation and prediction-correction sampling strategies based on probability flow ODEs \citep{song2021scorebasedgenerativemodelingstochastic}. For time series, TimeGrad replaces the constrained output distribution with conditional denoising, capturing high-dimensional correlations at each step \citep{rasul2021autoregressivedenoisingdiffusionmodels}; CSDI explicitly performs conditional diffusion and uses two-dimensional attention to simultaneously leverage temporal and cross-feature dependencies, suitable for conditioning and filling in missing values \citep{tashiro2021csdiconditionalscorebaseddiffusion}; in a more general spatio-temporal structure, DiffSTG generalizes diffusion to spatio-temporal graphs, combining TCN/GCN with denoising U-Net to improve CRPS and inference efficiency in a non-autoregressive manner \citep{wen2024diffstgprobabilisticspatiotemporalgraph}, and PriSTI further enhances conditional features and geographical relationships, maintaining robustness under high missing rates and sensor failures \citep{liu2023pristiconditionaldiffusionframework}; in long sequences and continuous domains, DiffWave verifies that diffusion can also match the quality of strong vocoders under non-autoregressive fast synthesis \citep{kong2021diffwaveversatilediffusionmodel}; studies on cellular communication traffic show that diffusion can recover spatio-temporal patterns and provide uncertainty characterization at the urban scale \citep{11087622}. 
These results overall point to a conclusion: when the research focus is on "telemetry/high-level features" rather than raw messages, diffusion models provide stable and fine-grained distribution fitting and uncertainty quantification, which is exactly in line with the requirements of ICS telemetry synthesis. Meanwhile, directly entrusting all structures to a "monolithic diffusion" is not advisable: long-range temporal skeletons and fine-grained marginal distributions often have optimization tensions, requiring explicit decoupling in modeling.

View File

@@ -1,96 +1,125 @@
@inproceedings{vaswani2017attention,
title={Attention Is All You Need},
author={Vaswani, Ashish and Shazeer, Noam and Parmar, Niki and Uszkoreit, Jakob and Jones, Llion and Gomez, Aidan N and Kaiser, {\L}ukasz and Polosukhin, Illia},
booktitle={Advances in Neural Information Processing Systems (NeurIPS)},
author = {Vaswani, Ashish and Shazeer, Noam and Parmar, Niki and Uszkoreit, Jakob and Jones, Llion and Gomez, Aidan N and Kaiser, \L ukasz and Polosukhin, Illia},
booktitle = {Advances in Neural Information Processing Systems},
editor = {I. Guyon and U. Von Luxburg and S. Bengio and H. Wallach and R. Fergus and S. Vishwanathan and R. Garnett},
pages = {},
publisher = {Curran Associates, Inc.},
title = {Attention is All you Need},
url = {https://proceedings.neurips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf},
volume = {30},
year={2017},
url={https://arxiv.org/abs/1706.03762}
year = {2017}
}
@inproceedings{ho2020denoising,
title={Denoising Diffusion Probabilistic Models},
author = {Ho, Jonathan and Jain, Ajay and Abbeel, Pieter},
booktitle={Advances in Neural Information Processing Systems (NeurIPS)},
volume={33},
booktitle = {Advances in Neural Information Processing Systems},
editor = {H. Larochelle and M. Ranzato and R. Hadsell and M.F. Balcan and H. Lin},
pages = {6840--6851},
year={2020},
url={https://arxiv.org/abs/2006.11239}
publisher = {Curran Associates, Inc.},
title = {Denoising Diffusion Probabilistic Models},
url = {https://proceedings.neurips.cc/paper_files/paper/2020/file/4c5bcfec8584af0d967f1ab10179ca4b-Paper.pdf},
volume = {33},
year = {2020}
}
@inproceedings{austin2021structured,
title={Structured Denoising Diffusion Models in Discrete State-Spaces},
author={Austin, Jacob and Johnson, Daniel D and Ho, Jonathan and Tarlow, Daniel and van den Berg, Rianne},
booktitle={Advances in Neural Information Processing Systems (NeurIPS)},
volume={34},
author = {Austin, Jacob and Johnson, Daniel D. and Ho, Jonathan and Tarlow, Daniel and van den Berg, Rianne},
booktitle = {Advances in Neural Information Processing Systems},
editor = {M. Ranzato and A. Beygelzimer and Y. Dauphin and P.S. Liang and J. Wortman Vaughan},
pages = {17981--17993},
year={2021},
url={https://arxiv.org/abs/2107.03006}
publisher = {Curran Associates, Inc.},
title = {Structured Denoising Diffusion Models in Discrete State-Spaces},
url = {https://proceedings.neurips.cc/paper_files/paper/2021/file/958c530554f78bcd8e97125b70e6973d-Paper.pdf},
volume = {34},
year = {2021}
}
@inproceedings{shi2024simplified,
 author = {Shi, Jiaxin and Han, Kehang and Wang, Zhe and Doucet, Arnaud and Titsias, Michalis},
 booktitle = {Advances in Neural Information Processing Systems},
 doi = {10.52202/079017-3277},
 editor = {A. Globerson and L. Mackey and D. Belgrave and A. Fan and U. Paquet and J. Tomczak and C. Zhang},
 pages = {103131--103167},
 publisher = {Curran Associates, Inc.},
 title = {Simplified and Generalized Masked Diffusion for Discrete Data},
 url = {https://proceedings.neurips.cc/paper_files/paper/2024/file/bad233b9849f019aead5e5cc60cef70f-Paper-Conference.pdf},
 volume = {37},
 year = {2024}
}
@InProceedings{hang2023efficient,
 author = {Hang, Tiankai and Gu, Shuyang and Li, Chen and Bao, Jianmin and Chen, Dong and Hu, Han and Geng, Xin and Guo, Baining},
 title = {Efficient Diffusion Training via Min-SNR Weighting Strategy},
 booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
 month = {October},
 year = {2023},
 doi = {10.1109/ICCV51070.2023.00702},
 pages = {7441-7451}
}
@inproceedings{kollovieh2023tsdiff,
 author = {Kollovieh, Marcel and Ansari, Abdul Fatir and Bohlke-Schneider, Michael and Zschiegner, Jasper and Wang, Hao and Wang, Yuyang (Bernie)},
 booktitle = {Advances in Neural Information Processing Systems},
 editor = {A. Oh and T. Naumann and A. Globerson and K. Saenko and M. Hardt and S. Levine},
 pages = {28341--28364},
 publisher = {Curran Associates, Inc.},
 title = {Predict, Refine, Synthesize: Self-Guiding Diffusion Models for Probabilistic Time Series Forecasting},
 url = {https://proceedings.neurips.cc/paper_files/paper/2023/file/5a1a10c2c2c9b9af1514687bc24b8f3d-Paper-Conference.pdf},
 volume = {36},
 year = {2023}
}
@article{sikder2023transfusion,
 title = {TransFusion: Generating long, high fidelity time series using diffusion models with transformers},
 journal = {Machine Learning with Applications},
 volume = {20},
 pages = {100652},
 year = {2025},
 issn = {2666-8270},
 doi = {https://doi.org/10.1016/j.mlwa.2025.100652},
 url = {https://www.sciencedirect.com/science/article/pii/S2666827025000350},
 author = {Md Fahim Sikder and Resmi Ramachandranpillai and Fredrik Heintz},
 keywords = {Time series generation, Generative models, Diffusion models, Synthetic data, Long-sequenced data},
 abstract = {The generation of high-quality, long-sequenced time-series data is essential due to its wide range of applications. In the past, standalone Recurrent and Convolutional Neural Network-based Generative Adversarial Networks (GAN) were used to synthesize time-series data. However, they are inadequate for generating long sequences of time-series data due to limitations in the architecture, such as difficulties in capturing long-range dependencies, limited temporal coherence, and scalability challenges. Furthermore, GANs are well known for their training instability and mode collapse problem. To address this, we propose TransFusion, a diffusion, and transformers-based generative model to generate high-quality long-sequence time-series data. We extended the sequence length to 384, surpassing the previous limit, and successfully generated high-quality synthetic data. Also, we introduce two evaluation metrics to evaluate the quality of the synthetic data as well as its predictive characteristics. TransFusion is evaluated using a diverse set of visual and empirical metrics, consistently outperforming the previous state-of-the-art by a significant margin.}
}
@inproceedings{song2021score,
title={Score-Based Generative Modeling through Stochastic Differential Equations},
author={Song, Yang and Sohl-Dickstein, Jascha and Kingma, Diederik P and Kumar, Abhishek and Ermon, Stefano and Poole, Ben},
booktitle={International Conference on Learning Representations (ICLR)},
year={2021},
url={https://arxiv.org/abs/2011.13456}
}
@inproceedings{shi2025tabdiff,
 title = {TabDiff: A Mixed-type Diffusion Model for Tabular Data Generation},
 author = {Shi, Juntong and Xu, Minkai and Hua, Harper and Zhang, Hengrui and Ermon, Stefano and Leskovec, Jure},
 booktitle = {International Conference on Learning Representations (ICLR)},
 year = {2025},
 url = {https://arxiv.org/abs/2410.20626}
}
@inproceedings{yuan2025ctu,
 author = {Yuan, Yusong and Sha, Yun and Zhao, Haidong},
 title = {CTU-DDPM: Generating Industrial Control System Time-Series Data with a CNN-Transformer Hybrid Diffusion Model},
 year = {2025},
 isbn = {9798400721007},
 publisher = {Association for Computing Machinery},
 address = {New York, NY, USA},
 url = {https://doi.org/10.1145/3776759.3776845},
 doi = {10.1145/3776759.3776845},
 abstract = {The security of Industrial Control Systems (ICS) is of paramount importance to national security. Anomaly detection, as a critical security measure, can effectively identify attack behaviors targeting ICS. However, the performance of anomaly detection methods is highly dependent on high-quality datasets, and real anomalous data, in particular, is often difficult to obtain due to its sensitive security implications. To address this challenge, this paper proposes CTU-DDPM, a method for generating multivariate time series data based on Diffusion Models. Our aim is to Generate high-quality industrial control time series data to enhance the performance of anomaly detection methods. This research constructs a diffusion model that fuses a Convolutional Neural Network (CNN) and a Transformer architecture. This hybrid approach is designed to achieve more precise and realistic data generation in complex industrial time series, thereby effectively compensating for the scarcity of authentic anomalous data and providing crucial data support for ICS security.},
 booktitle = {Proceedings of the 2025 International Symposium on Artificial Intelligence and Computational Social Sciences},
 pages = {547--552},
 numpages = {6},
 keywords = {Convolutional Neural Network, Diffusion Model, Generation, Industrial Control Systems, Time Series Data, Transformer},
 series = {AICSS '25}
}
@misc{sha2026ddpm,
url={https://csrc.nist.gov/pubs/sp/800/82/r3/final}
}
Reference for Introduction Part
@article{10.1007/s10844-022-00753-1,
 author = {Koay, Abigail M. Y. and Ko, Ryan K. L and Hettema, Hinne and Radke, Kenneth},
 title = {Machine learning in industrial control system (ICS) security: current landscape, opportunities and challenges},
 journal = {Journal of Intelligent Information Systems},
 volume = {60},
 number = {2},
 pages = {377--405},
 year = {2023},
 publisher = {Springer},
 issn = {0925-9902},
 url = {https://doi.org/10.1007/s10844-022-00753-1},
 doi = {10.1007/s10844-022-00753-1},
 abstract = {The advent of Industry 4.0 has led to a rapid increase in cyber attacks on industrial systems and processes, particularly on Industrial Control Systems (ICS). These systems are increasingly becoming prime targets for cyber criminals and nation-states looking to extort large ransoms or cause disruptions due to their ability to cause devastating impact whenever they cease working or malfunction. Although myriads of cyber attack detection systems have been proposed and developed, these detection systems still face many challenges that are typically not found in traditional detection systems. Motivated by the need to better understand these challenges to improve current approaches, this paper aims to (1) understand the current vulnerability landscape in ICS, (2) survey current advancements of Machine Learning (ML) based methods with respect to the usage of ML base classifiers (3) provide insights to benefits and limitations of recent advancement with respect to two performance vectors; detection accuracy and attack variety. Based on our findings, we present key open challenges which will represent exciting research opportunities for the research community.},
 keywords = {Operational technology, Cyber security, Dataset, Industrial control systems, Machine learning, Critical infrastructure}
}
@Article{Nankya2023-gp,
AUTHOR = {Nankya, Mary and Chataut, Robin and Akl, Robert},
TITLE = {Securing Industrial Control Systems: Components, Cyber Threats, and Machine Learning-Driven Defense Strategies},
JOURNAL = {Sensors},
VOLUME = {23},
YEAR = {2023},
NUMBER = {21},
ARTICLE-NUMBER = {8840},
URL = {https://www.mdpi.com/1424-8220/23/21/8840},
PubMedID = {37960539},
ISSN = {1424-8220},
ABSTRACT = {Industrial Control Systems (ICS), which include Supervisory Control and Data Acquisition (SCADA) systems, Distributed Control Systems (DCS), and Programmable Logic Controllers (PLC), play a crucial role in managing and regulating industrial processes. However, ensuring the security of these systems is of utmost importance due to the potentially severe consequences of cyber attacks. This article presents an overview of ICS security, covering its components, protocols, industrial applications, and performance aspects. It also highlights the typical threats and vulnerabilities faced by these systems. Moreover, the article identifies key factors that influence the design decisions concerning control, communication, reliability, and redundancy properties of ICS, as these are critical in determining the security needs of the system. The article outlines existing security countermeasures, including network segmentation, access control, patch management, and security monitoring. Furthermore, the article explores the integration of machine learning techniques to enhance the cybersecurity of ICS. Machine learning offers several advantages, such as anomaly detection, threat intelligence analysis, and predictive maintenance. However, combining machine learning with other security measures is essential to establish a comprehensive defense strategy for ICS. The article also addresses the challenges associated with existing measures and provides recommendations for improving ICS security. This paper becomes a valuable reference for researchers aiming to make meaningful contributions within the constantly evolving ICS domain by providing an in-depth examination of the present state, challenges, and potential future advancements.},
DOI = {10.3390/s23218840}
}
@misc{shin,
abstract = {Denoising diffusion probabilistic models are becoming the leading generative modeling paradigm for many important data modalities. Being the most prevalent in the computer vision community, diffusion models have recently gained some attention in other domains, including speech, NLP, and graph-like data. In this work, we investigate if the framework of diffusion models can be advantageous for general tabular problems, where data points are typically represented by vectors of heterogeneous features. The inherent heterogeneity of tabular data makes it quite challenging for accurate modeling since the individual features can be of a completely different nature, i.e., some of them can be continuous and some can be discrete. To address such data types, we introduce TabDDPM — a diffusion model that can be universally applied to any tabular dataset and handles any feature types. We extensively evaluate TabDDPM on a wide set of benchmarks and demonstrate its superiority over existing GAN/VAE alternatives, which is consistent with the advantage of diffusion models in other fields.}
}
@InProceedings{rasul2021autoregressivedenoisingdiffusionmodels,
 title = {Autoregressive Denoising Diffusion Models for Multivariate Probabilistic Time Series Forecasting},
 author = {Rasul, Kashif and Seward, Calvin and Schuster, Ingmar and Vollgraf, Roland},
 booktitle = {Proceedings of the 38th International Conference on Machine Learning},
 pages = {8857--8868},
 year = {2021},
 editor = {Meila, Marina and Zhang, Tong},
 volume = {139},
 series = {Proceedings of Machine Learning Research},
 month = {18--24 Jul},
 publisher = {PMLR},
 pdf = {http://proceedings.mlr.press/v139/rasul21a/rasul21a.pdf},
 url = {https://proceedings.mlr.press/v139/rasul21a.html},
 abstract = {In this work, we propose TimeGrad, an autoregressive model for multivariate probabilistic time series forecasting which samples from the data distribution at each time step by estimating its gradient. To this end, we use diffusion probabilistic models, a class of latent variable models closely connected to score matching and energy-based methods. Our model learns gradients by optimizing a variational bound on the data likelihood and at inference time converts white noise into a sample of the distribution of interest through a Markov chain using Langevin sampling. We demonstrate experimentally that the proposed autoregressive denoising diffusion model is the new state-of-the-art multivariate probabilistic forecasting method on real-world data sets with thousands of correlated dimensions. We hope that this method is a useful tool for practitioners and lays the foundation for future research in this area.}
}
@article{jiang2023netdiffusionnetworkdataaugmentation,
 author = {Jiang, Xi and Liu, Shinan and Gember-Jacobson, Aaron and Bhagoji, Arjun Nitin and Schmitt, Paul and Bronzino, Francesco and Feamster, Nick},
 title = {NetDiffusion: Network Data Augmentation Through Protocol-Constrained Traffic Generation},
 year = {2024},
 issue_date = {March 2024},
 publisher = {Association for Computing Machinery},
 address = {New York, NY, USA},
 volume = {8},
 number = {1},
 url = {https://doi.org/10.1145/3639037},
 doi = {10.1145/3639037},
 journal = {Proc. ACM Meas. Anal. Comput. Syst.},
 month = feb,
 articleno = {11},
 numpages = {32},
 keywords = {diffusion model, network traffic, synthesis}
}
Reference for Related Work
@inproceedings{10.1145/1159913.1159928,
 author = {Vishwanath, Kashi Venkatesh and Vahdat, Amin},
 title = {Realistic and responsive network traffic generation},
 year = {2006},
 isbn = {1595933085},
 publisher = {Association for Computing Machinery},
 address = {New York, NY, USA},
 url = {https://doi.org/10.1145/1159913.1159928},
 doi = {10.1145/1159913.1159928},
 abstract = {This paper presents Swing, a closed-loop, network-responsive traffic generator that accurately captures the packet interactions of a range of applications using a simple structural model. Starting from observed traffic at a single point in the network, Swing automatically extracts distributions for user, application, and network behavior. It then generates live traffic corresponding to the underlying models in a network emulation environment running commodity network protocol stacks. We find that the generated traces are statistically similar to the original traces. Further, to the best of our knowledge, we are the first to reproduce burstiness in traffic across a range of timescales using a model applicable to a variety of network settings. An initial sensitivity analysis reveals the importance of capturing and recreating user, application, and network characteristics to accurately reproduce such burstiness. Finally, we explore Swing's ability to vary user characteristics, application properties, and wide-area network conditions to project traffic characteristics into alternate scenarios.},
 booktitle = {Proceedings of the 2006 Conference on Applications, Technologies, Architectures, and Protocols for Computer Communications},
 pages = {111--122},
 numpages = {12},
 keywords = {burstiness, energy plot, generator, internet, modeling, structural model, traffic, wavelets},
 location = {Pisa, Italy},
 series = {SIGCOMM '06}
}
@article{Ring_2019,
 title = {Flow-based network traffic generation using Generative Adversarial Networks},
 journal = {Computers \& Security},
 volume = {82},
 pages = {156-172},
 year = {2019},
 issn = {0167-4048},
 doi = {https://doi.org/10.1016/j.cose.2018.12.012},
 url = {https://www.sciencedirect.com/science/article/pii/S0167404818308393},
 author = {Markus Ring and Daniel Schlör and Dieter Landes and Andreas Hotho},
 keywords = {GANs, TTUR WGAN-GP, NetFlow, Generation, IDS},
 abstract = {Flow-based data sets are necessary for evaluating network-based intrusion detection systems (NIDS). In this work, we propose a novel methodology for generating realistic flow-based network traffic. Our approach is based on Generative Adversarial Networks (GANs) which achieve good results for image generation. A major challenge lies in the fact that GANs can only process continuous attributes. However, flow-based data inevitably contain categorical attributes such as IP addresses or port numbers. Therefore, we propose three different preprocessing approaches for flow-based data in order to transform them into continuous values. Further, we present a new method for evaluating the generated flow-based network traffic which uses domain knowledge to define quality tests. We use the three approaches for generating flow-based network traffic based on the CIDDS-001 data set. Experiments indicate that two of the three approaches are able to generate high quality data.}
}
@inproceedings{10.1145/3544216.3544251,
series = {SIGCOMM '22}
}
@inproceedings{Lin_2020,
 author = {Lin, Zinan and Jain, Alankar and Wang, Chen and Fanti, Giulia and Sekar, Vyas},
 title = {Using GANs for Sharing Networked Time Series Data: Challenges, Initial Promise, and Open Questions},
 year = {2020},
 isbn = {9781450381383},
 publisher = {Association for Computing Machinery},
 address = {New York, NY, USA},
 url = {https://doi.org/10.1145/3419394.3423643},
 doi = {10.1145/3419394.3423643},
 abstract = {Limited data access is a longstanding barrier to data-driven research and development in the networked systems community. In this work, we explore if and how generative adversarial networks (GANs) can be used to incentivize data sharing by enabling a generic framework for sharing synthetic datasets with minimal expert knowledge. As a specific target, our focus in this paper is on time series datasets with metadata (e.g., packet loss rate measurements with corresponding ISPs). We identify key challenges of existing GAN approaches for such workloads with respect to fidelity (e.g., long-term dependencies, complex multidimensional relationships, mode collapse) and privacy (i.e., existing guarantees are poorly understood and can sacrifice fidelity). To improve fidelity, we design a custom workflow called DoppelGANger (DG) and demonstrate that across diverse real-world datasets (e.g., bandwidth measurements, cluster requests, web sessions) and use cases (e.g., structural characterization, predictive modeling, algorithm comparison), DG achieves up to 43\% better fidelity than baseline models. Although we do not resolve the privacy problem in this work, we identify fundamental challenges with both classical notions of privacy and recent advances to improve the privacy properties of GANs, and suggest a potential roadmap for addressing these challenges. By shedding light on the promise and challenges, we hope our work can rekindle the conversation on workflows for data sharing.},
 booktitle = {Proceedings of the ACM Internet Measurement Conference},
 pages = {464--483},
 numpages = {20},
 keywords = {generative adversarial networks, privacy, synthetic data generation, time series},
 location = {Virtual Event, USA},
 series = {IMC '20}
}
@INPROCEEDINGS{7469060,
series = {CySWATER '17}
}
@inproceedings{tashiro2021csdiconditionalscorebaseddiffusion,
 author = {Tashiro, Yusuke and Song, Jiaming and Song, Yang and Ermon, Stefano},
 booktitle = {Advances in Neural Information Processing Systems},
 editor = {M. Ranzato and A. Beygelzimer and Y. Dauphin and P.S. Liang and J. Wortman Vaughan},
 pages = {24804--24816},
 publisher = {Curran Associates, Inc.},
 title = {CSDI: Conditional Score-based Diffusion Models for Probabilistic Time Series Imputation},
 url = {https://proceedings.neurips.cc/paper_files/paper/2021/file/cfe8504bda37b575c70ee1a8276f3486-Paper.pdf},
 volume = {34},
 year = {2021}
}
@inproceedings{wen2024diffstgprobabilisticspatiotemporalgraph,
 author = {Wen, Haomin and Lin, Youfang and Xia, Yutong and Wan, Huaiyu and Wen, Qingsong and Zimmermann, Roger and Liang, Yuxuan},
 title = {DiffSTG: Probabilistic Spatio-Temporal Graph Forecasting with Denoising Diffusion Models},
 year = {2023},
 isbn = {9798400701689},
 publisher = {Association for Computing Machinery},
 address = {New York, NY, USA},
 url = {https://doi.org/10.1145/3589132.3625614},
 doi = {10.1145/3589132.3625614},
 abstract = {Spatio-temporal graph neural networks (STGNN) have emerged as the dominant model for spatio-temporal graph (STG) forecasting. Despite their success, they fail to model intrinsic uncertainties within STG data, which cripples their practicality in downstream tasks for decision-making. To this end, this paper focuses on probabilistic STG forecasting, which is challenging due to the difficulty in modeling uncertainties and complex ST dependencies. In this study, we present the first attempt to generalize the popular de-noising diffusion probabilistic models to STGs, leading to a novel non-autoregressive framework called DiffSTG, along with the first denoising network UGnet for STG in the framework. Our approach combines the spatio-temporal learning capabilities of STGNNs with the uncertainty measurements of diffusion models. Extensive experiments validate that DiffSTG reduces the Continuous Ranked Probability Score (CRPS) by 4\%-14\%, and Root Mean Squared Error (RMSE) by 2\%-7\% over existing methods on three real-world datasets.},
 booktitle = {Proceedings of the 31st ACM International Conference on Advances in Geographic Information Systems},
 articleno = {60},
 numpages = {12},
 keywords = {spatio-temporal graph forecasting, probabilistic forecasting, diffusion model},
 location = {Hamburg, Germany},
 series = {SIGSPATIAL '23}
}
@INPROCEEDINGS{liu2023pristiconditionaldiffusionframework,
 author = {Liu, Mingzhe and Huang, Han and Feng, Hao and Sun, Leilei and Du, Bowen and Fu, Yanjie},
 booktitle = {2023 IEEE 39th International Conference on Data Engineering (ICDE)},
 title = {PriSTI: A Conditional Diffusion Framework for Spatiotemporal Imputation},
 year = {2023},
 volume = {},
 number = {},
 pages = {1927-1939},
 keywords = {Correlation;Scalability;Transforms;Predictive models;Feature extraction;Propagation losses;Probabilistic logic;Spatiotemporal Imputation;Diffusion Model;Spatiotemporal Dependency Learning},
 doi = {10.1109/ICDE55515.2023.00150}
}
@misc{kong2021diffwaveversatilediffusionmodel,
title={DiffWave: A Versatile Diffusion Model for Audio Synthesis},
doi={10.1109/TMC.2025.3591183}
}
@inproceedings{hoogeboom2021argmaxflowsmultinomialdiffusion,
 author = {Hoogeboom, Emiel and Nielsen, Didrik and Jaini, Priyank and Forr\'{e}, Patrick and Welling, Max},
 booktitle = {Advances in Neural Information Processing Systems},
 editor = {M. Ranzato and A. Beygelzimer and Y. Dauphin and P.S. Liang and J. Wortman Vaughan},
 pages = {12454--12465},
 publisher = {Curran Associates, Inc.},
 title = {Argmax Flows and Multinomial Diffusion: Learning Categorical Distributions},
 url = {https://proceedings.neurips.cc/paper_files/paper/2021/file/67d96d458abdef21792e6d8e590244e7-Paper.pdf},
 volume = {34},
 year = {2021}
}
@inproceedings{li2022diffusionlmimprovescontrollabletext,
 author = {Li, Xiang and Thickstun, John and Gulrajani, Ishaan and Liang, Percy S and Hashimoto, Tatsunori B},
 booktitle = {Advances in Neural Information Processing Systems},
 editor = {S. Koyejo and S. Mohamed and A. Agarwal and D. Belgrave and K. Cho and A. Oh},
 pages = {4328--4343},
 publisher = {Curran Associates, Inc.},
 title = {Diffusion-LM Improves Controllable Text Generation},
 url = {https://proceedings.neurips.cc/paper_files/paper/2022/file/1be5bc25d50895ee656b8c2d9eb89d6a-Paper-Conference.pdf},
 volume = {35},
 year = {2022}
}
% AFLNet Five Years Later (IEEE TSE 2025). Consolidated: duplicate
% @misc/@ARTICLE headers and orphaned fields after the closing brace
% removed. "ö" escaped as {\"o} so plain BibTeX sorts/compiles reliably.
@article{meng2025aflnetyearslatercoverageguided,
  author   = {Meng, Ruijie and Pham, Van-Thuan and B{\"o}hme, Marcel and Roychoudhury, Abhik},
  title    = {AFLNet Five Years Later: On Coverage-Guided Protocol Fuzzing},
  journal  = {IEEE Transactions on Software Engineering},
  year     = {2025},
  volume   = {51},
  number   = {4},
  pages    = {960-974},
  keywords = {Protocols;Servers;Fuzzing;Codes;Security;Data models;Source coding;Computer bugs;Software systems;Reliability;Greybox fuzzing;network protocol testing;stateful fuzzing},
  doi      = {10.1109/TSE.2025.3535925}
}
% Learn&Fuzz (ASE 2017). Consolidated: an @INPROCEEDINGS header appeared
% mid-entry and fields trailed after the closing brace; one clean
% published-venue entry kept.
@inproceedings{godefroid2017learnfuzzmachinelearninginput,
  author    = {Godefroid, Patrice and Peleg, Hila and Singh, Rishabh},
  title     = {Learn\&Fuzz: Machine learning for input fuzzing},
  booktitle = {2017 32nd IEEE/ACM International Conference on Automated Software Engineering (ASE)},
  year      = {2017},
  pages     = {50-59},
  keywords  = {Portable document format;Grammar;Training;Probability distribution;Recurrent neural networks;Fuzzing;Deep Learning;Grammar-based Fuzzing;Grammar Learning},
  doi       = {10.1109/ASE.2017.8115618}
}
% NEUZZ (IEEE S&P 2019). Consolidated: duplicate @misc/@INPROCEEDINGS
% headers, duplicate author field, and empty volume/number removed.
@inproceedings{she2019neuzzefficientfuzzingneural,
  author    = {She, Dongdong and Pei, Kexin and Epstein, Dave and Yang, Junfeng and Ray, Baishakhi and Jana, Suman},
  title     = {NEUZZ: Efficient Fuzzing with Neural Program Smoothing},
  booktitle = {2019 IEEE Symposium on Security and Privacy (SP)},
  year      = {2019},
  pages     = {803-817},
  keywords  = {Optimization;Fuzzing;Computer bugs;Artificial neural networks;Smoothing methods;Evolutionary computation;fuzzing;-neural-program-smoothing;-gradient-guided-mutation},
  doi       = {10.1109/SP.2019.00052}
}
% Transformer-XL (ACL 2019). The file contained TWO entries with this
% same key (@inproceedings + @misc arXiv), a hard BibTeX error; the
% ACL Anthology version is kept, the arXiv duplicate dropped.
@inproceedings{dai2019transformerxlattentivelanguagemodels,
  title     = "Transformer-{XL}: Attentive Language Models beyond a Fixed-Length Context",
  author    = "Dai, Zihang and
    Yang, Zhilin and
    Yang, Yiming and
    Carbonell, Jaime and
    Le, Quoc and
    Salakhutdinov, Ruslan",
  editor    = "Korhonen, Anna and
    Traum, David and
    M{\`a}rquez, Llu{\'i}s",
  booktitle = "Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics",
  month     = jul,
  year      = "2019",
  address   = "Florence, Italy",
  publisher = "Association for Computational Linguistics",
  url       = "https://aclanthology.org/P19-1285/",
  doi       = "10.18653/v1/P19-1285",
  pages     = "2978--2988"
}
% Informer (AAAI 2021). Consolidated: duplicate @misc/@article headers
% and two conflicting author fields removed; AAAI journal-proceedings
% version kept, long abstractNote dropped (ignored by styles).
@article{zhou2021informerefficienttransformerlong,
  author  = {Zhou, Haoyi and Zhang, Shanghang and Peng, Jieqi and Zhang, Shuai and Li, Jianxin and Xiong, Hui and Zhang, Wancai},
  title   = {Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting},
  journal = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume  = {35},
  number  = {12},
  pages   = {11106-11115},
  month   = {May},
  year    = {2021},
  doi     = {10.1609/aaai.v35i12.17325},
  url     = {https://ojs.aaai.org/index.php/AAAI/article/view/17325}
}
% Autoformer (NeurIPS 2021). Consolidated: duplicate headers, duplicate
% author and url fields removed; NeurIPS proceedings version kept.
% Citation key unchanged (callers cite "wu2022...").
@inproceedings{wu2022autoformerdecompositiontransformersautocorrelation,
  author    = {Wu, Haixu and Xu, Jiehui and Wang, Jianmin and Long, Mingsheng},
  title     = {Autoformer: Decomposition Transformers with Auto-Correlation for Long-Term Series Forecasting},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {M. Ranzato and A. Beygelzimer and Y. Dauphin and P.S. Liang and J. Wortman Vaughan},
  volume    = {34},
  pages     = {22419--22430},
  publisher = {Curran Associates, Inc.},
  year      = {2021},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2021/file/bcc0d400288793e8bdcd7c19a8ac0c2b-Paper.pdf}
}
% FEDformer (ICML 2022, PMLR v162). Consolidated: an @InProceedings
% header appeared mid-@misc-entry with duplicate title/author fields;
% PMLR version kept, long abstract dropped (ignored by styles).
@inproceedings{zhou2022fedformerfrequencyenhanceddecomposed,
  title     = {{FED}former: Frequency Enhanced Decomposed Transformer for Long-term Series Forecasting},
  author    = {Zhou, Tian and Ma, Ziqing and Wen, Qingsong and Wang, Xue and Sun, Liang and Jin, Rong},
  booktitle = {Proceedings of the 39th International Conference on Machine Learning},
  editor    = {Chaudhuri, Kamalika and Jegelka, Stefanie and Song, Le and Szepesvari, Csaba and Niu, Gang and Sabato, Sivan},
  series    = {Proceedings of Machine Learning Research},
  volume    = {162},
  pages     = {27268--27286},
  month     = {17--23 Jul},
  publisher = {PMLR},
  year      = {2022},
  pdf       = {https://proceedings.mlr.press/v162/zhou22g/zhou22g.pdf},
  url       = {https://proceedings.mlr.press/v162/zhou22g.html}
}
@article{2023,
@@ -494,8 +525,6 @@ series = {CySWATER '17}
pages={197202}
}
@comment{References for benchmark evaluation}
@article{stenger2024survey,
title={Evaluation is key: a survey on evaluation measures for synthetic time series},
author={Stenger, Michael and Leppich, Robert and Foster, Ian T and Kounev, Samuel and Bauer, Andre},
@@ -507,61 +536,84 @@ Reference for Benchmark
publisher={Springer}
}
% Jensen-Shannon divergence source paper (IEEE TIT 1991). Consolidated:
% duplicate @article/@ARTICLE headers, duplicate title/year fields, and
% orphaned fields after the closing brace removed.
@article{lin1991divergence,
  author   = {Lin, J.},
  title    = {Divergence measures based on the Shannon entropy},
  journal  = {IEEE Transactions on Information Theory},
  year     = {1991},
  volume   = {37},
  number   = {1},
  pages    = {145-151},
  keywords = {Entropy;Probability distribution;Upper bound;Pattern analysis;Signal analysis;Signal processing;Pattern recognition;Taxonomy;Genetics;Computer science},
  doi      = {10.1109/18.61115}
}
% TimeGAN (NeurIPS 2019). Fixed: the entry carried two title fields
% (a BibTeX duplicate-field warning) and an empty pages={}; one title
% kept, empty field dropped.
@inproceedings{yoon2019timegan,
  author    = {Yoon, Jinsung and Jarrett, Daniel and van der Schaar, Mihaela},
  title     = {Time-series Generative Adversarial Networks},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch\'{e}-Buc and E. Fox and R. Garnett},
  volume    = {32},
  publisher = {Curran Associates, Inc.},
  year      = {2019},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2019/file/c9efe5f26cd17ba6216bbe2a7d26d490-Paper.pdf}
}
% Sig-Wasserstein GANs (ICAIF '21). Fixed: the old @article{ni2021...}
% entry was left unclosed before this @inproceedings opened (unbalanced
% braces, hard BibTeX parse error). ACM version kept; long abstract
% dropped (ignored by styles).
% NOTE(review): citation key changed from ni2021sigwasserstein to the
% ACM DOI key — confirm all \cite commands use the new key.
@inproceedings{10.1145/3490354.3494393,
  author    = {Ni, Hao and Szpruch, Lukasz and Sabate-Vidales, Marc and Xiao, Baoren and Wiese, Magnus and Liao, Shujian},
  title     = {Sig-wasserstein GANs for time series generation},
  booktitle = {Proceedings of the Second ACM International Conference on AI in Finance},
  year      = {2022},
  isbn      = {9781450391481},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  url       = {https://doi.org/10.1145/3490354.3494393},
  doi       = {10.1145/3490354.3494393},
  articleno = {28},
  numpages  = {8},
  keywords  = {signatures, neural networks, generative modelling},
  location  = {Virtual Event},
  series    = {ICAIF '21}
}
% Constrained time-series generation (NeurIPS 2023). Consolidated:
% two conflicting author fields (placeholder "Coletta, Alessandro ...
% and others" vs. the full NeurIPS author list) and duplicate title
% fields removed; proceedings version kept.
@inproceedings{coletta2023constrained,
  author    = {Coletta, Andrea and Gopalakrishnan, Sriram and Borrajo, Daniel and Vyetrenko, Svitlana},
  title     = {On the Constrained Time-Series Generation Problem},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {A. Oh and T. Naumann and A. Globerson and K. Saenko and M. Hardt and S. Levine},
  volume    = {36},
  pages     = {61048--61059},
  publisher = {Curran Associates, Inc.},
  year      = {2023},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2023/file/bfb6a69c0d9e2bc596e1cd31f16fcdde-Paper-Conference.pdf}
}
% Safety interlock verification (J. Loss Prevention 2001). Consolidated:
% duplicate author/number/pages/publisher fields removed; the corrected
% ScienceDirect metadata (authors S.H. Yang et al., no. 5, pp. 379-386)
% kept. The doi field now holds the bare DOI (no https://doi.org/
% prefix — styles add that themselves). Long abstract dropped.
@article{yang2001interlock,
  author   = {S.H. Yang and L.S. Tan and C.H. He},
  title    = {Automatic verification of safety interlock systems for industrial processes},
  journal  = {Journal of Loss Prevention in the Process Industries},
  volume   = {14},
  number   = {5},
  pages    = {379-386},
  year     = {2001},
  issn     = {0950-4230},
  doi      = {10.1016/S0950-4230(01)00014-6},
  url      = {https://www.sciencedirect.com/science/article/pii/S0950423001000146},
  keywords = {Safety interlock system, Symbolic model checking, Safety verification, Industrial processes}
}
% D3PM — discrete-state diffusion (NeurIPS 2021). Consolidated:
% duplicate @misc/@inproceedings headers and duplicate author/url
% fields removed; NeurIPS proceedings version kept. Citation key
% unchanged (callers cite "austin2023...").
@inproceedings{austin2023structureddenoisingdiffusionmodels,
  author    = {Austin, Jacob and Johnson, Daniel D. and Ho, Jonathan and Tarlow, Daniel and van den Berg, Rianne},
  title     = {Structured Denoising Diffusion Models in Discrete State-Spaces},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {M. Ranzato and A. Beygelzimer and Y. Dauphin and P.S. Liang and J. Wortman Vaughan},
  volume    = {34},
  pages     = {17981--17993},
  publisher = {Curran Associates, Inc.},
  year      = {2021},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2021/file/958c530554f78bcd8e97125b70e6973d-Paper.pdf}
}
@article{10.1145/1151659.1159928,
@@ -605,6 +657,7 @@ keywords = {burstiness, energy plot, generator, internet, modeling, structural m
primaryClass={cs.LG},
url={https://arxiv.org/abs/2011.13456},
}
@inproceedings{nie2023patchtst,
title={A Time Series is Worth 64 Words: Long-term Forecasting with Transformers},
author={Nie, Yuqi and Nguyen, Nam H. and Sinthong, Phanwadee and Kalagnanam, Jayant},

View File

@@ -1,4 +1,4 @@
This is pdfTeX, Version 3.141592653-2.6-1.40.28 (MiKTeX 25.12) (preloaded format=pdflatex 2026.4.14) 17 APR 2026 17:18
This is pdfTeX, Version 3.141592653-2.6-1.40.28 (MiKTeX 25.12) (preloaded format=pdflatex 2026.4.14) 20 APR 2026 17:58
entering extended mode
restricted \write18 enabled.
%&-line parsing enabled.