newest pdf

This commit is contained in:
2026-04-21 00:13:04 +08:00
parent 6f908a493b
commit 012f34ac45
7 changed files with 112 additions and 133 deletions

View File

@@ -25,7 +25,7 @@
\citation{11087622} \citation{11087622}
\@writefile{toc}{\contentsline {section}{\numberline {2}Related Work}{3}{}\protected@file@percent } \@writefile{toc}{\contentsline {section}{\numberline {2}Related Work}{3}{}\protected@file@percent }
\newlabel{sec:related}{{2}{3}{}{section.2}{}} \newlabel{sec:related}{{2}{3}{}{section.2}{}}
\citation{austin2023structureddenoisingdiffusionmodels} \citation{austin2021structured}
\citation{Lin_2020} \citation{Lin_2020}
\citation{hoogeboom2021argmaxflowsmultinomialdiffusion} \citation{hoogeboom2021argmaxflowsmultinomialdiffusion}
\citation{li2022diffusionlmimprovescontrollabletext} \citation{li2022diffusionlmimprovescontrollabletext}
@@ -113,51 +113,50 @@
\bibdata{references} \bibdata{references}
\bibcite{10.1145/3055366.3055375}{1} \bibcite{10.1145/3055366.3055375}{1}
\bibcite{info16100910}{2} \bibcite{info16100910}{2}
\bibcite{austin2023structureddenoisingdiffusionmodels}{3} \bibcite{austin2021structured}{3}
\@writefile{toc}{\contentsline {section}{\numberline {5}Conclusion and Future Work}{16}{}\protected@file@percent } \@writefile{toc}{\contentsline {section}{\numberline {5}Conclusion and Future Work}{16}{}\protected@file@percent }
\newlabel{sec:conclusion}{{5}{16}{}{section.5}{}} \newlabel{sec:conclusion}{{5}{16}{}{section.5}{}}
\bibcite{austin2021structured}{4} \bibcite{coletta2023constrained}{4}
\bibcite{coletta2023constrained}{5} \bibcite{dai2019transformerxlattentivelanguagemodels}{5}
\bibcite{dai2019transformerxlattentivelanguagemodels}{6} \bibcite{godefroid2017learnfuzzmachinelearninginput}{6}
\bibcite{godefroid2017learnfuzzmachinelearninginput}{7} \bibcite{hang2023efficient}{7}
\bibcite{hang2023efficient}{8} \bibcite{NEURIPS2020_4c5bcfec}{8}
\bibcite{NEURIPS2020_4c5bcfec}{9} \bibcite{ho2020denoising}{9}
\bibcite{ho2020denoising}{10} \bibcite{hoogeboom2021argmaxflowsmultinomialdiffusion}{10}
\bibcite{hoogeboom2021argmaxflowsmultinomialdiffusion}{11} \bibcite{jiang2023netdiffusionnetworkdataaugmentation}{11}
\bibcite{jiang2023netdiffusionnetworkdataaugmentation}{12} \bibcite{10.1007/s10844-022-00753-1}{12}
\bibcite{10.1007/s10844-022-00753-1}{13} \bibcite{kollovieh2023tsdiff}{13}
\bibcite{kollovieh2023tsdiff}{14} \bibcite{kong2021diffwaveversatilediffusionmodel}{14}
\bibcite{kong2021diffwaveversatilediffusionmodel}{15} \bibcite{pmlr-v202-kotelnikov23a}{15}
\bibcite{pmlr-v202-kotelnikov23a}{16} \bibcite{li2022diffusionlmimprovescontrollabletext}{16}
\bibcite{li2022diffusionlmimprovescontrollabletext}{17} \bibcite{lin1991divergence}{17}
\bibcite{lin1991divergence}{18} \bibcite{Lin_2020}{18}
\bibcite{Lin_2020}{19} \bibcite{liu2023pristiconditionaldiffusionframework}{19}
\bibcite{liu2023pristiconditionaldiffusionframework}{20} \bibcite{11087622}{20}
\bibcite{11087622}{21} \bibcite{7469060}{21}
\bibcite{7469060}{22} \bibcite{meng2025aflnetyearslatercoverageguided}{22}
\bibcite{meng2025aflnetyearslatercoverageguided}{23} \bibcite{Nankya2023-gp}{23}
\bibcite{Nankya2023-gp}{24} \bibcite{nist2023sp80082}{24}
\bibcite{nist2023sp80082}{25} \bibcite{nie2023patchtst}{25}
\bibcite{nie2023patchtst}{26} \bibcite{rasul2021autoregressivedenoisingdiffusionmodels}{26}
\bibcite{rasul2021autoregressivedenoisingdiffusionmodels}{27} \bibcite{Ring_2019}{27}
\bibcite{Ring_2019}{28} \bibcite{sha2026ddpm}{28}
\bibcite{sha2026ddpm}{29} \bibcite{she2019neuzzefficientfuzzingneural}{29}
\bibcite{she2019neuzzefficientfuzzingneural}{30} \bibcite{shi2024simplified}{30}
\bibcite{shi2024simplified}{31} \bibcite{shi2025tabdiff}{31}
\bibcite{shi2025tabdiff}{32} \bibcite{shin}{32}
\bibcite{shin}{33} \bibcite{sikder2023transfusion}{33}
\bibcite{sikder2023transfusion}{34} \bibcite{song2021score}{34}
\bibcite{song2021score}{35} \bibcite{stenger2024survey}{35}
\bibcite{stenger2024survey}{36} \bibcite{tashiro2021csdiconditionalscorebaseddiffusion}{36}
\bibcite{tashiro2021csdiconditionalscorebaseddiffusion}{37} \bibcite{vaswani2017attention}{37}
\bibcite{vaswani2017attention}{38} \bibcite{10.1145/1151659.1159928}{38}
\bibcite{10.1145/1151659.1159928}{39} \bibcite{wen2024diffstgprobabilisticspatiotemporalgraph}{39}
\bibcite{wen2024diffstgprobabilisticspatiotemporalgraph}{40} \bibcite{wu2022autoformerdecompositiontransformersautocorrelation}{40}
\bibcite{wu2022autoformerdecompositiontransformersautocorrelation}{41} \bibcite{yang2001interlock}{41}
\bibcite{yang2001interlock}{42} \bibcite{10.1145/3544216.3544251}{42}
\bibcite{10.1145/3544216.3544251}{43} \bibcite{yoon2019timegan}{43}
\bibcite{yoon2019timegan}{44} \bibcite{yuan2025ctu}{44}
\bibcite{yuan2025ctu}{45} \bibcite{zhou2021informerefficienttransformerlong}{45}
\bibcite{zhou2021informerefficienttransformerlong}{46} \bibcite{zhou2022fedformerfrequencyenhanceddecomposed}{46}
\bibcite{zhou2022fedformerfrequencyenhanceddecomposed}{47}
\gdef \@abspage@last{21} \gdef \@abspage@last{21}

View File

@@ -17,14 +17,6 @@ Ali, J., Ali, S., Al~Balushi, T., Nadir, Z.: Intrusion detection in industrial
Information \textbf{16}(10) (2025). \doi{10.3390/info16100910}, Information \textbf{16}(10) (2025). \doi{10.3390/info16100910},
\url{https://www.mdpi.com/2078-2489/16/10/910} \url{https://www.mdpi.com/2078-2489/16/10/910}
\bibitem{austin2023structureddenoisingdiffusionmodels}
Austin, J., Johnson, D.D., Ho, J., Tarlow, D., van~den Berg, R.: Structured
denoising diffusion models in discrete state-spaces. In: Ranzato, M.,
Beygelzimer, A., Dauphin, Y., Liang, P., Vaughan, J.W. (eds.) Advances in
Neural Information Processing Systems. vol.~34, pp. 17981--17993. Curran
Associates, Inc. (2021),
\url{https://proceedings.neurips.cc/paper_files/paper/2021/file/958c530554f78bcd8e97125b70e6973d-Paper.pdf}
\bibitem{austin2021structured} \bibitem{austin2021structured}
Austin, J., Johnson, D.D., Ho, J., Tarlow, D., van~den Berg, R.: Structured Austin, J., Johnson, D.D., Ho, J., Tarlow, D., van~den Berg, R.: Structured
denoising diffusion models in discrete state-spaces. In: Ranzato, M., denoising diffusion models in discrete state-spaces. In: Ranzato, M.,

View File

@@ -5,44 +5,44 @@ Reallocating 'name_of_file' (item size: 1) to 9 items.
The style file: splncs04.bst The style file: splncs04.bst
Reallocating 'name_of_file' (item size: 1) to 11 items. Reallocating 'name_of_file' (item size: 1) to 11 items.
Database file #1: references.bib Database file #1: references.bib
You've used 47 entries, You've used 46 entries,
2850 wiz_defined-function locations, 2850 wiz_defined-function locations,
922 strings with 18519 characters, 921 strings with 18475 characters,
and the built_in function-call counts, 36282 in all, are: and the built_in function-call counts, 35282 in all, are:
= -- 2938 = -- 2857
> -- 1441 > -- 1398
< -- 64 < -- 62
+ -- 562 + -- 546
- -- 514 - -- 499
* -- 2543 * -- 2472
:= -- 4540 := -- 4418
add.period$ -- 115 add.period$ -- 112
call.type$ -- 47 call.type$ -- 46
change.case$ -- 410 change.case$ -- 400
chr.to.int$ -- 0 chr.to.int$ -- 0
cite$ -- 47 cite$ -- 46
duplicate$ -- 3107 duplicate$ -- 3025
empty$ -- 2964 empty$ -- 2886
format.name$ -- 576 format.name$ -- 559
if$ -- 7925 if$ -- 7706
int.to.chr$ -- 0 int.to.chr$ -- 0
int.to.str$ -- 47 int.to.str$ -- 46
missing$ -- 683 missing$ -- 663
newline$ -- 147 newline$ -- 144
num.names$ -- 128 num.names$ -- 124
pop$ -- 1219 pop$ -- 1184
preamble$ -- 1 preamble$ -- 1
purify$ -- 306 purify$ -- 299
quote$ -- 0 quote$ -- 0
skip$ -- 951 skip$ -- 925
stack$ -- 0 stack$ -- 0
substring$ -- 1947 substring$ -- 1886
swap$ -- 1934 swap$ -- 1879
text.length$ -- 64 text.length$ -- 62
text.prefix$ -- 0 text.prefix$ -- 0
top$ -- 0 top$ -- 0
type$ -- 188 type$ -- 184
warning$ -- 0 warning$ -- 0
while$ -- 238 while$ -- 231
width$ -- 49 width$ -- 48
write$ -- 587 write$ -- 574

View File

@@ -1,4 +1,4 @@
This is pdfTeX, Version 3.141592653-2.6-1.40.28 (MiKTeX 25.12) (preloaded format=pdflatex 2026.4.14) 21 APR 2026 00:08 This is pdfTeX, Version 3.141592653-2.6-1.40.28 (MiKTeX 25.12) (preloaded format=pdflatex 2026.4.14) 21 APR 2026 00:12
entering extended mode entering extended mode
restricted \write18 enabled. restricted \write18 enabled.
%&-line parsing enabled. %&-line parsing enabled.
@@ -574,145 +574,145 @@ Overfull \hbox (2.54008pt too wide) in paragraph at lines 323--324
[] []
(main.bbl [16] (main.bbl [16]
Underfull \hbox (badness 1609) in paragraph at lines 37--43 Underfull \hbox (badness 1609) in paragraph at lines 29--35
[]\T1/lmr/m/n/9 Coletta, A., Gopalakr-ish-nan, S., Bor-rajo, D., Vyetrenko, S.: []\T1/lmr/m/n/9 Coletta, A., Gopalakr-ish-nan, S., Bor-rajo, D., Vyetrenko, S.:
On the con- On the con-
[] []
[17]
Underfull \hbox (badness 2653) in paragraph at lines 99--106 Underfull \hbox (badness 2653) in paragraph at lines 91--98
[]\T1/lmr/m/n/9 Kollovieh, M., Ansari, A.F., Bohlke-Schneider, M., Zschieg-ner, []\T1/lmr/m/n/9 Kollovieh, M., Ansari, A.F., Bohlke-Schneider, M., Zschieg-ner,
J., Wang, J., Wang,
[] []
Underfull \hbox (badness 2662) in paragraph at lines 99--106 Underfull \hbox (badness 2662) in paragraph at lines 91--98
\T1/lmr/m/n/9 H., Wang, Y.B.: Pre-dict, re-fine, syn-the-size: Self-guiding dif \T1/lmr/m/n/9 H., Wang, Y.B.: Pre-dict, re-fine, syn-the-size: Self-guiding dif
-fu-sion mod-els -fu-sion mod-els
[] []
[17]
Underfull \hbox (badness 2626) in paragraph at lines 121--127 Underfull \hbox (badness 2626) in paragraph at lines 113--119
\T1/lmr/m/n/9 wal, A., Bel-grave, D., Cho, K., Oh, A. (eds.) Ad-vances in Neu-r \T1/lmr/m/n/9 wal, A., Bel-grave, D., Cho, K., Oh, A. (eds.) Ad-vances in Neu-r
al In- al In-
[] []
Underfull \hbox (badness 1946) in paragraph at lines 121--127 Underfull \hbox (badness 1946) in paragraph at lines 113--119
\T1/lmr/m/n/9 for-ma-tion Pro-cess-ing Sys-tems. vol. 35, pp. 4328--4343. Cur-r \T1/lmr/m/n/9 for-ma-tion Pro-cess-ing Sys-tems. vol. 35, pp. 4328--4343. Cur-r
an As-so-ciates, an As-so-ciates,
[] []
Underfull \hbox (badness 1603) in paragraph at lines 133--138 Underfull \hbox (badness 1603) in paragraph at lines 125--130
[]\T1/lmr/m/n/9 Lin, Z., Jain, A., Wang, C., Fanti, G., Sekar, V.: Us-ing gans []\T1/lmr/m/n/9 Lin, Z., Jain, A., Wang, C., Fanti, G., Sekar, V.: Us-ing gans
for shar- for shar-
[] []
Underfull \hbox (badness 2310) in paragraph at lines 133--138 Underfull \hbox (badness 2310) in paragraph at lines 125--130
\T1/lmr/m/n/9 tions. In: Pro-ceed-ings of the ACM In-ter-net Mea-sure-ment Con- \T1/lmr/m/n/9 tions. In: Pro-ceed-ings of the ACM In-ter-net Mea-sure-ment Con-
fer-ence. p. fer-ence. p.
[] []
Underfull \hbox (badness 1394) in paragraph at lines 133--138 Underfull \hbox (badness 1394) in paragraph at lines 125--130
\T1/lmr/m/n/9 464^^U483. IMC '20, As-so-ci-a-tion for Com-put-ing Ma-chin-ery, \T1/lmr/m/n/9 464^^U483. IMC '20, As-so-ci-a-tion for Com-put-ing Ma-chin-ery,
New York, NY, New York, NY,
[] []
Underfull \hbox (badness 5316) in paragraph at lines 133--138 Underfull \hbox (badness 5316) in paragraph at lines 125--130
\T1/lmr/m/n/9 USA (2020). https://doi.org/10.1145/3419394.3423643, $\T1/lmtt/m/ \T1/lmr/m/n/9 USA (2020). https://doi.org/10.1145/3419394.3423643, $\T1/lmtt/m/
n/9 https : / / doi . org / 10 . n/9 https : / / doi . org / 10 .
[] []
Underfull \hbox (badness 1043) in paragraph at lines 140--144 Underfull \hbox (badness 1043) in paragraph at lines 132--136
[]\T1/lmr/m/n/9 Liu, M., Huang, H., Feng, H., Sun, L., Du, B., Fu, Y.: Pristi: []\T1/lmr/m/n/9 Liu, M., Huang, H., Feng, H., Sun, L., Du, B., Fu, Y.: Pristi:
A con-di- A con-di-
[] []
[18]
Underfull \hbox (badness 5592) in paragraph at lines 168--172 Underfull \hbox (badness 5592) in paragraph at lines 160--164
[]\T1/lmr/m/n/9 National In-sti-tute of Stan-dards and Tech-nol-ogy: Guide to o []\T1/lmr/m/n/9 National In-sti-tute of Stan-dards and Tech-nol-ogy: Guide to o
p-er-a-tional p-er-a-tional
[] []
Underfull \hbox (badness 3514) in paragraph at lines 168--172 Underfull \hbox (badness 3514) in paragraph at lines 160--164
\T1/lmr/m/n/9 tech-nol-ogy (ot) se-cu-rity. Spe-cial Pub-li-ca-tion 800-82 Rev. \T1/lmr/m/n/9 tech-nol-ogy (ot) se-cu-rity. Spe-cial Pub-li-ca-tion 800-82 Rev.
3, NIST (sep 3, NIST (sep
[] []
[18]
Underfull \hbox (badness 1253) in paragraph at lines 195--200 Underfull \hbox (badness 1253) in paragraph at lines 187--192
\T1/lmr/m/n/9 data (jan 2026). https://doi.org/10.2139/ssrn.6055903, $\T1/lmtt/ \T1/lmr/m/n/9 data (jan 2026). https://doi.org/10.2139/ssrn.6055903, $\T1/lmtt/
m/n/9 https : / / papers . ssrn . m/n/9 https : / / papers . ssrn .
[] []
Underfull \hbox (badness 1226) in paragraph at lines 207--213 Underfull \hbox (badness 1226) in paragraph at lines 199--205
[]\T1/lmr/m/n/9 Shi, J., Han, K., Wang, Z., Doucet, A., Tit-sias, M.: Sim-pli-f []\T1/lmr/m/n/9 Shi, J., Han, K., Wang, Z., Doucet, A., Tit-sias, M.: Sim-pli-f
ied and gen- ied and gen-
[] []
Underfull \hbox (badness 3954) in paragraph at lines 207--213 Underfull \hbox (badness 3954) in paragraph at lines 199--205
\T1/lmr/m/n/9 er-al-ized masked dif-fu-sion for dis-crete data. In: Glober-son, \T1/lmr/m/n/9 er-al-ized masked dif-fu-sion for dis-crete data. In: Glober-son,
A., Mackey, A., Mackey,
[] []
Underfull \hbox (badness 3701) in paragraph at lines 207--213 Underfull \hbox (badness 3701) in paragraph at lines 199--205
\T1/lmr/m/n/9 vances in Neu-ral In-for-ma-tion Pro-cess-ing Sys-tems. vol. 37, \T1/lmr/m/n/9 vances in Neu-ral In-for-ma-tion Pro-cess-ing Sys-tems. vol. 37,
pp. 103131-- pp. 103131--
[] []
Underfull \hbox (badness 10000) in paragraph at lines 207--213 Underfull \hbox (badness 10000) in paragraph at lines 199--205
\T1/lmr/m/n/9 103167. Cur-ran As-so-ciates, Inc. (2024). https://doi.org/10.522 \T1/lmr/m/n/9 103167. Cur-ran As-so-ciates, Inc. (2024). https://doi.org/10.522
02/079017- 02/079017-
[] []
Underfull \hbox (badness 10000) in paragraph at lines 207--213 Underfull \hbox (badness 10000) in paragraph at lines 199--205
\T1/lmr/m/n/9 3277, $\T1/lmtt/m/n/9 https : / / proceedings . neurips . cc / pa \T1/lmr/m/n/9 3277, $\T1/lmtt/m/n/9 https : / / proceedings . neurips . cc / pa
per _ files / paper / 2024 / file / per _ files / paper / 2024 / file /
[] []
Underfull \hbox (badness 10000) in paragraph at lines 225--230 Underfull \hbox (badness 10000) in paragraph at lines 217--222
[]\T1/lmr/m/n/9 Sikder, M.F., Ra-machan-dran-pil-lai, R., Heintz, F.: Trans-fu- []\T1/lmr/m/n/9 Sikder, M.F., Ra-machan-dran-pil-lai, R., Heintz, F.: Trans-fu-
sion: Gen- sion: Gen-
[] []
Underfull \hbox (badness 10000) in paragraph at lines 225--230 Underfull \hbox (badness 10000) in paragraph at lines 217--222
\T1/lmr/m/n/9 er-at-ing long, high fi-delity time se-ries us-ing dif-fu-sion mo \T1/lmr/m/n/9 er-at-ing long, high fi-delity time se-ries us-ing dif-fu-sion mo
d-els with d-els with
[] []
Underfull \hbox (badness 10000) in paragraph at lines 225--230 Underfull \hbox (badness 10000) in paragraph at lines 217--222
\T1/lmr/m/n/9 trans-form-ers. Ma-chine Learn-ing with Ap-pli-ca-tions \T1/lmr/b \T1/lmr/m/n/9 trans-form-ers. Ma-chine Learn-ing with Ap-pli-ca-tions \T1/lmr/b
x/n/9 20\T1/lmr/m/n/9 , 100652 (2025). x/n/9 20\T1/lmr/m/n/9 , 100652 (2025).
[] []
Underfull \hbox (badness 10000) in paragraph at lines 225--230 Underfull \hbox (badness 10000) in paragraph at lines 217--222
\T1/lmr/m/n/9 https://doi.org/https://doi.org/10.1016/j.mlwa.2025.100652, $\T1/ \T1/lmr/m/n/9 https://doi.org/https://doi.org/10.1016/j.mlwa.2025.100652, $\T1/
lmtt/m/n/9 https : / / www . lmtt/m/n/9 https : / / www .
[] []
[19] [19]
Underfull \hbox (badness 2229) in paragraph at lines 258--262 Underfull \hbox (badness 2229) in paragraph at lines 250--254
\T1/lmr/m/n/9 er-a-tion. SIG-COMM Com-put. Com-mun. Rev. \T1/lmr/bx/n/9 36\T1/l \T1/lmr/m/n/9 er-a-tion. SIG-COMM Com-put. Com-mun. Rev. \T1/lmr/bx/n/9 36\T1/l
mr/m/n/9 (4), 111^^U122 (Aug 2006). mr/m/n/9 (4), 111^^U122 (Aug 2006).
[] []
Underfull \hbox (badness 10000) in paragraph at lines 258--262 Underfull \hbox (badness 10000) in paragraph at lines 250--254
\T1/lmr/m/n/9 https://doi.org/10.1145/1151659.1159928, $\T1/lmtt/m/n/9 https : \T1/lmr/m/n/9 https://doi.org/10.1145/1151659.1159928, $\T1/lmtt/m/n/9 https :
/ / doi . org / 10 . 1145 / 1151659 . / / doi . org / 10 . 1145 / 1151659 .
[] []
@@ -724,10 +724,10 @@ L3 programming layer <2025-12-24>
*********** ***********
) )
Here is how much of TeX's memory you used: Here is how much of TeX's memory you used:
6342 strings out of 467871 6341 strings out of 467871
97389 string characters out of 5435199 97343 string characters out of 5435199
552257 words of memory out of 5000000 552253 words of memory out of 5000000
35106 multiletter control sequences out of 15000+600000 35105 multiletter control sequences out of 15000+600000
706871 words of font info for 99 fonts, out of 8000000 for 9000 706871 words of font info for 99 fonts, out of 8000000 for 9000
1141 hyphenation exceptions out of 8191 1141 hyphenation exceptions out of 8191
57i,9n,65p,2477b,352s stack positions out of 10000i,1000n,20000p,200000b,200000s 57i,9n,65p,2477b,352s stack positions out of 10000i,1000n,20000p,200000b,200000s
@@ -744,7 +744,7 @@ type1/public/lm/lmri9.pfb><D:/MikTex/fonts/type1/public/lm/lmsy10.pfb><D:/MikTe
x/fonts/type1/public/lm/lmsy7.pfb><D:/MikTex/fonts/type1/public/lm/lmsy9.pfb><D x/fonts/type1/public/lm/lmsy7.pfb><D:/MikTex/fonts/type1/public/lm/lmsy9.pfb><D
:/MikTex/fonts/type1/public/lm/lmtt10.pfb><D:/MikTex/fonts/type1/public/lm/lmtt :/MikTex/fonts/type1/public/lm/lmtt10.pfb><D:/MikTex/fonts/type1/public/lm/lmtt
9.pfb><D:/MikTex/fonts/type1/public/amsfonts/symbols/msbm10.pfb> 9.pfb><D:/MikTex/fonts/type1/public/amsfonts/symbols/msbm10.pfb>
Output written on main.pdf (21 pages, 1116645 bytes). Output written on main.pdf (21 pages, 1116663 bytes).
PDF statistics: PDF statistics:
317 PDF objects out of 1000 (max. 8388607) 317 PDF objects out of 1000 (max. 8388607)
0 named destinations out of 1000 (max. 500000) 0 named destinations out of 1000 (max. 500000)

View File

@@ -53,7 +53,7 @@ Early generation of network data oriented towards "realism" mostly remained at t
Diffusion models exhibit good fit along this path: DDPM achieves high-quality sampling and stable optimization through efficient $\epsilon$ parameterization and weighted variational objectives \citep{NEURIPS2020_4c5bcfec}, the SDE perspective unifies score-based and diffusion, providing likelihood evaluation and prediction-correction sampling strategies based on probability flow ODEs \citep{song2021score}. For time series, TimeGrad replaces the constrained output distribution with conditional denoising, capturing high-dimensional correlations at each step \citep{rasul2021autoregressivedenoisingdiffusionmodels}; CSDI explicitly performs conditional diffusion and uses two-dimensional attention to simultaneously leverage temporal and cross-feature dependencies, suitable for conditioning and filling in missing values \citep{tashiro2021csdiconditionalscorebaseddiffusion}; in a more general spatio-temporal structure, DiffSTG generalizes diffusion to spatio-temporal graphs, combining TCN/GCN with denoising U-Net to improve CRPS and inference efficiency in a non-autoregressive manner \citep{wen2024diffstgprobabilisticspatiotemporalgraph}, and PriSTI further enhances conditional features and geographical relationships, maintaining robustness under high missing rates and sensor failures \citep{liu2023pristiconditionaldiffusionframework}; in long sequences and continuous domains, DiffWave verifies that diffusion can also match the quality of strong vocoders under non-autoregressive fast synthesis \citep{kong2021diffwaveversatilediffusionmodel}; studies on cellular communication traffic show that diffusion can recover spatio-temporal patterns and provide uncertainty characterization at the urban scale \citep{11087622}. 
These results overall point to a conclusion: when the research focus is on "telemetry/high-level features" rather than raw messages, diffusion models provide stable and fine-grained distribution fitting and uncertainty quantification, which is exactly in line with the requirements of ICS telemetry synthesis. Meanwhile, directly entrusting all structures to a "monolithic diffusion" is not advisable: long-range temporal skeletons and fine-grained marginal distributions often have optimization tensions, requiring explicit decoupling in modeling. Diffusion models exhibit good fit along this path: DDPM achieves high-quality sampling and stable optimization through efficient $\epsilon$ parameterization and weighted variational objectives \citep{NEURIPS2020_4c5bcfec}, the SDE perspective unifies score-based and diffusion, providing likelihood evaluation and prediction-correction sampling strategies based on probability flow ODEs \citep{song2021score}. For time series, TimeGrad replaces the constrained output distribution with conditional denoising, capturing high-dimensional correlations at each step \citep{rasul2021autoregressivedenoisingdiffusionmodels}; CSDI explicitly performs conditional diffusion and uses two-dimensional attention to simultaneously leverage temporal and cross-feature dependencies, suitable for conditioning and filling in missing values \citep{tashiro2021csdiconditionalscorebaseddiffusion}; in a more general spatio-temporal structure, DiffSTG generalizes diffusion to spatio-temporal graphs, combining TCN/GCN with denoising U-Net to improve CRPS and inference efficiency in a non-autoregressive manner \citep{wen2024diffstgprobabilisticspatiotemporalgraph}, and PriSTI further enhances conditional features and geographical relationships, maintaining robustness under high missing rates and sensor failures \citep{liu2023pristiconditionaldiffusionframework}; in long sequences and continuous domains, DiffWave verifies that diffusion can also match the 
quality of strong vocoders under non-autoregressive fast synthesis \citep{kong2021diffwaveversatilediffusionmodel}; studies on cellular communication traffic show that diffusion can recover spatio-temporal patterns and provide uncertainty characterization at the urban scale \citep{11087622}. These results overall point to a conclusion: when the research focus is on "telemetry/high-level features" rather than raw messages, diffusion models provide stable and fine-grained distribution fitting and uncertainty quantification, which is exactly in line with the requirements of ICS telemetry synthesis. Meanwhile, directly entrusting all structures to a "monolithic diffusion" is not advisable: long-range temporal skeletons and fine-grained marginal distributions often have optimization tensions, requiring explicit decoupling in modeling.
Looking further into the mechanism complexity of ICS: its channel types are inherently mixed, containing both continuous process trajectories and discrete supervision/status variables, and discrete channels must be "legal" under operational constraints. The aforementioned progress in time series diffusion has mainly occurred in continuous spaces, but discrete diffusion has also developed systematic methods: D3PM improves sampling quality and likelihood through absorption/masking and structured transitions in discrete state spaces \citep{austin2023structureddenoisingdiffusionmodels}, subsequent masked diffusion provides stable reconstruction on categorical data in a more simplified form \citep{Lin_2020}, multinomial diffusion directly defines diffusion on a finite vocabulary through mechanisms such as argmax flows \citep{hoogeboom2021argmaxflowsmultinomialdiffusion}, and Diffusion-LM demonstrates an effective path for controllable text generation by imposing gradient constraints in continuous latent spaces \citep{li2022diffusionlmimprovescontrollabletext}. From the perspectives of protocols and finite-state machines, coverage-guided fuzz testing emphasizes the criticality of "sequence legality and state coverage" \citep{meng2025aflnetyearslatercoverageguided,godefroid2017learnfuzzmachinelearninginput,she2019neuzzefficientfuzzingneural}, echoing the concept of "legality by construction" in discrete diffusion: preferentially adopting absorption/masking diffusion on discrete channels, supplemented by type-aware conditioning and sampling constraints, to avoid semantic invalidity and marginal distortion caused by post hoc thresholding. Looking further into the mechanism complexity of ICS: its channel types are inherently mixed, containing both continuous process trajectories and discrete supervision/status variables, and discrete channels must be "legal" under operational constraints. 
The aforementioned progress in time series diffusion has mainly occurred in continuous spaces, but discrete diffusion has also developed systematic methods: D3PM improves sampling quality and likelihood through absorption/masking and structured transitions in discrete state spaces \citep{austin2021structured}, subsequent masked diffusion provides stable reconstruction on categorical data in a more simplified form \citep{Lin_2020}, multinomial diffusion directly defines diffusion on a finite vocabulary through mechanisms such as argmax flows \citep{hoogeboom2021argmaxflowsmultinomialdiffusion}, and Diffusion-LM demonstrates an effective path for controllable text generation by imposing gradient constraints in continuous latent spaces \citep{li2022diffusionlmimprovescontrollabletext}. From the perspectives of protocols and finite-state machines, coverage-guided fuzz testing emphasizes the criticality of "sequence legality and state coverage" \citep{meng2025aflnetyearslatercoverageguided,godefroid2017learnfuzzmachinelearninginput,she2019neuzzefficientfuzzingneural}, echoing the concept of "legality by construction" in discrete diffusion: preferentially adopting absorption/masking diffusion on discrete channels, supplemented by type-aware conditioning and sampling constraints, to avoid semantic invalidity and marginal distortion caused by post hoc thresholding.
From the perspective of high-level synthesis, the temporal structure is equally indispensable: ICS control often involves delay effects, phased operating conditions, and cross-channel coupling, requiring models to be able to characterize low-frequency, long-range dependencies while also overlaying multi-faceted fine-grained fluctuations on them. The Transformer series has provided sufficient evidence in long-sequence time series tasks: Transformer-XL breaks through the fixed-length context limitation through a reusable memory mechanism and significantly enhances long-range dependency expression \citep{dai2019transformerxlattentivelanguagemodels}; Informer uses ProbSparse attention and efficient decoding to balance span and efficiency in long-sequence prediction \citep{zhou2021informerefficienttransformerlong}; Autoformer robustly models long-term seasonality and trends through autocorrelation and decomposition mechanisms \citep{wu2022autoformerdecompositiontransformersautocorrelation}; FEDformer further improves long-period prediction performance in frequency domain enhancement and decomposition \citep{zhou2022fedformerfrequencyenhanceddecomposed}; PatchTST enhances the stability and generalization of long-sequence multivariate prediction through local patch-based representation and channel-independent modeling \citep{nie2023patchtst}. Combining our previous positioning of diffusion, this chain of evidence points to a natural division of labor: using attention-based sequence models to first extract stable low-frequency trends/conditions (long-range skeletons), and then allowing diffusion to focus on margins and details in the residual space; meanwhile, discrete masking/absorbing diffusion is applied to supervised/pattern variables to ensure vocabulary legality by construction. 
This design not only inherits the advantages of time series diffusion in distribution fitting and uncertainty characterization \citep{rasul2021autoregressivedenoisingdiffusionmodels,tashiro2021csdiconditionalscorebaseddiffusion,wen2024diffstgprobabilisticspatiotemporalgraph,liu2023pristiconditionaldiffusionframework,kong2021diffwaveversatilediffusionmodel,11087622}, but also stabilizes the macroscopic temporal support through the long-range attention of Transformer, enabling the formation of an operational integrated generation pipeline under the mixed types and multi-scale dynamics of ICS. From the perspective of high-level synthesis, the temporal structure is equally indispensable: ICS control often involves delay effects, phased operating conditions, and cross-channel coupling, requiring models to be able to characterize low-frequency, long-range dependencies while also overlaying multi-faceted fine-grained fluctuations on them. The Transformer series has provided sufficient evidence in long-sequence time series tasks: Transformer-XL breaks through the fixed-length context limitation through a reusable memory mechanism and significantly enhances long-range dependency expression \citep{dai2019transformerxlattentivelanguagemodels}; Informer uses ProbSparse attention and efficient decoding to balance span and efficiency in long-sequence prediction \citep{zhou2021informerefficienttransformerlong}; Autoformer robustly models long-term seasonality and trends through autocorrelation and decomposition mechanisms \citep{wu2022autoformerdecompositiontransformersautocorrelation}; FEDformer further improves long-period prediction performance in frequency domain enhancement and decomposition \citep{zhou2022fedformerfrequencyenhanceddecomposed}; PatchTST enhances the stability and generalization of long-sequence multivariate prediction through local patch-based representation and channel-independent modeling \citep{nie2023patchtst}. 
Combining our previous positioning of diffusion, this chain of evidence points to a natural division of labor: using attention-based sequence models to first extract stable low-frequency trends/conditions (long-range skeletons), and then allowing diffusion to focus on margins and details in the residual space; meanwhile, discrete masking/absorbing diffusion is applied to supervised/pattern variables to ensure vocabulary legality by construction. This design not only inherits the advantages of time series diffusion in distribution fitting and uncertainty characterization \citep{rasul2021autoregressivedenoisingdiffusionmodels,tashiro2021csdiconditionalscorebaseddiffusion,wen2024diffstgprobabilisticspatiotemporalgraph,liu2023pristiconditionaldiffusionframework,kong2021diffwaveversatilediffusionmodel,11087622}, but also stabilizes the macroscopic temporal support through the long-range attention of Transformer, enabling the formation of an operational integrated generation pipeline under the mixed types and multi-scale dynamics of ICS.

View File

@@ -604,18 +604,6 @@ keywords = {Safety interlock system, Symbolic model checking, Safety verificatio
abstract = {The safety interlock system (SIS) is one of the most important protective measurements in industrial processes that provide automatic actions to correct an abnormal plant event. This paper considers the use of formal techniques based on symbolic model checking and computation tree logic (CTL) in the specification to automatically verify the SIS for industrial processes. It addresses the problem of modelling industrial processes and presenting the SIS in CTL. It shows how symbolic model checking can be used efficiently in the verification of a SIS. A transferring system for a penicillin process is used as a case study.} abstract = {The safety interlock system (SIS) is one of the most important protective measurements in industrial processes that provide automatic actions to correct an abnormal plant event. This paper considers the use of formal techniques based on symbolic model checking and computation tree logic (CTL) in the specification to automatically verify the SIS for industrial processes. It addresses the problem of modelling industrial processes and presenting the SIS in CTL. It shows how symbolic model checking can be used efficiently in the verification of a SIS. A transferring system for a penicillin process is used as a case study.}
} }
@inproceedings{austin2023structureddenoisingdiffusionmodels,
author = {Austin, Jacob and Johnson, Daniel D. and Ho, Jonathan and Tarlow, Daniel and van den Berg, Rianne},
booktitle = {Advances in Neural Information Processing Systems},
editor = {M. Ranzato and A. Beygelzimer and Y. Dauphin and P.S. Liang and J. Wortman Vaughan},
pages = {17981--17993},
publisher = {Curran Associates, Inc.},
title = {Structured Denoising Diffusion Models in Discrete State-Spaces},
url = {https://proceedings.neurips.cc/paper_files/paper/2021/file/958c530554f78bcd8e97125b70e6973d-Paper.pdf},
volume = {34},
year = {2021}
}
@article{10.1145/1151659.1159928, @article{10.1145/1151659.1159928,
author = {Vishwanath, Kashi Venkatesh and Vahdat, Amin}, author = {Vishwanath, Kashi Venkatesh and Vahdat, Amin},
title = {Realistic and responsive network traffic generation}, title = {Realistic and responsive network traffic generation},