This commit is contained in:
2026-04-21 00:20:06 +08:00
parent 012f34ac45
commit 5f4458f77b
7 changed files with 105 additions and 125 deletions

View File

@@ -15,7 +15,7 @@
\citation{10.1145/3544216.3544251}
\citation{Lin_2020}
\citation{7469060,10.1145/3055366.3055375}
\citation{NEURIPS2020_4c5bcfec}
\citation{ho2020denoising}
\citation{song2021score}
\citation{rasul2021autoregressivedenoisingdiffusionmodels}
\citation{tashiro2021csdiconditionalscorebaseddiffusion}
@@ -120,43 +120,42 @@
\bibcite{dai2019transformerxlattentivelanguagemodels}{5}
\bibcite{godefroid2017learnfuzzmachinelearninginput}{6}
\bibcite{hang2023efficient}{7}
\bibcite{NEURIPS2020_4c5bcfec}{8}
\bibcite{ho2020denoising}{9}
\bibcite{hoogeboom2021argmaxflowsmultinomialdiffusion}{10}
\bibcite{jiang2023netdiffusionnetworkdataaugmentation}{11}
\bibcite{10.1007/s10844-022-00753-1}{12}
\bibcite{kollovieh2023tsdiff}{13}
\bibcite{kong2021diffwaveversatilediffusionmodel}{14}
\bibcite{pmlr-v202-kotelnikov23a}{15}
\bibcite{li2022diffusionlmimprovescontrollabletext}{16}
\bibcite{lin1991divergence}{17}
\bibcite{Lin_2020}{18}
\bibcite{liu2023pristiconditionaldiffusionframework}{19}
\bibcite{11087622}{20}
\bibcite{7469060}{21}
\bibcite{meng2025aflnetyearslatercoverageguided}{22}
\bibcite{Nankya2023-gp}{23}
\bibcite{nist2023sp80082}{24}
\bibcite{nie2023patchtst}{25}
\bibcite{rasul2021autoregressivedenoisingdiffusionmodels}{26}
\bibcite{Ring_2019}{27}
\bibcite{sha2026ddpm}{28}
\bibcite{she2019neuzzefficientfuzzingneural}{29}
\bibcite{shi2024simplified}{30}
\bibcite{shi2025tabdiff}{31}
\bibcite{shin}{32}
\bibcite{sikder2023transfusion}{33}
\bibcite{song2021score}{34}
\bibcite{stenger2024survey}{35}
\bibcite{tashiro2021csdiconditionalscorebaseddiffusion}{36}
\bibcite{vaswani2017attention}{37}
\bibcite{10.1145/1151659.1159928}{38}
\bibcite{wen2024diffstgprobabilisticspatiotemporalgraph}{39}
\bibcite{wu2022autoformerdecompositiontransformersautocorrelation}{40}
\bibcite{yang2001interlock}{41}
\bibcite{10.1145/3544216.3544251}{42}
\bibcite{yoon2019timegan}{43}
\bibcite{yuan2025ctu}{44}
\bibcite{zhou2021informerefficienttransformerlong}{45}
\bibcite{zhou2022fedformerfrequencyenhanceddecomposed}{46}
\gdef \@abspage@last{21}
\bibcite{ho2020denoising}{8}
\bibcite{hoogeboom2021argmaxflowsmultinomialdiffusion}{9}
\bibcite{jiang2023netdiffusionnetworkdataaugmentation}{10}
\bibcite{10.1007/s10844-022-00753-1}{11}
\bibcite{kollovieh2023tsdiff}{12}
\bibcite{kong2021diffwaveversatilediffusionmodel}{13}
\bibcite{pmlr-v202-kotelnikov23a}{14}
\bibcite{li2022diffusionlmimprovescontrollabletext}{15}
\bibcite{lin1991divergence}{16}
\bibcite{Lin_2020}{17}
\bibcite{liu2023pristiconditionaldiffusionframework}{18}
\bibcite{11087622}{19}
\bibcite{7469060}{20}
\bibcite{meng2025aflnetyearslatercoverageguided}{21}
\bibcite{Nankya2023-gp}{22}
\bibcite{nist2023sp80082}{23}
\bibcite{nie2023patchtst}{24}
\bibcite{rasul2021autoregressivedenoisingdiffusionmodels}{25}
\bibcite{Ring_2019}{26}
\bibcite{sha2026ddpm}{27}
\bibcite{she2019neuzzefficientfuzzingneural}{28}
\bibcite{shi2024simplified}{29}
\bibcite{shi2025tabdiff}{30}
\bibcite{shin}{31}
\bibcite{sikder2023transfusion}{32}
\bibcite{song2021score}{33}
\bibcite{stenger2024survey}{34}
\bibcite{tashiro2021csdiconditionalscorebaseddiffusion}{35}
\bibcite{vaswani2017attention}{36}
\bibcite{10.1145/1151659.1159928}{37}
\bibcite{wen2024diffstgprobabilisticspatiotemporalgraph}{38}
\bibcite{wu2022autoformerdecompositiontransformersautocorrelation}{39}
\bibcite{yang2001interlock}{40}
\bibcite{10.1145/3544216.3544251}{41}
\bibcite{yoon2019timegan}{42}
\bibcite{yuan2025ctu}{43}
\bibcite{zhou2021informerefficienttransformerlong}{44}
\bibcite{zhou2022fedformerfrequencyenhanceddecomposed}{45}
\gdef \@abspage@last{20}

View File

@@ -52,13 +52,6 @@ Hang, T., Gu, S., Li, C., Bao, J., Chen, D., Hu, H., Geng, X., Guo, B.:
of the IEEE/CVF International Conference on Computer Vision (ICCV). pp.
7441--7451 (October 2023)
\bibitem{NEURIPS2020_4c5bcfec}
Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. In:
Larochelle, H., Ranzato, M., Hadsell, R., Balcan, M., Lin, H. (eds.) Advances
in Neural Information Processing Systems. vol.~33, pp. 6840--6851. Curran
Associates, Inc. (2020),
\url{https://proceedings.neurips.cc/paper_files/paper/2020/file/4c5bcfec8584af0d967f1ab10179ca4b-Paper.pdf}
\bibitem{ho2020denoising}
Ho, J., Jain, A., Abbeel, P.: Denoising diffusion probabilistic models. In:
Larochelle, H., Ranzato, M., Hadsell, R., Balcan, M., Lin, H. (eds.) Advances

View File

@@ -5,44 +5,44 @@ Reallocating 'name_of_file' (item size: 1) to 9 items.
The style file: splncs04.bst
Reallocating 'name_of_file' (item size: 1) to 11 items.
Database file #1: references.bib
You've used 46 entries,
You've used 45 entries,
2850 wiz_defined-function locations,
921 strings with 18475 characters,
and the built_in function-call counts, 35282 in all, are:
= -- 2857
> -- 1398
< -- 62
+ -- 546
- -- 499
* -- 2472
:= -- 4418
add.period$ -- 112
call.type$ -- 46
change.case$ -- 400
919 strings with 18435 characters,
and the built_in function-call counts, 34400 in all, are:
= -- 2785
> -- 1365
< -- 60
+ -- 534
- -- 488
* -- 2411
:= -- 4311
add.period$ -- 109
call.type$ -- 45
change.case$ -- 392
chr.to.int$ -- 0
cite$ -- 46
duplicate$ -- 3025
empty$ -- 2886
format.name$ -- 559
if$ -- 7706
cite$ -- 45
duplicate$ -- 2949
empty$ -- 2813
format.name$ -- 546
if$ -- 7511
int.to.chr$ -- 0
int.to.str$ -- 46
missing$ -- 663
newline$ -- 144
num.names$ -- 124
pop$ -- 1184
int.to.str$ -- 45
missing$ -- 645
newline$ -- 141
num.names$ -- 120
pop$ -- 1153
preamble$ -- 1
purify$ -- 299
purify$ -- 294
quote$ -- 0
skip$ -- 925
skip$ -- 902
stack$ -- 0
substring$ -- 1886
swap$ -- 1879
text.length$ -- 62
substring$ -- 1833
swap$ -- 1830
text.length$ -- 60
text.prefix$ -- 0
top$ -- 0
type$ -- 184
type$ -- 180
warning$ -- 0
while$ -- 231
width$ -- 48
write$ -- 574
while$ -- 224
width$ -- 47
write$ -- 561

View File

@@ -1,4 +1,4 @@
This is pdfTeX, Version 3.141592653-2.6-1.40.28 (MiKTeX 25.12) (preloaded format=pdflatex 2026.4.14) 21 APR 2026 00:12
This is pdfTeX, Version 3.141592653-2.6-1.40.28 (MiKTeX 25.12) (preloaded format=pdflatex 2026.4.14) 21 APR 2026 00:15
entering extended mode
restricted \write18 enabled.
%&-line parsing enabled.
@@ -580,154 +580,154 @@ Underfull \hbox (badness 1609) in paragraph at lines 29--35
[]
Underfull \hbox (badness 2653) in paragraph at lines 91--98
Underfull \hbox (badness 2653) in paragraph at lines 84--91
[]\T1/lmr/m/n/9 Kollovieh, M., Ansari, A.F., Bohlke-Schneider, M., Zschieg-ner,
J., Wang,
[]
Underfull \hbox (badness 2662) in paragraph at lines 91--98
Underfull \hbox (badness 2662) in paragraph at lines 84--91
\T1/lmr/m/n/9 H., Wang, Y.B.: Pre-dict, re-fine, syn-the-size: Self-guiding dif
-fu-sion mod-els
[]
[17]
Underfull \hbox (badness 2626) in paragraph at lines 113--119
Underfull \hbox (badness 2626) in paragraph at lines 106--112
\T1/lmr/m/n/9 wal, A., Bel-grave, D., Cho, K., Oh, A. (eds.) Ad-vances in Neu-r
al In-
[]
Underfull \hbox (badness 1946) in paragraph at lines 113--119
Underfull \hbox (badness 1946) in paragraph at lines 106--112
\T1/lmr/m/n/9 for-ma-tion Pro-cess-ing Sys-tems. vol. 35, pp. 4328--4343. Cur-r
an As-so-ciates,
[]
Underfull \hbox (badness 1603) in paragraph at lines 125--130
Underfull \hbox (badness 1603) in paragraph at lines 118--123
[]\T1/lmr/m/n/9 Lin, Z., Jain, A., Wang, C., Fanti, G., Sekar, V.: Us-ing gans
for shar-
[]
Underfull \hbox (badness 2310) in paragraph at lines 125--130
Underfull \hbox (badness 2310) in paragraph at lines 118--123
\T1/lmr/m/n/9 tions. In: Pro-ceed-ings of the ACM In-ter-net Mea-sure-ment Con-
fer-ence. p.
[]
Underfull \hbox (badness 1394) in paragraph at lines 125--130
Underfull \hbox (badness 1394) in paragraph at lines 118--123
\T1/lmr/m/n/9 464^^U483. IMC '20, As-so-ci-a-tion for Com-put-ing Ma-chin-ery,
New York, NY,
[]
Underfull \hbox (badness 5316) in paragraph at lines 125--130
Underfull \hbox (badness 5316) in paragraph at lines 118--123
\T1/lmr/m/n/9 USA (2020). https://doi.org/10.1145/3419394.3423643, $\T1/lmtt/m/
n/9 https : / / doi . org / 10 .
[]
Underfull \hbox (badness 1043) in paragraph at lines 132--136
Underfull \hbox (badness 1043) in paragraph at lines 125--129
[]\T1/lmr/m/n/9 Liu, M., Huang, H., Feng, H., Sun, L., Du, B., Fu, Y.: Pristi:
A con-di-
[]
Underfull \hbox (badness 5592) in paragraph at lines 160--164
Underfull \hbox (badness 5592) in paragraph at lines 153--157
[]\T1/lmr/m/n/9 National In-sti-tute of Stan-dards and Tech-nol-ogy: Guide to o
p-er-a-tional
[]
Underfull \hbox (badness 3514) in paragraph at lines 160--164
Underfull \hbox (badness 3514) in paragraph at lines 153--157
\T1/lmr/m/n/9 tech-nol-ogy (ot) se-cu-rity. Spe-cial Pub-li-ca-tion 800-82 Rev.
3, NIST (sep
[]
[18]
Underfull \hbox (badness 1253) in paragraph at lines 187--192
Underfull \hbox (badness 1253) in paragraph at lines 180--185
\T1/lmr/m/n/9 data (jan 2026). https://doi.org/10.2139/ssrn.6055903, $\T1/lmtt/
m/n/9 https : / / papers . ssrn .
[]
Underfull \hbox (badness 1226) in paragraph at lines 199--205
Underfull \hbox (badness 1226) in paragraph at lines 192--198
[]\T1/lmr/m/n/9 Shi, J., Han, K., Wang, Z., Doucet, A., Tit-sias, M.: Sim-pli-f
ied and gen-
[]
Underfull \hbox (badness 3954) in paragraph at lines 199--205
Underfull \hbox (badness 3954) in paragraph at lines 192--198
\T1/lmr/m/n/9 er-al-ized masked dif-fu-sion for dis-crete data. In: Glober-son,
A., Mackey,
[]
Underfull \hbox (badness 3701) in paragraph at lines 199--205
Underfull \hbox (badness 3701) in paragraph at lines 192--198
\T1/lmr/m/n/9 vances in Neu-ral In-for-ma-tion Pro-cess-ing Sys-tems. vol. 37,
pp. 103131--
[]
Underfull \hbox (badness 10000) in paragraph at lines 199--205
Underfull \hbox (badness 10000) in paragraph at lines 192--198
\T1/lmr/m/n/9 103167. Cur-ran As-so-ciates, Inc. (2024). https://doi.org/10.522
02/079017-
[]
Underfull \hbox (badness 10000) in paragraph at lines 199--205
Underfull \hbox (badness 10000) in paragraph at lines 192--198
\T1/lmr/m/n/9 3277, $\T1/lmtt/m/n/9 https : / / proceedings . neurips . cc / pa
per _ files / paper / 2024 / file /
[]
Underfull \hbox (badness 10000) in paragraph at lines 217--222
Underfull \hbox (badness 10000) in paragraph at lines 210--215
[]\T1/lmr/m/n/9 Sikder, M.F., Ra-machan-dran-pil-lai, R., Heintz, F.: Trans-fu-
sion: Gen-
[]
Underfull \hbox (badness 10000) in paragraph at lines 217--222
Underfull \hbox (badness 10000) in paragraph at lines 210--215
\T1/lmr/m/n/9 er-at-ing long, high fi-delity time se-ries us-ing dif-fu-sion mo
d-els with
[]
Underfull \hbox (badness 10000) in paragraph at lines 217--222
Underfull \hbox (badness 10000) in paragraph at lines 210--215
\T1/lmr/m/n/9 trans-form-ers. Ma-chine Learn-ing with Ap-pli-ca-tions \T1/lmr/b
x/n/9 20\T1/lmr/m/n/9 , 100652 (2025).
[]
Underfull \hbox (badness 10000) in paragraph at lines 217--222
Underfull \hbox (badness 10000) in paragraph at lines 210--215
\T1/lmr/m/n/9 https://doi.org/https://doi.org/10.1016/j.mlwa.2025.100652, $\T1/
lmtt/m/n/9 https : / / www .
[]
[19]
Underfull \hbox (badness 2229) in paragraph at lines 250--254
Underfull \hbox (badness 2229) in paragraph at lines 243--247
\T1/lmr/m/n/9 er-a-tion. SIG-COMM Com-put. Com-mun. Rev. \T1/lmr/bx/n/9 36\T1/l
mr/m/n/9 (4), 111^^U122 (Aug 2006).
[]
Underfull \hbox (badness 10000) in paragraph at lines 250--254
Underfull \hbox (badness 10000) in paragraph at lines 243--247
\T1/lmr/m/n/9 https://doi.org/10.1145/1151659.1159928, $\T1/lmtt/m/n/9 https :
/ / doi . org / 10 . 1145 / 1151659 .
[]
[20]) [21] (main.aux)
[19]) [20] (main.aux)
***********
LaTeX2e <2025-11-01>
L3 programming layer <2025-12-24>
***********
)
Here is how much of TeX's memory you used:
6341 strings out of 467871
97343 string characters out of 5435199
552253 words of memory out of 5000000
35105 multiletter control sequences out of 15000+600000
6340 strings out of 467871
97321 string characters out of 5435199
552249 words of memory out of 5000000
35104 multiletter control sequences out of 15000+600000
706871 words of font info for 99 fonts, out of 8000000 for 9000
1141 hyphenation exceptions out of 8191
57i,9n,65p,2477b,352s stack positions out of 10000i,1000n,20000p,200000b,200000s
@@ -744,9 +744,9 @@ type1/public/lm/lmri9.pfb><D:/MikTex/fonts/type1/public/lm/lmsy10.pfb><D:/MikTe
x/fonts/type1/public/lm/lmsy7.pfb><D:/MikTex/fonts/type1/public/lm/lmsy9.pfb><D
:/MikTex/fonts/type1/public/lm/lmtt10.pfb><D:/MikTex/fonts/type1/public/lm/lmtt
9.pfb><D:/MikTex/fonts/type1/public/amsfonts/symbols/msbm10.pfb>
Output written on main.pdf (21 pages, 1116663 bytes).
Output written on main.pdf (20 pages, 1116013 bytes).
PDF statistics:
317 PDF objects out of 1000 (max. 8388607)
314 PDF objects out of 1000 (max. 8388607)
0 named destinations out of 1000 (max. 500000)
13845 words of extra memory for PDF output out of 14400 (max. 10000000)

View File

@@ -51,7 +51,7 @@ Despite these advances, most existing work either focuses on packet-level genera
\label{sec:related}
Early generation of network data oriented towards ``realism'' mostly remained at the packet/flow header level, either through replay or statistical synthesis based on single-point observations. Swing, in a closed-loop, network-responsive manner, extracts user/application/network distributions from single-point observations to reproduce burstiness and correlation across multiple time scales \citep{10.1145/1151659.1159928}. Subsequently, a series of works advanced header synthesis to learning-based generation: the WGAN-based method added explicit verification of protocol field consistency to NetFlow/IPFIX \citep{Ring_2019}, NetShare reconstructed header modeling as flow-level time series and improved fidelity and scalability through domain encoding and parallel fine-tuning \citep{10.1145/3544216.3544251}, and DoppelGANger preserved the long-range structure and downstream sorting consistency of networked time series by decoupling attributes from sequences \citep{Lin_2020}. However, in industrial control system (ICS) scenarios, the original PCAP is usually not shareable, and public testbeds (such as SWaT, WADI) mostly provide process/monitoring telemetry and protocol interactions for security assessment, but public datasets emphasize operational variables rather than packet-level traces \citep{7469060,10.1145/3055366.3055375}. This makes ``synthesis at the feature/telemetry level, aware of protocol and semantics'' more feasible and necessary in practice: we are more concerned with reproducing high-level distributions and multi-scale temporal patterns according to operational semantics and physical constraints without relying on the original packets. From this perspective, the generation paradigm naturally shifts from ``packet syntax reproduction'' to ``modeling of high-level spatio-temporal distributions and uncertainties'', requiring stable training, strong distribution fitting, and interpretable uncertainty characterization.
Diffusion models exhibit good fit along this path: DDPM achieves high-quality sampling and stable optimization through efficient $\epsilon$ parameterization and weighted variational objectives \citep{NEURIPS2020_4c5bcfec}, the SDE perspective unifies score-based and diffusion, providing likelihood evaluation and prediction-correction sampling strategies based on probability flow ODEs \citep{song2021score}. For time series, TimeGrad replaces the constrained output distribution with conditional denoising, capturing high-dimensional correlations at each step \citep{rasul2021autoregressivedenoisingdiffusionmodels}; CSDI explicitly performs conditional diffusion and uses two-dimensional attention to simultaneously leverage temporal and cross-feature dependencies, suitable for conditioning and filling in missing values \citep{tashiro2021csdiconditionalscorebaseddiffusion}; in a more general spatio-temporal structure, DiffSTG generalizes diffusion to spatio-temporal graphs, combining TCN/GCN with denoising U-Net to improve CRPS and inference efficiency in a non-autoregressive manner \citep{wen2024diffstgprobabilisticspatiotemporalgraph}, and PriSTI further enhances conditional features and geographical relationships, maintaining robustness under high missing rates and sensor failures \citep{liu2023pristiconditionaldiffusionframework}; in long sequences and continuous domains, DiffWave verifies that diffusion can also match the quality of strong vocoders under non-autoregressive fast synthesis \citep{kong2021diffwaveversatilediffusionmodel}; studies on cellular communication traffic show that diffusion can recover spatio-temporal patterns and provide uncertainty characterization at the urban scale \citep{11087622}. 
These results overall point to a conclusion: when the research focus is on "telemetry/high-level features" rather than raw messages, diffusion models provide stable and fine-grained distribution fitting and uncertainty quantification, which is exactly in line with the requirements of ICS telemetry synthesis. Meanwhile, directly entrusting all structures to a "monolithic diffusion" is not advisable: long-range temporal skeletons and fine-grained marginal distributions often have optimization tensions, requiring explicit decoupling in modeling.
Diffusion models are a good fit along this path: DDPM achieves high-quality sampling and stable optimization through efficient $\epsilon$ parameterization and weighted variational objectives \citep{ho2020denoising}, the SDE perspective unifies score-based modeling and diffusion, providing likelihood evaluation and prediction-correction sampling strategies based on probability flow ODEs \citep{song2021score}. For time series, TimeGrad replaces the constrained output distribution with conditional denoising, capturing high-dimensional correlations at each step \citep{rasul2021autoregressivedenoisingdiffusionmodels}; CSDI explicitly performs conditional diffusion and uses two-dimensional attention to simultaneously leverage temporal and cross-feature dependencies, suitable for conditioning and filling in missing values \citep{tashiro2021csdiconditionalscorebaseddiffusion}; in a more general spatio-temporal structure, DiffSTG generalizes diffusion to spatio-temporal graphs, combining TCN/GCN with denoising U-Net to improve CRPS and inference efficiency in a non-autoregressive manner \citep{wen2024diffstgprobabilisticspatiotemporalgraph}, and PriSTI further enhances conditional features and geographical relationships, maintaining robustness under high missing rates and sensor failures \citep{liu2023pristiconditionaldiffusionframework}; in long sequences and continuous domains, DiffWave verifies that diffusion can also match the quality of strong vocoders under non-autoregressive fast synthesis \citep{kong2021diffwaveversatilediffusionmodel}; studies on cellular communication traffic show that diffusion can recover spatio-temporal patterns and provide uncertainty characterization at the urban scale \citep{11087622}.
These results overall point to a conclusion: when the research focus is on ``telemetry/high-level features'' rather than raw messages, diffusion models provide stable and fine-grained distribution fitting and uncertainty quantification, which is exactly in line with the requirements of ICS telemetry synthesis. Meanwhile, directly entrusting all structures to a ``monolithic diffusion'' is not advisable: long-range temporal skeletons and fine-grained marginal distributions often have optimization tensions, requiring explicit decoupling in modeling.
Looking further into the mechanism complexity of ICS: its channel types are inherently mixed, containing both continuous process trajectories and discrete supervision/status variables, and discrete channels must be ``legal'' under operational constraints. The aforementioned progress in time series diffusion has mainly occurred in continuous spaces, but discrete diffusion has also developed systematic methods: D3PM improves sampling quality and likelihood through absorption/masking and structured transitions in discrete state spaces \citep{austin2021structured}, subsequent masked diffusion provides stable reconstruction on categorical data in a simpler form \citep{shi2024simplified}, multinomial diffusion directly defines diffusion on a finite vocabulary through mechanisms such as argmax flows \citep{hoogeboom2021argmaxflowsmultinomialdiffusion}, and Diffusion-LM demonstrates an effective path for controllable text generation by imposing gradient constraints in continuous latent spaces \citep{li2022diffusionlmimprovescontrollabletext}. From the perspectives of protocols and finite-state machines, coverage-guided fuzz testing emphasizes the criticality of ``sequence legality and state coverage'' \citep{meng2025aflnetyearslatercoverageguided,godefroid2017learnfuzzmachinelearninginput,she2019neuzzefficientfuzzingneural}, echoing the concept of ``legality by construction'' in discrete diffusion: preferentially adopting absorption/masking diffusion on discrete channels, supplemented by type-aware conditioning and sampling constraints, to avoid semantic invalidity and marginal distortion caused by post hoc thresholding.

View File

@@ -624,18 +624,6 @@ numpages = {12},
keywords = {burstiness, energy plot, generator, internet, modeling, structural model, traffic, wavelets}
}
@inproceedings{NEURIPS2020_4c5bcfec,
author = {Ho, Jonathan and Jain, Ajay and Abbeel, Pieter},
booktitle = {Advances in Neural Information Processing Systems},
editor = {H. Larochelle and M. Ranzato and R. Hadsell and M.F. Balcan and H. Lin},
pages = {6840--6851},
publisher = {Curran Associates, Inc.},
title = {Denoising Diffusion Probabilistic Models},
url = {https://proceedings.neurips.cc/paper_files/paper/2020/file/4c5bcfec8584af0d967f1ab10179ca4b-Paper.pdf},
volume = {33},
year = {2020}
}
@inproceedings{nie2023patchtst,
title={A Time Series is Worth 64 Words: Long-term Forecasting with Transformers},
author={Nie, Yuqi and Nguyen, Nam H. and Sinthong, Phanwadee and Kalagnanam, Jayant},