% references.bib -- BibTeX bibliography database.
% NOTE(review): removed web-UI scrape residue (file-listing header and the
% "ambiguous Unicode characters" warning banner) that preceded the entries.
@inproceedings{vaswani2017attention,
  author    = {Vaswani, Ashish and Shazeer, Noam and Parmar, Niki and Uszkoreit, Jakob and Jones, Llion and Gomez, Aidan N and Kaiser, {\L}ukasz and Polosukhin, Illia},
  title     = {Attention is All you Need},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {I. Guyon and U. Von Luxburg and S. Bengio and H. Wallach and R. Fergus and S. Vishwanathan and R. Garnett},
  volume    = {30},
  publisher = {Curran Associates, Inc.},
  year      = {2017},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2017/file/3f5ee243547dee91fbd053c1c4a845aa-Paper.pdf}
}
@inproceedings{ho2020denoising,
  author    = {Ho, Jonathan and Jain, Ajay and Abbeel, Pieter},
  title     = {Denoising Diffusion Probabilistic Models},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {H. Larochelle and M. Ranzato and R. Hadsell and M.F. Balcan and H. Lin},
  volume    = {33},
  pages     = {6840--6851},
  publisher = {Curran Associates, Inc.},
  year      = {2020},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2020/file/4c5bcfec8584af0d967f1ab10179ca4b-Paper.pdf}
}
@inproceedings{austin2021structured,
  author    = {Austin, Jacob and Johnson, Daniel D. and Ho, Jonathan and Tarlow, Daniel and van den Berg, Rianne},
  title     = {Structured Denoising Diffusion Models in Discrete State-Spaces},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {M. Ranzato and A. Beygelzimer and Y. Dauphin and P.S. Liang and J. Wortman Vaughan},
  volume    = {34},
  pages     = {17981--17993},
  publisher = {Curran Associates, Inc.},
  year      = {2021},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2021/file/958c530554f78bcd8e97125b70e6973d-Paper.pdf}
}
@inproceedings{shi2024simplified,
  author    = {Shi, Jiaxin and Han, Kehang and Wang, Zhe and Doucet, Arnaud and Titsias, Michalis},
  title     = {Simplified and Generalized Masked Diffusion for Discrete Data},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {A. Globerson and L. Mackey and D. Belgrave and A. Fan and U. Paquet and J. Tomczak and C. Zhang},
  volume    = {37},
  pages     = {103131--103167},
  publisher = {Curran Associates, Inc.},
  year      = {2024},
  doi       = {10.52202/079017-3277},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2024/file/bad233b9849f019aead5e5cc60cef70f-Paper-Conference.pdf}
}
@inproceedings{hang2023efficient,
  author    = {Hang, Tiankai and Gu, Shuyang and Li, Chen and Bao, Jianmin and Chen, Dong and Hu, Han and Geng, Xin and Guo, Baining},
  title     = {Efficient Diffusion Training via Min-SNR Weighting Strategy},
  booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
  month     = oct,
  year      = {2023},
  pages     = {7441--7451}
}
@inproceedings{kollovieh2023tsdiff,
  author    = {Kollovieh, Marcel and Ansari, Abdul Fatir and Bohlke-Schneider, Michael and Zschiegner, Jasper and Wang, Hao and Wang, Yuyang (Bernie)},
  title     = {Predict, Refine, Synthesize: Self-Guiding Diffusion Models for Probabilistic Time Series Forecasting},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {A. Oh and T. Naumann and A. Globerson and K. Saenko and M. Hardt and S. Levine},
  volume    = {36},
  pages     = {28341--28364},
  publisher = {Curran Associates, Inc.},
  year      = {2023},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2023/file/5a1a10c2c2c9b9af1514687bc24b8f3d-Paper-Conference.pdf}
}
@article{sikder2023transfusion,
  author   = {Md Fahim Sikder and Resmi Ramachandranpillai and Fredrik Heintz},
  title    = {TransFusion: Generating long, high fidelity time series using diffusion models with transformers},
  journal  = {Machine Learning with Applications},
  volume   = {20},
  pages    = {100652},
  year     = {2025},
  issn     = {2666-8270},
  doi      = {10.1016/j.mlwa.2025.100652},
  url      = {https://www.sciencedirect.com/science/article/pii/S2666827025000350},
  keywords = {Time series generation, Generative models, Diffusion models, Synthetic data, Long-sequenced data},
  abstract = {The generation of high-quality, long-sequenced time-series data is essential due to its wide range of applications. In the past, standalone Recurrent and Convolutional Neural Network-based Generative Adversarial Networks (GAN) were used to synthesize time-series data. However, they are inadequate for generating long sequences of time-series data due to limitations in the architecture, such as difficulties in capturing long-range dependencies, limited temporal coherence, and scalability challenges. Furthermore, GANs are well known for their training instability and mode collapse problem. To address this, we propose TransFusion, a diffusion, and transformers-based generative model to generate high-quality long-sequence time-series data. We extended the sequence length to 384, surpassing the previous limit, and successfully generated high-quality synthetic data. Also, we introduce two evaluation metrics to evaluate the quality of the synthetic data as well as its predictive characteristics. TransFusion is evaluated using a diverse set of visual and empirical metrics, consistently outperforming the previous state-of-the-art by a significant margin.}
}
@misc{song2021score,
  author        = {Yang Song and Jascha Sohl-Dickstein and Diederik P. Kingma and Abhishek Kumar and Stefano Ermon and Ben Poole},
  title         = {Score-Based Generative Modeling through Stochastic Differential Equations},
  year          = {2021},
  eprint        = {2011.13456},
  archiveprefix = {arXiv},
  primaryclass  = {cs.LG},
  url           = {https://arxiv.org/abs/2011.13456}
}
@misc{shi2025tabdiff,
  author        = {Juntong Shi and Minkai Xu and Harper Hua and Hengrui Zhang and Stefano Ermon and Jure Leskovec},
  title         = {TabDiff: a Mixed-type Diffusion Model for Tabular Data Generation},
  year          = {2025},
  eprint        = {2410.20626},
  archiveprefix = {arXiv},
  primaryclass  = {cs.LG},
  url           = {https://arxiv.org/abs/2410.20626}
}
@inproceedings{yuan2025ctu,
  author    = {Yuan, Yusong and Sha, Yun and Zhao, Haidong},
  title     = {CTU-DDPM: Generating Industrial Control System Time-Series Data with a CNN-Transformer Hybrid Diffusion Model},
  booktitle = {Proceedings of the 2025 International Symposium on Artificial Intelligence and Computational Social Sciences},
  series    = {AICSS '25},
  pages     = {547--552},
  numpages  = {6},
  year      = {2025},
  isbn      = {9798400721007},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  doi       = {10.1145/3776759.3776845},
  url       = {https://doi.org/10.1145/3776759.3776845},
  keywords  = {Convolutional Neural Network, Diffusion Model, Generation, Industrial Control Systems, Time Series Data, Transformer},
  abstract  = {The security of Industrial Control Systems (ICS) is of paramount importance to national security. Anomaly detection, as a critical security measure, can effectively identify attack behaviors targeting ICS. However, the performance of anomaly detection methods is highly dependent on high-quality datasets, and real anomalous data, in particular, is often difficult to obtain due to its sensitive security implications. To address this challenge, this paper proposes CTU-DDPM, a method for generating multivariate time series data based on Diffusion Models. Our aim is to Generate high-quality industrial control time series data to enhance the performance of anomaly detection methods. This research constructs a diffusion model that fuses a Convolutional Neural Network (CNN) and a Transformer architecture. This hybrid approach is designed to achieve more precise and realistic data generation in complex industrial time series, thereby effectively compensating for the scarcity of authentic anomalous data and providing crucial data support for ICS security.}
}
@misc{sha2026ddpm,
  author        = {Sha, Yun and Yuan, Yusong and Wu, Yonghao and Zhao, Haidong},
  title         = {DDPM Fusing Mamba and Adaptive Attention: An Augmentation Method for Industrial Control Systems Anomaly Data},
  year          = {2026},
  month         = jan,
  note          = {SSRN Electronic Journal},
  eprint        = {6055903},
  archiveprefix = {SSRN},
  doi           = {10.2139/ssrn.6055903},
  url           = {https://papers.ssrn.com/sol3/papers.cfm?abstract_id=6055903}
}
@techreport{nist2023sp80082,
  author      = {{National Institute of Standards and Technology}},
  title       = {Guide to Operational Technology (OT) Security},
  institution = {NIST},
  type        = {Special Publication},
  number      = {800-82 Rev. 3},
  year        = {2023},
  month       = sep,
  doi         = {10.6028/NIST.SP.800-82r3},
  url         = {https://csrc.nist.gov/pubs/sp/800/82/r3/final}
}
@article{10.1007/s10844-022-00753-1,
  author    = {Koay, Abigail MY and Ko, Ryan K L and Hettema, Hinne and Radke, Kenneth},
  title     = {Machine learning in industrial control system (ICS) security: current landscape, opportunities and challenges},
  journal   = {Journal of Intelligent Information Systems},
  volume    = {60},
  number    = {2},
  pages     = {377--405},
  publisher = {Springer},
  year      = {2023}
}
@article{Nankya2023-gp,
  author         = {Nankya, Mary and Chataut, Robin and Akl, Robert},
  title          = {Securing Industrial Control Systems: Components, Cyber Threats, and Machine Learning-Driven Defense Strategies},
  journal        = {Sensors},
  volume         = {23},
  number         = {21},
  article-number = {8840},
  year           = {2023},
  issn           = {1424-8220},
  doi            = {10.3390/s23218840},
  url            = {https://www.mdpi.com/1424-8220/23/21/8840},
  pubmedid       = {37960539},
  abstract       = {Industrial Control Systems (ICS), which include Supervisory Control and Data Acquisition (SCADA) systems, Distributed Control Systems (DCS), and Programmable Logic Controllers (PLC), play a crucial role in managing and regulating industrial processes. However, ensuring the security of these systems is of utmost importance due to the potentially severe consequences of cyber attacks. This article presents an overview of ICS security, covering its components, protocols, industrial applications, and performance aspects. It also highlights the typical threats and vulnerabilities faced by these systems. Moreover, the article identifies key factors that influence the design decisions concerning control, communication, reliability, and redundancy properties of ICS, as these are critical in determining the security needs of the system. The article outlines existing security countermeasures, including network segmentation, access control, patch management, and security monitoring. Furthermore, the article explores the integration of machine learning techniques to enhance the cybersecurity of ICS. Machine learning offers several advantages, such as anomaly detection, threat intelligence analysis, and predictive maintenance. However, combining machine learning with other security measures is essential to establish a comprehensive defense strategy for ICS. The article also addresses the challenges associated with existing measures and provides recommendations for improving ICS security. This paper becomes a valuable reference for researchers aiming to make meaningful contributions within the constantly evolving ICS domain by providing an in-depth examination of the present state, challenges, and potential future advancements.}
}
@misc{shin,
  author    = {Shin, Hyeok-Ki and Lee, Woomyo and Choi, Seungoh and Yun, Jeong-Han and Min, Byung Gil and Kim, HyoungChun},
  title     = {HAI Security Dataset},
  publisher = {Kaggle},
  year      = {2023},
  doi       = {10.34740/kaggle/dsv/5821622},
  url       = {https://www.kaggle.com/dsv/5821622}
}
@article{info16100910,
  author         = {Ali, Jokha and Ali, Saqib and Al Balushi, Taiseera and Nadir, Zia},
  title          = {Intrusion Detection in Industrial Control Systems Using Transfer Learning Guided by Reinforcement Learning},
  journal        = {Information},
  volume         = {16},
  number         = {10},
  article-number = {910},
  year           = {2025},
  issn           = {2078-2489},
  doi            = {10.3390/info16100910},
  url            = {https://www.mdpi.com/2078-2489/16/10/910},
  abstract       = {Securing Industrial Control Systems (ICSs) is critical, but it is made challenging by the constant evolution of cyber threats and the scarcity of labeled attack data in these specialized environments. Standard intrusion detection systems (IDSs) often fail to adapt when transferred to new networks with limited data. To address this, this paper introduces an adaptive intrusion detection framework that combines a hybrid Convolutional Neural Network and Long Short-Term Memory (CNN-LSTM) model with a novel transfer learning strategy. We employ a Reinforcement Learning (RL) agent to intelligently guide the fine-tuning process, which allows the IDS to dynamically adjust its parameters such as layer freezing and learning rates in real-time based on performance feedback. We evaluated our system in a realistic data-scarce scenario using only 50 labeled training samples. Our RL-Guided model achieved a final F1-score of 0.9825, significantly outperforming a standard neural fine-tuning model (0.861) and a target baseline model (0.759). Analysis of the RL agents behavior confirmed that it learned a balanced and effective policy for adapting the model to the target domain. We conclude that the proposed RL-guided approach creates a highly accurate and adaptive IDS that overcomes the limitations of static transfer learning methods. This dynamic fine-tuning strategy is a powerful and promising direction for building resilient cybersecurity defenses for critical infrastructure.}
}
@inproceedings{pmlr-v202-kotelnikov23a,
  author    = {Kotelnikov, Akim and Baranchuk, Dmitry and Rubachev, Ivan and Babenko, Artem},
  title     = {{TabDDPM}: Modelling Tabular Data with Diffusion Models},
  booktitle = {Proceedings of the 40th International Conference on Machine Learning},
  series    = {Proceedings of Machine Learning Research},
  volume    = {202},
  pages     = {17564--17579},
  editor    = {Krause, Andreas and Brunskill, Emma and Cho, Kyunghyun and Engelhardt, Barbara and Sabato, Sivan and Scarlett, Jonathan},
  month     = {23--29 Jul},
  year      = {2023},
  publisher = {PMLR},
  pdf       = {https://proceedings.mlr.press/v202/kotelnikov23a/kotelnikov23a.pdf},
  url       = {https://proceedings.mlr.press/v202/kotelnikov23a.html},
  abstract  = {Denoising diffusion probabilistic models are becoming the leading generative modeling paradigm for many important data modalities. Being the most prevalent in the computer vision community, diffusion models have recently gained some attention in other domains, including speech, NLP, and graph-like data. In this work, we investigate if the framework of diffusion models can be advantageous for general tabular problems, where data points are typically represented by vectors of heterogeneous features. The inherent heterogeneity of tabular data makes it quite challenging for accurate modeling since the individual features can be of a completely different nature, i.e., some of them can be continuous and some can be discrete. To address such data types, we introduce TabDDPM --- a diffusion model that can be universally applied to any tabular dataset and handles any feature types. We extensively evaluate TabDDPM on a wide set of benchmarks and demonstrate its superiority over existing GAN/VAE alternatives, which is consistent with the advantage of diffusion models in other fields.}
}
@inproceedings{rasul2021autoregressivedenoisingdiffusionmodels,
  author    = {Rasul, Kashif and Seward, Calvin and Schuster, Ingmar and Vollgraf, Roland},
  title     = {Autoregressive Denoising Diffusion Models for Multivariate Probabilistic Time Series Forecasting},
  booktitle = {Proceedings of the 38th International Conference on Machine Learning},
  series    = {Proceedings of Machine Learning Research},
  volume    = {139},
  pages     = {8857--8868},
  editor    = {Meila, Marina and Zhang, Tong},
  month     = {18--24 Jul},
  year      = {2021},
  publisher = {PMLR},
  pdf       = {http://proceedings.mlr.press/v139/rasul21a/rasul21a.pdf},
  url       = {https://proceedings.mlr.press/v139/rasul21a.html},
  abstract  = {In this work, we propose TimeGrad, an autoregressive model for multivariate probabilistic time series forecasting which samples from the data distribution at each time step by estimating its gradient. To this end, we use diffusion probabilistic models, a class of latent variable models closely connected to score matching and energy-based methods. Our model learns gradients by optimizing a variational bound on the data likelihood and at inference time converts white noise into a sample of the distribution of interest through a Markov chain using Langevin sampling. We demonstrate experimentally that the proposed autoregressive denoising diffusion model is the new state-of-the-art multivariate probabilistic forecasting method on real-world data sets with thousands of correlated dimensions. We hope that this method is a useful tool for practitioners and lays the foundation for future research in this area.}
}
@article{jiang2023netdiffusionnetworkdataaugmentation,
  author     = {Jiang, Xi and Liu, Shinan and Gember-Jacobson, Aaron and Bhagoji, Arjun Nitin and Schmitt, Paul and Bronzino, Francesco and Feamster, Nick},
  title      = {NetDiffusion: Network Data Augmentation Through Protocol-Constrained Traffic Generation},
  journal    = {Proc. ACM Meas. Anal. Comput. Syst.},
  volume     = {8},
  number     = {1},
  articleno  = {11},
  numpages   = {32},
  month      = feb,
  year       = {2024},
  issue_date = {March 2024},
  publisher  = {Association for Computing Machinery},
  address    = {New York, NY, USA},
  doi        = {10.1145/3639037},
  url        = {https://doi.org/10.1145/3639037},
  keywords   = {diffusion model, network traffic, synthesis},
  abstract   = {Datasets of labeled network traces are essential for a multitude of machine learning (ML) tasks in networking, yet their availability is hindered by privacy and maintenance concerns, such as data staleness. To overcome this limitation, synthetic network traces can often augment existing datasets. Unfortunately, current synthetic trace generation methods, which typically produce only aggregated flow statistics or a few selected packet attributes, do not always suffice, especially when model training relies on having features that are only available from packet traces. This shortfall manifests in both insufficient statistical resemblance to real traces and suboptimal performance on ML tasks when employed for data augmentation. In this paper, we apply diffusion models to generate high-resolution synthetic network traffic traces. We present NetDiffusion1, a tool that uses a finely-tuned, controlled variant of a Stable Diffusion model to generate synthetic network traffic that is high fidelity and conforms to protocol specifications. Our evaluation demonstrates that packet captures generated from NetDiffusion can achieve higher statistical similarity to real data and improved ML model performance than current state-of-the-art approaches (e.g., GAN-based approaches). Furthermore, our synthetic traces are compatible with common network analysis tools and support a myriad of network tasks, suggesting that NetDiffusion can serve a broader spectrum of network analysis and testing tasks, extending beyond ML-centric applications.}
}
@article{Ring_2019,
  author   = {Markus Ring and Daniel Schl{\"o}r and Dieter Landes and Andreas Hotho},
  title    = {Flow-based network traffic generation using Generative Adversarial Networks},
  journal  = {Computers \& Security},
  volume   = {82},
  pages    = {156--172},
  year     = {2019},
  issn     = {0167-4048},
  doi      = {10.1016/j.cose.2018.12.012},
  url      = {https://www.sciencedirect.com/science/article/pii/S0167404818308393},
  keywords = {GANs, TTUR WGAN-GP, NetFlow, Generation, IDS},
  abstract = {Flow-based data sets are necessary for evaluating network-based intrusion detection systems (NIDS). In this work, we propose a novel methodology for generating realistic flow-based network traffic. Our approach is based on Generative Adversarial Networks (GANs) which achieve good results for image generation. A major challenge lies in the fact that GANs can only process continuous attributes. However, flow-based data inevitably contain categorical attributes such as IP addresses or port numbers. Therefore, we propose three different preprocessing approaches for flow-based data in order to transform them into continuous values. Further, we present a new method for evaluating the generated flow-based network traffic which uses domain knowledge to define quality tests. We use the three approaches for generating flow-based network traffic based on the CIDDS-001 data set. Experiments indicate that two of the three approaches are able to generate high quality data.}
}
@inproceedings{10.1145/3544216.3544251,
  author    = {Yin, Yucheng and Lin, Zinan and Jin, Minhao and Fanti, Giulia and Sekar, Vyas},
  title     = {Practical GAN-based synthetic IP header trace generation using NetShare},
  booktitle = {Proceedings of the ACM SIGCOMM 2022 Conference},
  series    = {SIGCOMM '22},
  pages     = {458--472},
  numpages  = {15},
  year      = {2022},
  isbn      = {9781450394208},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  location  = {Amsterdam, Netherlands},
  doi       = {10.1145/3544216.3544251},
  url       = {https://doi.org/10.1145/3544216.3544251},
  keywords  = {synthetic data generation, privacy, network packets, network flows, generative adversarial networks},
  abstract  = {We explore the feasibility of using Generative Adversarial Networks (GANs) to automatically learn generative models to generate synthetic packet- and flow header traces for networking tasks (e.g., telemetry, anomaly detection, provisioning). We identify key fidelity, scalability, and privacy challenges and tradeoffs in existing GAN-based approaches. By synthesizing domain-specific insights with recent advances in machine learning and privacy, we identify design choices to tackle these challenges. Building on these insights, we develop an end-to-end framework, NetShare. We evaluate NetShare on six diverse packet header traces and find that: (1) across all distributional metrics and traces, it achieves 46\% more accuracy than baselines and (2) it meets users' requirements of downstream tasks in evaluating accuracy and rank ordering of candidate approaches.}
}
@inproceedings{Lin_2020,
  author    = {Lin, Zinan and Jain, Alankar and Wang, Chen and Fanti, Giulia and Sekar, Vyas},
  title     = {Using GANs for Sharing Networked Time Series Data: Challenges, Initial Promise, and Open Questions},
  booktitle = {Proceedings of the ACM Internet Measurement Conference},
  series    = {IMC '20},
  pages     = {464--483},
  numpages  = {20},
  year      = {2020},
  isbn      = {9781450381383},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  location  = {Virtual Event, USA},
  doi       = {10.1145/3419394.3423643},
  url       = {https://doi.org/10.1145/3419394.3423643},
  keywords  = {generative adversarial networks, privacy, synthetic data generation, time series},
  abstract  = {Limited data access is a longstanding barrier to data-driven research and development in the networked systems community. In this work, we explore if and how generative adversarial networks (GANs) can be used to incentivize data sharing by enabling a generic framework for sharing synthetic datasets with minimal expert knowledge. As a specific target, our focus in this paper is on time series datasets with metadata (e.g., packet loss rate measurements with corresponding ISPs). We identify key challenges of existing GAN approaches for such workloads with respect to fidelity (e.g., long-term dependencies, complex multidimensional relationships, mode collapse) and privacy (i.e., existing guarantees are poorly understood and can sacrifice fidelity). To improve fidelity, we design a custom workflow called DoppelGANger (DG) and demonstrate that across diverse real-world datasets (e.g., bandwidth measurements, cluster requests, web sessions) and use cases (e.g., structural characterization, predictive modeling, algorithm comparison), DG achieves up to 43\% better fidelity than baseline models. Although we do not resolve the privacy problem in this work, we identify fundamental challenges with both classical notions of privacy and recent advances to improve the privacy properties of GANs, and suggest a potential roadmap for addressing these challenges. By shedding light on the promise and challenges, we hope our work can rekindle the conversation on workflows for data sharing.}
}
@inproceedings{7469060,
  author    = {Mathur, Aditya P. and Tippenhauer, Nils Ole},
  title     = {SWaT: a water treatment testbed for research and training on ICS security},
  booktitle = {2016 International Workshop on Cyber-physical Systems for Smart Water Networks (CySWater)},
  pages     = {31--36},
  year      = {2016},
  doi       = {10.1109/CySWater.2016.7469060},
  keywords  = {Sensors;Actuators;Feeds;Process control;Chemicals;Chemical sensors;Security;Cyber Physical Systems;Industrial Control Systems;Cyber Attacks;Cyber Defense;Water Testbed}
}
@inproceedings{10.1145/3055366.3055375,
  author    = {Ahmed, Chuadhry Mujeeb and Palleti, Venkata Reddy and Mathur, Aditya P.},
  title     = {WADI: a water distribution testbed for research in the design of secure cyber physical systems},
  booktitle = {Proceedings of the 3rd International Workshop on Cyber-Physical Systems for Smart Water Networks},
  series    = {CySWATER '17},
  pages     = {25--28},
  numpages  = {4},
  year      = {2017},
  isbn      = {9781450349758},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  location  = {Pittsburgh, Pennsylvania},
  doi       = {10.1145/3055366.3055375},
  url       = {https://doi.org/10.1145/3055366.3055375},
  keywords  = {attack detection, cyber physical systems, cyber security, industrial control systems, water distribution testbed},
  abstract  = {The architecture of a water distribution testbed (WADI), and on-going research in the design of secure water distribution system is presented. WADI consists of three stages controlled by Programmable Logic Controllers (PLCs) and two stages controlled via Remote Terminal Units (RTUs). Each PLC and RTU uses sensors to estimate the system state and the actuators to effect control. WADI is currently used to (a) conduct security analysis for water distribution networks, (b) experimentally assess detection mechanisms for potential cyber and physical attacks, and (c) understand how the impact of an attack on one CPS could cascade to other connected CPSs. The cascading effects of attacks can be studied in WADI through its connection to two other testbeds, namely for water treatment and power generation and distribution.}
}
@inproceedings{tashiro2021csdiconditionalscorebaseddiffusion,
  author    = {Tashiro, Yusuke and Song, Jiaming and Song, Yang and Ermon, Stefano},
  title     = {CSDI: Conditional Score-based Diffusion Models for Probabilistic Time Series Imputation},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {M. Ranzato and A. Beygelzimer and Y. Dauphin and P.S. Liang and J. Wortman Vaughan},
  volume    = {34},
  pages     = {24804--24816},
  publisher = {Curran Associates, Inc.},
  year      = {2021},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2021/file/cfe8504bda37b575c70ee1a8276f3486-Paper.pdf}
}
@inproceedings{wen2024diffstgprobabilisticspatiotemporalgraph,
  author    = {Wen, Haomin and Lin, Youfang and Xia, Yutong and Wan, Huaiyu and Wen, Qingsong and Zimmermann, Roger and Liang, Yuxuan},
  title     = {DiffSTG: Probabilistic Spatio-Temporal Graph Forecasting with Denoising Diffusion Models},
  booktitle = {Proceedings of the 31st ACM International Conference on Advances in Geographic Information Systems},
  series    = {SIGSPATIAL '23},
  articleno = {60},
  numpages  = {12},
  year      = {2023},
  isbn      = {9798400701689},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  location  = {Hamburg, Germany},
  doi       = {10.1145/3589132.3625614},
  url       = {https://doi.org/10.1145/3589132.3625614},
  keywords  = {spatio-temporal graph forecasting, probabilistic forecasting, diffusion model},
  abstract  = {Spatio-temporal graph neural networks (STGNN) have emerged as the dominant model for spatio-temporal graph (STG) forecasting. Despite their success, they fail to model intrinsic uncertainties within STG data, which cripples their practicality in downstream tasks for decision-making. To this end, this paper focuses on probabilistic STG forecasting, which is challenging due to the difficulty in modeling uncertainties and complex ST dependencies. In this study, we present the first attempt to generalize the popular de-noising diffusion probabilistic models to STGs, leading to a novel non-autoregressive framework called DiffSTG, along with the first denoising network UGnet for STG in the framework. Our approach combines the spatio-temporal learning capabilities of STGNNs with the uncertainty measurements of diffusion models. Extensive experiments validate that DiffSTG reduces the Continuous Ranked Probability Score (CRPS) by 4\%-14\%, and Root Mean Squared Error (RMSE) by 2\%-7\% over existing methods on three real-world datasets.}
}
@inproceedings{liu2023pristiconditionaldiffusionframework,
  author    = {Liu, Mingzhe and Huang, Han and Feng, Hao and Sun, Leilei and Du, Bowen and Fu, Yanjie},
  title     = {PriSTI: A Conditional Diffusion Framework for Spatiotemporal Imputation},
  booktitle = {2023 IEEE 39th International Conference on Data Engineering (ICDE)},
  pages     = {1927--1939},
  year      = {2023},
  doi       = {10.1109/ICDE55515.2023.00150},
  keywords  = {Correlation;Scalability;Transforms;Predictive models;Feature extraction;Propagation losses;Probabilistic logic;Spatiotemporal Imputation;Diffusion Model;Spatiotemporal Dependency Learning}
}
@misc{kong2021diffwaveversatilediffusionmodel,
  author        = {Zhifeng Kong and Wei Ping and Jiaji Huang and Kexin Zhao and Bryan Catanzaro},
  title         = {DiffWave: A Versatile Diffusion Model for Audio Synthesis},
  year          = {2021},
  eprint        = {2009.09761},
  archiveprefix = {arXiv},
  primaryclass  = {eess.AS},
  url           = {https://arxiv.org/abs/2009.09761}
}
@article{11087622,
  author   = {Liu, Xiaosi and Xu, Xiaowen and Liu, Zhidan and Li, Zhenjiang and Wu, Kaishun},
  title    = {Spatio-Temporal Diffusion Model for Cellular Traffic Generation},
  journal  = {IEEE Transactions on Mobile Computing},
  volume   = {25},
  number   = {1},
  pages    = {257--271},
  year     = {2026},
  doi      = {10.1109/TMC.2025.3591183},
  keywords = {Base stations;Diffusion models;Data models;Uncertainty;Predictive models;Generative adversarial networks;Knowledge graphs;Mobile computing;Telecommunication traffic;Semantics;Cellular traffic;data generation;diffusion model;spatio-temporal graph}
}
@inproceedings{hoogeboom2021argmaxflowsmultinomialdiffusion,
  author    = {Hoogeboom, Emiel and Nielsen, Didrik and Jaini, Priyank and Forr{\'e}, Patrick and Welling, Max},
  title     = {Argmax Flows and Multinomial Diffusion: Learning Categorical Distributions},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {M. Ranzato and A. Beygelzimer and Y. Dauphin and P.S. Liang and J. Wortman Vaughan},
  volume    = {34},
  pages     = {12454--12465},
  publisher = {Curran Associates, Inc.},
  year      = {2021},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2021/file/67d96d458abdef21792e6d8e590244e7-Paper.pdf}
}
@inproceedings{li2022diffusionlmimprovescontrollabletext,
  author    = {Li, Xiang and Thickstun, John and Gulrajani, Ishaan and Liang, Percy S and Hashimoto, Tatsunori B},
  title     = {Diffusion-LM Improves Controllable Text Generation},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {S. Koyejo and S. Mohamed and A. Agarwal and D. Belgrave and K. Cho and A. Oh},
  volume    = {35},
  pages     = {4328--4343},
  publisher = {Curran Associates, Inc.},
  year      = {2022},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2022/file/1be5bc25d50895ee656b8c2d9eb89d6a-Paper-Conference.pdf}
}
@article{meng2025aflnetyearslatercoverageguided,
  author   = {Meng, Ruijie and Pham, Van-Thuan and B{\"o}hme, Marcel and Roychoudhury, Abhik},
  title    = {AFLNet Five Years Later: On Coverage-Guided Protocol Fuzzing},
  journal  = {IEEE Transactions on Software Engineering},
  volume   = {51},
  number   = {4},
  pages    = {960--974},
  year     = {2025},
  doi      = {10.1109/TSE.2025.3535925},
  keywords = {Protocols;Servers;Fuzzing;Codes;Security;Data models;Source coding;Computer bugs;Software systems;Reliability;Greybox fuzzing;network protocol testing;stateful fuzzing}
}
@inproceedings{godefroid2017learnfuzzmachinelearninginput,
  author    = {Godefroid, Patrice and Peleg, Hila and Singh, Rishabh},
  booktitle = {2017 32nd IEEE/ACM International Conference on Automated Software Engineering (ASE)},
  title     = {Learn\&Fuzz: Machine Learning for Input Fuzzing},
  year      = {2017},
  pages     = {50--59},
  keywords  = {Portable document format;Grammar;Training;Probability distribution;Recurrent neural networks;Fuzzing;Deep Learning;Grammar-based Fuzzing;Grammar Learning},
  doi       = {10.1109/ASE.2017.8115618}
}
@inproceedings{she2019neuzzefficientfuzzingneural,
  author    = {She, Dongdong and Pei, Kexin and Epstein, Dave and Yang, Junfeng and Ray, Baishakhi and Jana, Suman},
  booktitle = {2019 IEEE Symposium on Security and Privacy (SP)},
  title     = {{NEUZZ}: Efficient Fuzzing with Neural Program Smoothing},
  year      = {2019},
  pages     = {803--817},
  keywords  = {Optimization;Fuzzing;Computer bugs;Artificial neural networks;Smoothing methods;Evolutionary computation;fuzzing;neural program smoothing;gradient-guided mutation},
  doi       = {10.1109/SP.2019.00052}
}
@inproceedings{dai2019transformerxlattentivelanguagemodels,
  title     = {Transformer-{XL}: Attentive Language Models beyond a Fixed-Length Context},
  author    = {Dai, Zihang and Yang, Zhilin and Yang, Yiming and Carbonell, Jaime and Le, Quoc and Salakhutdinov, Ruslan},
  editor    = {Korhonen, Anna and Traum, David and M{\`a}rquez, Llu{\'i}s},
  booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics},
  month     = jul,
  year      = {2019},
  address   = {Florence, Italy},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/P19-1285/},
  doi       = {10.18653/v1/P19-1285},
  pages     = {2978--2988},
  abstract  = {Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the context fragmentation problem. As a result, Transformer-XL learns dependency that is 80{\%} longer than RNNs and 450{\%} longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+ times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably coherent, novel text articles with thousands of tokens. Our code, pretrained models, and hyperparameters are available in both Tensorflow and PyTorch.}
}
@article{zhou2021informerefficienttransformerlong,
  title        = {Informer: Beyond Efficient Transformer for Long Sequence Time-Series Forecasting},
  author       = {Zhou, Haoyi and Zhang, Shanghang and Peng, Jieqi and Zhang, Shuai and Li, Jianxin and Xiong, Hui and Zhang, Wancai},
  journal      = {Proceedings of the AAAI Conference on Artificial Intelligence},
  volume       = {35},
  number       = {12},
  pages        = {11106--11115},
  year         = {2021},
  month        = may,
  doi          = {10.1609/aaai.v35i12.17325},
  url          = {https://ojs.aaai.org/index.php/AAAI/article/view/17325},
  abstractNote = {Many real-world applications require the prediction of long sequence time-series, such as electricity consumption planning. Long sequence time-series forecasting (LSTF) demands a high prediction capacity of the model, which is the ability to capture precise long-range dependency coupling between output and input efficiently. Recent studies have shown the potential of Transformer to increase the prediction capacity. However, there are several severe issues with Transformer that prevent it from being directly applicable to LSTF, including quadratic time complexity, high memory usage, and inherent limitation of the encoder-decoder architecture. To address these issues, we design an efficient transformer-based model for LSTF, named Informer, with three distinctive characteristics: (i) a ProbSparse self-attention mechanism, which achieves O(L log L) in time complexity and memory usage, and has comparable performance on sequences dependency alignment. (ii) the self-attention distilling highlights dominating attention by halving cascading layer input, and efficiently handles extreme long input sequences. (iii) the generative style decoder, while conceptually simple, predicts the long time-series sequences at one forward operation rather than a step-by-step way, which drastically improves the inference speed of long-sequence predictions. Extensive experiments on four large-scale datasets demonstrate that Informer significantly outperforms existing methods and provides a new solution to the LSTF problem.}
}
@inproceedings{wu2022autoformerdecompositiontransformersautocorrelation,
  title     = {Autoformer: Decomposition Transformers with Auto-Correlation for Long-Term Series Forecasting},
  author    = {Wu, Haixu and Xu, Jiehui and Wang, Jianmin and Long, Mingsheng},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {M. Ranzato and A. Beygelzimer and Y. Dauphin and P.S. Liang and J. Wortman Vaughan},
  volume    = {34},
  pages     = {22419--22430},
  publisher = {Curran Associates, Inc.},
  year      = {2021},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2021/file/bcc0d400288793e8bdcd7c19a8ac0c2b-Paper.pdf}
}
@inproceedings{zhou2022fedformerfrequencyenhanceddecomposed,
  title     = {{FED}former: Frequency Enhanced Decomposed Transformer for Long-term Series Forecasting},
  author    = {Zhou, Tian and Ma, Ziqing and Wen, Qingsong and Wang, Xue and Sun, Liang and Jin, Rong},
  booktitle = {Proceedings of the 39th International Conference on Machine Learning},
  pages     = {27268--27286},
  year      = {2022},
  editor    = {Chaudhuri, Kamalika and Jegelka, Stefanie and Song, Le and Szepesvari, Csaba and Niu, Gang and Sabato, Sivan},
  volume    = {162},
  series    = {Proceedings of Machine Learning Research},
  month     = {17--23 Jul},
  publisher = {PMLR},
  pdf       = {https://proceedings.mlr.press/v162/zhou22g/zhou22g.pdf},
  url       = {https://proceedings.mlr.press/v162/zhou22g.html},
  abstract  = {Long-term time series forecasting is challenging since prediction accuracy tends to decrease dramatically with the increasing horizon. Although Transformer-based methods have significantly improved state-of-the-art results for long-term forecasting, they are not only computationally expensive but more importantly, are unable to capture the global view of time series (e.g. overall trend). To address these problems, we propose to combine Transformer with the seasonal-trend decomposition method, in which the decomposition method captures the global profile of time series while Transformers capture more detailed structures. To further enhance the performance of Transformer for long-term prediction, we exploit the fact that most time series tend to have a sparse representation in a well-known basis such as Fourier transform, and develop a frequency enhanced Transformer. Besides being more effective, the proposed method, termed as Frequency Enhanced Decomposed Transformer (FEDformer), is more efficient than standard Transformer with a linear complexity to the sequence length. Our empirical studies with six benchmark datasets show that compared with state-of-the-art methods, Fedformer can reduce prediction error by 14.8\% and 22.6\% for multivariate and univariate time series, respectively. Code is publicly available at https://github.com/MAZiqing/FEDformer.}
}
@article{2023,
  title     = {A Note on Extremal {Sombor} Indices of Trees with a Given Degree Sequence},
  author    = {Damjanovi{\'c}, Ivan and Milo{\v{s}}evi{\'c}, Marko and Stevanovi{\'c}, Dragan},
  journal   = {Match Communications in Mathematical and in Computer Chemistry},
  publisher = {University Library in Kragujevac},
  volume    = {90},
  number    = {1},
  pages     = {197--202},
  year      = {2023},
  issn      = {0340-6253},
  doi       = {10.46793/match.90-1.197D}
}
@article{stenger2024survey,
  author    = {Stenger, Michael and Leppich, Robert and Foster, Ian T and Kounev, Samuel and Bauer, Andre},
  title     = {Evaluation is key: a survey on evaluation measures for synthetic time series},
  journal   = {Journal of Big Data},
  year      = {2024},
  volume    = {11},
  number    = {1},
  pages     = {66},
  publisher = {Springer}
}
@article{lin1991divergence,
  author   = {Lin, J.},
  journal  = {IEEE Transactions on Information Theory},
  title    = {Divergence measures based on the {Shannon} entropy},
  year     = {1991},
  volume   = {37},
  number   = {1},
  pages    = {145--151},
  keywords = {Entropy;Probability distribution;Upper bound;Pattern analysis;Signal analysis;Signal processing;Pattern recognition;Taxonomy;Genetics;Computer science},
  doi      = {10.1109/18.61115}
}
@inproceedings{yoon2019timegan,
  title     = {Time-series Generative Adversarial Networks},
  author    = {Yoon, Jinsung and Jarrett, Daniel and van der Schaar, Mihaela},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {H. Wallach and H. Larochelle and A. Beygelzimer and F. d\textquotesingle Alch\'{e}-Buc and E. Fox and R. Garnett},
  volume    = {32},
  publisher = {Curran Associates, Inc.},
  year      = {2019},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2019/file/c9efe5f26cd17ba6216bbe2a7d26d490-Paper.pdf}
}
@inproceedings{10.1145/3490354.3494393,
  author    = {Ni, Hao and Szpruch, Lukasz and Sabate-Vidales, Marc and Xiao, Baoren and Wiese, Magnus and Liao, Shujian},
  title     = {Sig-{Wasserstein} {GANs} for Time Series Generation},
  year      = {2022},
  isbn      = {9781450391481},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  doi       = {10.1145/3490354.3494393},
  abstract  = {Synthetic data is an emerging technology that can significantly accelerate the development and deployment of AI machine learning pipelines. In this work, we develop high-fidelity time-series generators, the SigWGAN, by combining continuous-time stochastic models with the newly proposed signature W1 metric. The former are the Logsig-RNN models based on the stochastic differential equations, whereas the latter originates from the universal and principled mathematical features to characterize the measure induced by time series. SigWGAN allows turning computationally challenging GAN min-max problem into supervised learning while generating high fidelity samples. We validate the proposed model on both synthetic data generated by popular quantitative risk models and empirical financial data. Codes are available at https://github.com/SigCGANs/Sig-Wasserstein-GANs.git},
  booktitle = {Proceedings of the Second ACM International Conference on AI in Finance},
  articleno = {28},
  numpages  = {8},
  keywords  = {signatures, neural networks, generative modelling},
  location  = {Virtual Event},
  series    = {ICAIF '21}
}
@inproceedings{coletta2023constrained,
  title     = {On the Constrained Time-Series Generation Problem},
  author    = {Coletta, Andrea and Gopalakrishnan, Sriram and Borrajo, Daniel and Vyetrenko, Svitlana},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {A. Oh and T. Naumann and A. Globerson and K. Saenko and M. Hardt and S. Levine},
  volume    = {36},
  pages     = {61048--61059},
  publisher = {Curran Associates, Inc.},
  year      = {2023},
  url       = {https://proceedings.neurips.cc/paper_files/paper/2023/file/bfb6a69c0d9e2bc596e1cd31f16fcdde-Paper-Conference.pdf}
}
@article{yang2001interlock,
  title    = {Automatic verification of safety interlock systems for industrial processes},
  author   = {Yang, S. H. and Tan, L. S. and He, C. H.},
  journal  = {Journal of Loss Prevention in the Process Industries},
  volume   = {14},
  number   = {5},
  pages    = {379--386},
  year     = {2001},
  issn     = {0950-4230},
  doi      = {10.1016/S0950-4230(01)00014-6},
  url      = {https://www.sciencedirect.com/science/article/pii/S0950423001000146},
  keywords = {Safety interlock system, Symbolic model checking, Safety verification, Industrial processes},
  abstract = {The safety interlock system (SIS) is one of the most important protective measurements in industrial processes that provide automatic actions to correct an abnormal plant event. This paper considers the use of formal techniques based on symbolic model checking and computation tree logic (CTL) in the specification to automatically verify the SIS for industrial processes. It addresses the problem of modelling industrial processes and presenting the SIS in CTL. It shows how symbolic model checking can be used efficiently in the verification of a SIS. A transferring system for a penicillin process is used as a case study.}
}
@article{10.1145/1151659.1159928,
  author     = {Vishwanath, Kashi Venkatesh and Vahdat, Amin},
  title      = {Realistic and responsive network traffic generation},
  year       = {2006},
  issue_date = {October 2006},
  publisher  = {Association for Computing Machinery},
  address    = {New York, NY, USA},
  volume     = {36},
  number     = {4},
  issn       = {0146-4833},
  doi        = {10.1145/1151659.1159928},
  abstract   = {This paper presents Swing, a closed-loop, network-responsive traffic generator that accurately captures the packet interactions of a range of applications using a simple structural model. Starting from observed traffic at a single point in the network, Swing automatically extracts distributions for user, application, and network behavior. It then generates live traffic corresponding to the underlying models in a network emulation environment running commodity network protocol stacks. We find that the generated traces are statistically similar to the original traces. Further, to the best of our knowledge, we are the first to reproduce burstiness in traffic across a range of timescales using a model applicable to a variety of network settings. An initial sensitivity analysis reveals the importance of capturing and recreating user, application, and network characteristics to accurately reproduce such burstiness. Finally, we explore Swing's ability to vary user characteristics, application properties, and wide-area network conditions to project traffic characteristics into alternate scenarios.},
  journal    = {SIGCOMM Comput. Commun. Rev.},
  month      = aug,
  pages      = {111--122},
  numpages   = {12},
  keywords   = {burstiness, energy plot, generator, internet, modeling, structural model, traffic, wavelets}
}
@inproceedings{nie2023patchtst,
  author    = {Nie, Yuqi and Nguyen, Nam H. and Sinthong, Phanwadee and Kalagnanam, Jayant},
  title     = {A Time Series is Worth 64 Words: Long-term Forecasting with Transformers},
  booktitle = {International Conference on Learning Representations (ICLR)},
  year      = {2023},
  url       = {https://arxiv.org/abs/2211.14730}
}