@article{rosenblatt1958perceptron,
title={The perceptron: a probabilistic model for information storage and organization in the brain},
author={Rosenblatt, Frank},
journal={Psychological Review},
volume={65},
number={6},
pages={386},
year={1958},
publisher={American Psychological Association}
}
@article{lecun1989backpropagation,
title={Backpropagation applied to handwritten zip code recognition},
author={LeCun, Yann and Boser, Bernhard and Denker, John S and Henderson, Donnie and Howard, Richard E and Hubbard, Wayne and Jackel, Lawrence D},
journal={Neural Computation},
volume={1},
number={4},
pages={541--551},
year={1989},
publisher={MIT Press}
}
@inproceedings{krizhevsky2012imagenet,
title={{ImageNet} classification with deep convolutional neural networks},
author={Krizhevsky, Alex and Sutskever, Ilya and Hinton, Geoffrey E},
booktitle={Advances in Neural Information Processing Systems},
pages={1097--1105},
year={2012}
}
@inproceedings{he2016deep,
title={{Deep Residual Learning for Image Recognition}},
author={He, Kaiming and Zhang, Xiangyu and Ren, Shaoqing and Sun, Jian},
booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
year={2016}
}
@article{rumelhart1986learning,
title={Learning representations by back-propagating errors},
author={Rumelhart, David E and Hinton, Geoffrey E and Williams, Ronald J},
journal={Nature},
volume={323},
number={6088},
pages={533},
year={1986},
publisher={Nature Publishing Group}
}
@article{Hochreiter1997lstm,
author = {Hochreiter, Sepp and Schmidhuber, J{\"u}rgen},
issn = {0899-7667},
journal = {Neural Computation},
number = {8},
pages = {1735--1780},
pmid = {9377276},
title = {{Long Short-Term Memory}},
volume = {9},
year = {1997}
}
@inproceedings{vaswani2017attention,
title={Attention is all you need},
author={Vaswani, Ashish and Shazeer, Noam and Parmar, Niki and Uszkoreit, Jakob and Jones, Llion and Gomez, Aidan N and Kaiser, {\L}ukasz and Polosukhin, Illia},
booktitle={Advances in Neural Information Processing Systems},
pages={5998--6008},
year={2017}
}
@article{lecun2015deep,
title={Deep learning},
author={LeCun, Yann and Bengio, Yoshua and Hinton, Geoffrey},
journal={Nature},
volume={521},
number={7553},
pages={436},
year={2015},
publisher={Nature Publishing Group}
}
@inproceedings{KingmaAdam2014,
title = {{Adam}: A Method for Stochastic Optimization},
author = {Kingma, Diederik and Ba, Jimmy},
booktitle = {Proceedings of the International Conference on Learning Representations (ICLR)},
year = {2015}
}
@techreport{tieleman2012rmsprop,
title={Lecture 6.5-rmsprop: Divide the gradient by a running average of its recent magnitude},
author={Tieleman, Tijmen and Hinton, Geoffrey},
year={2012},
institution={COURSERA: Neural Networks for Machine Learning}
}
@article{duchi2011adagrad,
title={Adaptive subgradient methods for online learning and stochastic optimization},
author={Duchi, John and Hazan, Elad and Singer, Yoram},
journal={Journal of Machine Learning Research (JMLR)},
volume={12},
number={Jul},
pages={2121--2159},
year={2011}
}
@inproceedings{meijer2006linq,
title={{LINQ}: reconciling object, relations and {XML} in the {.NET} framework},
author={Meijer, Erik and Beckman, Brian and Bierman, Gavin},
booktitle={Proceedings of the 2006 ACM SIGMOD international conference on Management of data},
pages={706},
year={2006}
}
@inproceedings{murray2013naiad,
title={Naiad: a timely dataflow system},
author={Murray, Derek G and McSherry, Frank and Isaacs, Rebecca and Isard, Michael and Barham, Paul and Abadi, Mart{\'\i}n},
booktitle={Proceedings of the Twenty-Fourth ACM Symposium on Operating Systems Principles},
pages={439--455},
year={2013}
}
@inproceedings{mnih2016asynchronous,
title={Asynchronous methods for deep reinforcement learning},
author={Mnih, Volodymyr and Badia, Adria Puigdomenech and Mirza, Mehdi and Graves, Alex and Lillicrap, Timothy and Harley, Tim and Silver, David and Kavukcuoglu, Koray},
booktitle={International Conference on Machine Learning (ICML)},
pages={1928--1937},
year={2016}
}
@article{espeholt2018impala,
title={{IMPALA}: Scalable distributed deep-{RL} with importance weighted actor-learner architectures},
author={Espeholt, Lasse and Soyer, Hubert and Munos, Remi and Simonyan, Karen and Mnih, Volodymir and Ward, Tom and Doron, Yotam and Firoiu, Vlad and Harley, Tim and Dunning, Iain and others},
journal={arXiv preprint arXiv:1802.01561},
year={2018}
}
@article{espeholt2019seed,
title={{SEED RL}: Scalable and efficient deep-{RL} with accelerated central inference},
author={Espeholt, Lasse and Marinier, Rapha{\"e}l and Stanczyk, Piotr and Wang, Ke and Michalski, Marcin},
journal={arXiv preprint arXiv:1910.06591},
year={2019}
}
@misc{horgan2018distributed,
title={Distributed Prioritized Experience Replay},
author={Dan Horgan and John Quan and David Budden and Gabriel Barth-Maron and Matteo Hessel and Hado van Hasselt and David Silver},
year={2018},
eprint={1803.00933},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
@inproceedings{moritz2018ray,
title={Ray: A distributed framework for emerging {AI} applications},
author={Moritz, Philipp and Nishihara, Robert and Wang, Stephanie and Tumanov, Alexey and Liaw, Richard and Liang, Eric and Elibol, Melih and Yang, Zongheng and Paul, William and Jordan, Michael I and others},
booktitle={13th {USENIX} Symposium on Operating Systems Design and Implementation ({OSDI} 18)},
pages={561--577},
year={2018}
}
@inproceedings{zaharia2010spark,
title={Spark: Cluster computing with working sets},
author={Zaharia, Matei and Chowdhury, Mosharaf and Franklin, Michael J and Shenker, Scott and Stoica, Ion},
booktitle={2nd USENIX Workshop on Hot Topics in Cloud Computing (HotCloud 10)},
year={2010}
}
@inproceedings{fetterly2009dryadlinq,
title={{DryadLINQ}: A system for general-purpose distributed data-parallel computing using a high-level language},
author={Yu, Yuan and Isard, Michael and Fetterly, Dennis and Budiu, Mihai and Erlingsson, {\'U}lfar and Gunda, Pradeep Kumar and Currey, Jon},
booktitle={8th USENIX Symposium on Operating Systems Design and Implementation (OSDI 08)},
year={2008}
}
@article{murray2021tf,
title={{tf.data}: A machine learning data processing framework},
author={Murray, Derek G and Simsa, Jiri and Klimovic, Ana and Indyk, Ihor},
journal={arXiv preprint arXiv:2101.12127},
year={2021}
}
@article{mohan2020analyzing,
title={Analyzing and mitigating data stalls in {DNN} training},
author={Mohan, Jayashree and Phanishayee, Amar and Raniwala, Ashish and Chidambaram, Vijay},
journal={arXiv preprint arXiv:2007.06775},
year={2020}
}
@misc{rmpygil,
author = {Sam Gross},
title = {Multithreaded Python without the GIL},
howpublished = {Website},
year = {2021},
note = {\url{https://docs.google.com/document/d/18CXhDb1ygxg-YXNBJNzfzZsDFosB5e6BfnXLlejd9l0/edit#heading=h.kcngwrty1lv}}
}
@misc{nvidia_dali,
author = {NVIDIA},
title = {DALI},
howpublished = {Website},
year = {2018},
note = {\url{https://github.com/NVIDIA/DALI}}
}
@misc{minddata,
author = {Huawei},
title = {Dataset Plugin},
howpublished = {Website},
year = {2020},
note = {\url{https://gitee.com/mindspore/dataset-plugin}}
}
@article{liang2017ray,
title={Ray {RLlib}: A composable and scalable reinforcement learning library},
author={Liang, Eric and Liaw, Richard and Nishihara, Robert and Moritz, Philipp and Fox, Roy and Gonzalez, Joseph and Goldberg, Ken and Stoica, Ion},
journal={arXiv preprint arXiv:1712.09381},
year={2017}
}
@article{cassirer2021reverb,
title={Reverb: A Framework For Experience Replay},
author={Cassirer, Albin and Barth-Maron, Gabriel and Brevdo, Eugene and Ramos, Sabela and Boyd, Toby and Sottiaux, Thibault and Kroiss, Manuel},
journal={arXiv preprint arXiv:2102.04736},
year={2021}
}
@article{hoffman2020acme,
title={Acme: A research framework for distributed reinforcement learning},
author={Hoffman, Matt and Shahriari, Bobak and Aslanides, John and Barth-Maron, Gabriel and Behbahani, Feryal and Norman, Tamara and Abdolmaleki, Abbas and Cassirer, Albin and Yang, Fan and Baumli, Kate and others},
journal={arXiv preprint arXiv:2006.00979},
year={2020}
}
@article{ding2020efficient,
title={Efficient Reinforcement Learning Development with RLzoo},
author={Ding, Zihan and Yu, Tianyang and Huang, Yanhua and Zhang, Hongming and Li, Guo and Guo, Quancheng and Mai, Luo and Dong, Hao},
journal={arXiv preprint arXiv:2009.08644},
year={2020}
}
@article{makoviychuk2021isaac,
title={Isaac Gym: High Performance GPU-Based Physics Simulation For Robot Learning},
author={Makoviychuk, Viktor and Wawrzyniak, Lukasz and Guo, Yunrong and Lu, Michelle and Storey, Kier and Macklin, Miles and Hoeller, David and Rudin, Nikita and Allshire, Arthur and Handa, Ankur and others},
journal={arXiv preprint arXiv:2108.10470},
year={2021}
}
@article{vinyals2019grandmaster,
title={Grandmaster level in StarCraft II using multi-agent reinforcement learning},
author={Vinyals, Oriol and Babuschkin, Igor and Czarnecki, Wojciech M and Mathieu, Micha{\"e}l and Dudzik, Andrew and Chung, Junyoung and Choi, David H and Powell, Richard and Ewalds, Timo and Georgiev, Petko and others},
journal={Nature},
volume={575},
number={7782},
pages={350--354},
year={2019},
publisher={Nature Publishing Group}
}
@article{berner2019dota,
title={Dota 2 with large scale deep reinforcement learning},
author={Berner, Christopher and Brockman, Greg and Chan, Brooke and Cheung, Vicki and D{\k{e}}biak, Przemys{\l}aw and Dennison, Christy and Farhi, David and Fischer, Quirin and Hashme, Shariq and Hesse, Chris and others},
journal={arXiv preprint arXiv:1912.06680},
year={2019}
}
@article{han2020tstarbot,
title={{TStarBot-X}: An open-sourced and comprehensive study for efficient league training in {StarCraft II} full game},
author={Han, Lei and Xiong, Jiechao and Sun, Peng and Sun, Xinghai and Fang, Meng and Guo, Qingwei and Chen, Qiaobo and Shi, Tengfei and Yu, Hongsheng and Wu, Xipeng and others},
journal={arXiv preprint arXiv:2011.13729},
year={2020}
}
@inproceedings{wang2021scc,
title={{SCC}: An efficient deep reinforcement learning agent mastering the game of {StarCraft II}},
author={Wang, Xiangjun and Song, Junxiao and Qi, Penghui and Peng, Peng and Tang, Zhenkun and Zhang, Wei and Li, Weimin and Pi, Xiongjun and He, Jujie and Gao, Chao and others},
booktitle={International Conference on Machine Learning},
pages={10905--10915},
year={2021},
organization={PMLR}
}
@inproceedings{MLSYS2021_979d472a,
author = {Yin, Chunxing and Acun, Bilge and Wu, Carole-Jean and Liu, Xing},
booktitle = {Proceedings of Machine Learning and Systems},
editor = {A. Smola and A. Dimakis and I. Stoica},
pages = {448--462},
title = {TT-Rec: Tensor Train Compression for Deep Learning Recommendation Models},
url = {https://proceedings.mlsys.org/paper/2021/file/979d472a84804b9f647bc185a877a8b5-Paper.pdf},
volume = {3},
year = {2021}
}
@inproceedings{MLSYS2020_f7e6c855,
author = {Zhao, Weijie and Xie, Deping and Jia, Ronglai and Qian, Yulei and Ding, Ruiquan and Sun, Mingming and Li, Ping},
booktitle = {Proceedings of Machine Learning and Systems},
editor = {I. Dhillon and D. Papailiopoulos and V. Sze},
pages = {412--428},
title = {Distributed Hierarchical GPU Parameter Server for Massive Scale Deep Learning Ads Systems},
url = {https://proceedings.mlsys.org/paper/2020/file/f7e6c85504ce6e82442c770f7c8606f0-Paper.pdf},
volume = {2},
year = {2020}
}
@article{zionex,
title={Software-Hardware Co-design for Fast and Scalable Training of Deep Learning Recommendation Models},
author={Mudigere, Dheevatsa and Hao, Yuchen and Huang, Jianyu and Jia, Zhihao and Tulloch, Andrew and Sridharan, Srinivas and Liu, Xing and Ozdal, Mustafa and Nie, Jade and Park, Jongsoo and others},
journal={arXiv preprint arXiv:2104.05158},
year={2021}
}
@inproceedings{gong2020edgerec,
title={EdgeRec: Recommender System on Edge in Mobile Taobao},
author={Gong, Yu and Jiang, Ziwen and Feng, Yufei and Hu, Binbin and Zhao, Kaiqi and Liu, Qingwen and Ou, Wenwu},
booktitle={Proceedings of the 29th ACM International Conference on Information \& Knowledge Management},
pages={2477--2484},
year={2020}
}
@inproceedings{NEURIPS2020_a1d4c20b,
author = {He, Chaoyang and Annavaram, Murali and Avestimehr, Salman},
booktitle = {Advances in Neural Information Processing Systems},
editor = {H. Larochelle and M. Ranzato and R. Hadsell and M. F. Balcan and H. Lin},
pages = {14068--14080},
publisher = {Curran Associates, Inc.},
title = {Group Knowledge Transfer: Federated Learning of Large CNNs at the Edge},
url = {https://proceedings.neurips.cc/paper/2020/file/a1d4c20b182ad7137ab3606f0e3fc8a4-Paper.pdf},
volume = {33},
year = {2020}
}
@INPROCEEDINGS{9355295,
author={Xie, Minhui and Ren, Kai and Lu, Youyou and Yang, Guangxu and Xu, Qingxing and Wu, Bihai and Lin, Jiazhen and Ao, Hongbo and Xu, Wanhong and Shu, Jiwu},
booktitle={SC20: International Conference for High Performance Computing, Networking, Storage and Analysis},
title={Kraken: Memory-Efficient Continual Learning for Large-Scale Real-Time Recommendations},
year={2020},
pages={1--17},
doi={10.1109/SC41405.2020.00025}
}
@inproceedings{MLSYS2021_ec895663,
author = {Jiang, Wenqi and He, Zhenhao and Zhang, Shuai and Preu{\ss}er, Thomas B. and Zeng, Kai and Feng, Liang and Zhang, Jiansong and Liu, Tongxuan and Li, Yong and Zhou, Jingren and Zhang, Ce and Alonso, Gustavo},
booktitle = {Proceedings of Machine Learning and Systems},
editor = {A. Smola and A. Dimakis and I. Stoica},
pages = {845--859},
title = {MicroRec: Efficient Recommendation Inference by Hardware and Data Structure Solutions},
url = {https://proceedings.mlsys.org/paper/2021/file/ec8956637a99787bd197eacd77acce5e-Paper.pdf},
volume = {3},
year = {2021}
}
@inproceedings{10.1145/3394486.3403059,
author = {Shi, Hao-Jun Michael and Mudigere, Dheevatsa and Naumov, Maxim and Yang, Jiyan},
title = {Compositional Embeddings Using Complementary Partitions for Memory-Efficient Recommendation Systems},
year = {2020},
isbn = {9781450379984},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3394486.3403059},
doi = {10.1145/3394486.3403059},
booktitle = {Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery \& Data Mining},
pages = {165--175},
numpages = {11},
keywords = {model compression, recommendation systems, embeddings},
location = {Virtual Event, CA, USA},
series = {KDD '20}
}
@misc{ginart2021mixed,
title={Mixed Dimension Embeddings with Application to Memory-Efficient Recommendation Systems},
author={Antonio Ginart and Maxim Naumov and Dheevatsa Mudigere and Jiyan Yang and James Zou},
year={2021},
eprint={1909.11810},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
@inproceedings{10.1145/2020408.2020444,
author = {Chu, Wei and Zinkevich, Martin and Li, Lihong and Thomas, Achint and Tseng, Belle},
title = {Unbiased Online Active Learning in Data Streams},
year = {2011},
isbn = {9781450308137},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/2020408.2020444},
doi = {10.1145/2020408.2020444},
booktitle = {Proceedings of the 17th ACM SIGKDD International Conference on Knowledge Discovery and Data Mining},
pages = {195--203},
numpages = {9},
keywords = {unbiasedness, bayesian online learning, active learning, data streaming, adaptive importance sampling},
location = {San Diego, California, USA},
series = {KDD '11}
}
@inproceedings{10.1145/3267809.3267817,
author = {Tian, Huangshi and Yu, Minchen and Wang, Wei},
title = {Continuum: A Platform for Cost-Aware, Low-Latency Continual Learning},
year = {2018},
isbn = {9781450360111},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3267809.3267817},
doi = {10.1145/3267809.3267817},
booktitle = {Proceedings of the ACM Symposium on Cloud Computing},
pages = {26--40},
numpages = {15},
keywords = {Competitive Analysis, Continual Learning System, Online Algorithm},
location = {Carlsbad, CA, USA},
series = {SoCC '18}
}
@inproceedings{10.1145/2648584.2648589,
author = {He, Xinran and Pan, Junfeng and Jin, Ou and Xu, Tianbing and Liu, Bo and Xu, Tao and Shi, Yanxin and Atallah, Antoine and Herbrich, Ralf and Bowers, Stuart and Candela, Joaquin Qui\~{n}onero},
title = {Practical Lessons from Predicting Clicks on Ads at Facebook},
year = {2014},
isbn = {9781450329996},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/2648584.2648589},
doi = {10.1145/2648584.2648589},
booktitle = {Proceedings of the Eighth International Workshop on Data Mining for Online Advertising},
pages = {1--9},
numpages = {9},
location = {New York, NY, USA},
series = {ADKDD'14}
}
@misc{2017NVIDIA,
author={NVIDIA},
title={NVIDIA Tesla V100 GPU Architecture: The World's Most Advanced Datacenter GPU},
year={2017},
howpublished = "Website",
note = {\url{http://www.nvidia.com/object/volta-architecture-whitepaper.html}}
}
@article{2018Modeling,
title={Modeling Deep Learning Accelerator Enabled GPUs},
author={Raihan, Md Aamir and Goli, Negar and Aamodt, Tor M.},
journal={arXiv preprint arXiv:1811.08309},
year={2018}
}
@misc{2020MLIR,
title={MLIR: A Compiler Infrastructure for the End of Moore's Law},
author={Lattner, Chris and Amini, Mehdi and Bondhugula, Uday and Cohen, Albert and Davis, Andy and Pienaar, Jacques and Riddle, River and Shpeisman, Tatiana and Vasilache, Nicolas and Zinenko, Oleksandr},
note={arXiv preprint arXiv:2002.11054},
year={2020},
}
@book{2007Engineering,
title={Engineering a Compiler},
author={Cooper, Keith D. and Torczon, Linda},
publisher={Morgan Kaufmann},
year={2007},
}
@book{2007Compilers,
title={Compilers: Principles, Techniques, and Tools},
author={Aho, Alfred V. and Lam, Monica S. and Ullman, Jeffrey D. and Sethi, Ravi},
edition={2nd},
publisher={Addison-Wesley},
year={2007},
}
@inproceedings{2004LLVM,
title={{LLVM}: A Compilation Framework for Lifelong Program Analysis \& Transformation},
author={Lattner, Chris and Adve, Vikram},
booktitle={International Symposium on Code Generation and Optimization (CGO 2004)},
year={2004},
}
@article{Richard1995A,
title={A correspondence between continuation passing style and static single assignment form},
author={Kelsey, Richard A.},
journal={ACM SIGPLAN Notices},
year={1995},
}
@article{2010C,
title={C++ lambda expressions and closures},
author={J{\"a}rvi, Jaakko and Freeman, John},
journal={Science of Computer Programming},
volume={75},
number={9},
pages={762--772},
year={2010},
}
@article{spuler1994compiler,
title={Compiler detection of function call side effects},
author={Spuler, David A and Sajeev, A Sayed Muhammed},
journal={Informatica},
volume={18},
number={2},
pages={219--227},
year={1994},
publisher={Citeseer}
}
@book{10.5555/1455489,
author = {Griewank, Andreas and Walther, Andrea},
title = {Evaluating Derivatives: Principles and Techniques of Algorithmic Differentiation},
year = {2008},
isbn = {0898716594},
publisher = {Society for Industrial and Applied Mathematics},
address = {USA},
edition = {Second},
}
@article{2015Automatic,
title={Automatic Differentiation in Machine Learning: a Survey},
author={Baydin, At{\i}l{\i}m G{\"u}ne{\c{s}} and Pearlmutter, Barak A. and Radul, Alexey Andreyevich and Siskind, Jeffrey Mark},
journal={arXiv preprint arXiv:1502.05767},
year={2015},
}
@book{2015Numerical,
title={Numerical Analysis},
author={Burden, Richard L. and Faires, J. Douglas},
publisher={Cengage Learning},
year={2015},
}
@book{2003Computer,
title={Computer Algebra Handbook: Foundations, Applications, Systems},
author={Grabmeier, Johannes and Kaltofen, Erich and Weispfenning, Volker},
publisher={Springer},
year={2003},
}
@inbook{10.5555/60181.60188,
author = {Corliss, George F.},
title = {Applications of Differentiation Arithmetic},
year = {1988},
isbn = {0125056303},
publisher = {Academic Press Professional, Inc.},
address = {USA},
booktitle = {Reliability in Computing: The Role of Interval Methods in Scientific Computing},
pages = {127--148},
numpages = {22}
}
@article{2000An,
title={An introduction to automatic differentiation},
author={Verma, Arun},
journal={Current Science},
volume={78},
number={7},
pages={804--807},
year={2000},
}
@inproceedings{2006The,
title={The Data-Flow Equations of Checkpointing in Reverse Automatic Differentiation},
author={Dauvergne, Benjamin and Hasco{\"e}t, Laurent},
booktitle={Computational Science -- ICCS 2006: 6th International Conference, Reading, UK},
year={2006},
}
@article{2017Divide,
title={Divide-and-Conquer Checkpointing for Arbitrary Programs with No User Annotation},
author={Siskind, Jeffrey Mark and Pearlmutter, Barak A.},
journal={Optimization Methods and Software},
volume={33},
number={4-6},
year={2017},
}
@article{1969The,
title={The Principal Type-Scheme of an Object in Combinatory Logic},
author={Hindley, R.},
journal={Transactions of the American Mathematical Society},
volume={146},
pages={29--60},
year={1969},
}
@article{1978A,
title={A theory of type polymorphism in programming},
author={Milner, Robin},
journal={Journal of Computer and System Sciences},
volume={17},
number={3},
pages={348--375},
year={1978},
}
@article{ragan2013halide,
title={Halide: a language and compiler for optimizing parallelism, locality, and recomputation in image processing pipelines},
author={Ragan-Kelley, Jonathan and Barnes, Connelly and Adams, Andrew and Paris, Sylvain and Durand, Fr{\'e}do and Amarasinghe, Saman},
journal={ACM SIGPLAN Notices},
volume={48},
number={6},
pages={519--530},
year={2013},
publisher={ACM New York, NY, USA}
}
@inproceedings{verdoolaege2010isl,
title={isl: An integer set library for the polyhedral model},
author={Verdoolaege, Sven},
booktitle={International Congress on Mathematical Software},
pages={299--302},
year={2010},
organization={Springer}
}
@article{chen2018tvm,
title={TVM: end-to-end optimization stack for deep learning},
author={Chen, Tianqi and Moreau, Thierry and Jiang, Ziheng and Shen, Haichen and Yan, Eddie Q and Wang, Leyuan and Hu, Yuwei and Ceze, Luis and Guestrin, Carlos and Krishnamurthy, Arvind},
journal={arXiv preprint arXiv:1802.04799},
year={2018}
}
@inproceedings{zheng2020ansor,
title={Ansor: Generating High-Performance Tensor Programs for Deep Learning},
author={Zheng, Lianmin and Jia, Chengfan and Sun, Minmin and Wu, Zhao and Yu, Cody Hao and Haj-Ali, Ameer and Wang, Yida and Yang, Jun and Zhuo, Danyang and Sen, Koushik and others},
booktitle={14th USENIX Symposium on Operating Systems Design and Implementation (OSDI 20)},
pages={863--879},
year={2020}
}
@inproceedings{zhao2021akg,
title={AKG: automatic kernel generation for neural processing units using polyhedral transformations},
author={Zhao, Jie and Li, Bojie and Nie, Wang and Geng, Zhen and Zhang, Renwei and Gao, Xiong and Cheng, Bin and Wu, Chen and Cheng, Yun and Li, Zheng and others},
booktitle={Proceedings of the 42nd ACM SIGPLAN International Conference on Programming Language Design and Implementation},
pages={1233--1248},
year={2021}
}
@article{lattner2020mlir,
title={MLIR: A compiler infrastructure for the end of Moore's law},
author={Lattner, Chris and Amini, Mehdi and Bondhugula, Uday and Cohen, Albert and Davis, Andy and Pienaar, Jacques and Riddle, River and Shpeisman, Tatiana and Vasilache, Nicolas and Zinenko, Oleksandr},
journal={arXiv preprint arXiv:2002.11054},
year={2020}
}
@article{vasilache2022composable,
title={Composable and Modular Code Generation in MLIR: A Structured and Retargetable Approach to Tensor Compiler Construction},
author={Vasilache, Nicolas and Zinenko, Oleksandr and Bik, Aart JC and Ravishankar, Mahesh and Raoux, Thomas and Belyaev, Alexander and Springer, Matthias and Gysi, Tobias and Caballero, Diego and Herhut, Stephan and others},
journal={arXiv preprint arXiv:2202.03293},
year={2022}
}
@inproceedings{bastoul2004code,
title={Code generation in the polyhedral model is easier than you think},
author={Bastoul, C{\'e}dric},
booktitle={Proceedings. 13th International Conference on Parallel Architecture and Compilation Techniques, 2004. PACT 2004.},
pages={7--16},
year={2004},
organization={IEEE}
}
@ARTICLE{2020tkde_li,
author={Li, Xiao-Hui and Cao, Caleb Chen and Shi, Yuhan and Bai, Wei and Gao, Han and Qiu, Luyu and Wang, Cong and Gao, Yuanyuan and Zhang, Shenjia and Xue, Xun and Chen, Lei},
journal={IEEE Transactions on Knowledge and Data Engineering},
title={A Survey of Data-driven and Knowledge-aware eXplainable AI},
year={2020},
pages={1--1},
doi={10.1109/TKDE.2020.2983930}
}
@article{erhan2009visualizing,
title={Visualizing higher-layer features of a deep network},
author={Erhan, Dumitru and Bengio, Yoshua and Courville, Aaron and Vincent, Pascal},
journal={University of Montreal},
volume={1341},
number={3},
pages={1},
year={2009}
}
@misc{kim2018interpretability,
title={Interpretability Beyond Feature Attribution: Quantitative Testing with Concept Activation Vectors (TCAV)},
author={Been Kim and Martin Wattenberg and Justin Gilmer and Carrie Cai and James Wexler and Fernanda Viegas and Rory Sayres},
year={2018},
eprint={1711.11279},
archivePrefix={arXiv},
primaryClass={stat.ML}
}
@article{riedl2019human,
title={Human-centered artificial intelligence and machine learning},
author={Riedl, Mark O.},
journal={Human Behavior and Emerging Technologies},
volume={1},
number={1},
pages={33--36},
year={2019},
publisher={Wiley Online Library}
}
@inproceedings{10.1145/2988450.2988454,
author = {Cheng, Heng-Tze and Koc, Levent and Harmsen, Jeremiah and Shaked, Tal and Chandra, Tushar and Aradhye, Hrishi and Anderson, Glen and Corrado, Greg and Chai, Wei and Ispir, Mustafa and Anil, Rohan and Haque, Zakaria and Hong, Lichan and Jain, Vihan and Liu, Xiaobing and Shah, Hemal},
title = {Wide \& Deep Learning for Recommender Systems},
year = {2016},
isbn = {9781450347952},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/2988450.2988454},
doi = {10.1145/2988450.2988454},
booktitle = {Proceedings of the 1st Workshop on Deep Learning for Recommender Systems},
pages = {7--10},
numpages = {4},
keywords = {Recommender Systems, Wide \& Deep Learning},
location = {Boston, MA, USA},
series = {DLRS 2016}
}
@inproceedings{10.1145/3124749.3124754,
author = {Wang, Ruoxi and Fu, Bin and Fu, Gang and Wang, Mingliang},
title = {Deep \& Cross Network for Ad Click Predictions},
year = {2017},
isbn = {9781450351942},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3124749.3124754},
doi = {10.1145/3124749.3124754},
booktitle = {Proceedings of the ADKDD'17},
articleno = {12},
numpages = {7},
keywords = {CTR Prediction, Deep Learning, Neural Networks, Feature Crossing},
location = {Halifax, NS, Canada},
series = {ADKDD'17}
}
@inproceedings{ijcai2017-239,
author = {Huifeng Guo and Ruiming Tang and Yunming Ye and Zhenguo Li and Xiuqiang He},
title = {{DeepFM}: A Factorization-Machine based Neural Network for {CTR} Prediction},
booktitle = {Proceedings of the Twenty-Sixth International Joint Conference on Artificial Intelligence, {IJCAI-17}},
pages = {1725--1731},
year = {2017},
doi = {10.24963/ijcai.2017/239},
url = {https://doi.org/10.24963/ijcai.2017/239},
}
@article{naumov2019deep,
title={Deep learning recommendation model for personalization and recommendation systems},
author={Naumov, Maxim and Mudigere, Dheevatsa and Shi, Hao-Jun Michael and Huang, Jianyu and Sundaraman, Narayanan and Park, Jongsoo and Wang, Xiaodong and Gupta, Udit and Wu, Carole-Jean and Azzolini, Alisson G and others},
journal={arXiv preprint arXiv:1906.00091},
year={2019}
}