Here you will find the BibTeX citations of the original works presenting these tokenizations.
@inproceedings{miditok2021,
title={{MidiTok}: A Python package for {MIDI} file tokenization},
author={Fradet, Nathan and Briot, Jean-Pierre and Chhel, Fabien and El Fallah Seghrouchni, Amal and Gutowski, Nicolas},
booktitle={Extended Abstracts for the Late-Breaking Demo Session of the 22nd International Society for Music Information Retrieval Conference},
year={2021},
url={https://archives.ismir.net/ismir2021/latebreaking/000005.pdf},
}
@inproceedings{huang_remi_2020,
author = {Huang, Yu-Siang and Yang, Yi-Hsuan},
title = {Pop Music Transformer: Beat-Based Modeling and Generation of Expressive Pop Piano Compositions},
year = {2020},
isbn = {9781450379885},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3394171.3413671},
doi = {10.1145/3394171.3413671},
booktitle = {Proceedings of the 28th ACM International Conference on Multimedia},
pages = {1180–1188},
numpages = {9},
keywords = {transformer, neural sequence model, automatic music composition},
location = {Seattle, WA, USA},
series = {MM '20}
}
@article{oore_midilike_2018,
author={Oore, Sageev and Simon, Ian and Dieleman, Sander and Eck, Douglas and Simonyan, Karen},
title={This Time with Feeling: Learning Expressive Musical Performance},
journal={Neural Computing and Applications},
volume={32},
year={2018},
pages={955–967},
url={https://link.springer.com/article/10.1007/s00521-018-3758-9},
publisher={Springer}
}
@misc{Fradet2023BPE-Symbolic-Music,
doi = {10.48550/ARXIV.2301.11975},
url = {https://arxiv.org/abs/2301.11975},
author = {Fradet, Nathan and Briot, Jean-Pierre and Chhel, Fabien and El Fallah Seghrouchni, Amal and Gutowski, Nicolas},
title = {Byte Pair Encoding for Symbolic Music},
publisher = {arXiv},
year = {2023},
}
@misc{pia2021hadjeres,
title={The Piano Inpainting Application},
author={Gaëtan Hadjeres and Léopold Crestel},
year={2021},
eprint={2107.05944},
archivePrefix={arXiv},
primaryClass={cs.SD},
url={https://arxiv.org/abs/2107.05944},
}
@article{cpword2021,
title={Compound Word Transformer: Learning to Compose Full-Song Music over Dynamic Directed Hypergraphs},
volume={35},
url={https://ojs.aaai.org/index.php/AAAI/article/view/16091},
DOI={10.1609/aaai.v35i1.16091},
number={1},
journal={Proceedings of the AAAI Conference on Artificial Intelligence},
author={Hsiao, Wen-Yi and Liu, Jen-Yu and Yeh, Yin-Cheng and Yang, Yi-Hsuan},
year={2021},
month={May},
pages={178–186}
}
@inproceedings{zeng2021musicbert,
title = "{M}usic{BERT}: Symbolic Music Understanding with Large-Scale Pre-Training",
author = "Zeng, Mingliang and Tan, Xu and Wang, Rui and Ju, Zeqian and Qin, Tao and Liu, Tie-Yan",
booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021",
month = aug,
year = "2021",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2021.findings-acl.70",
doi = "10.18653/v1/2021.findings-acl.70",
pages = "791--800",
}
@inproceedings{popmag2020,
author = {Ren, Yi and He, Jinzheng and Tan, Xu and Qin, Tao and Zhao, Zhou and Liu, Tie-Yan},
title = {PopMAG: Pop Music Accompaniment Generation},
year = {2020},
isbn = {9781450379885},
publisher = {Association for Computing Machinery},
url = {https://arxiv.org/abs/2008.07703},
doi = {10.1145/3394171.3413721},
abstract = {MuMIDI encoding, similar to CP. Generates multitrack music by filling the tokens of every track into a single sequence.},
booktitle = {Proceedings of the 28th ACM International Conference on Multimedia},
pages = {1198–1206},
numpages = {9},
keywords = {accompaniment generation, music representation, music generation, sequence-to-sequence model, pop music},
location = {Seattle, WA, USA}
}