@inproceedings{f2f156e2d0244a4bbd8f7475fc98e822,
  title     = {Animation Synthesis Triggered by Vocal Mimics},
  author    = {Nivaggioli, Adrien and Rohmer, Damien},
  editor    = {Spencer, Stephen N.},
  booktitle = {Proceedings - MIG 2019: ACM Conference on Motion, Interaction, and Games},
  publisher = {Association for Computing Machinery, Inc},
  year      = {2019},
  month     = oct,
  day       = {28},
  doi       = {10.1145/3359566.3360067},
  language  = {English},
  keywords  = {Sound-Driven Animation, Timing, Voice},
  abstract  = {We propose a method leveraging the naturally time-related expressivity of our voice to control an animation composed of a set of short events. The user records itself mimicking onomatopoeia sounds such as ``Tick'', ``Pop'', or ``Chhh'' which are associated with specific animation events. The recorded soundtrack is automatically analyzed to extract every instant and types of sounds. We finally synthesize an animation where each event type and timing correspond with the soundtrack. In addition to being a natural way to control animation timing, we demonstrate that multiple stories can be efficiently generated by recording different voice sequences. Also, the use of more than one soundtrack allows us to control different characters with overlapping actions.},
  note      = {Publisher Copyright: {\textcopyright} 2019 Copyright held by the owner/author(s). Publication rights licensed to ACM.; 2019 ACM Conference on Motion, Interaction, and Games, MIG 2019 ; Conference date: 28-10-2019 Through 30-10-2019},
}