Link to original content: https://api.crossref.org/works/10.1109/TVCG.2021.3115902
{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,17]],"date-time":"2024-09-17T10:56:35Z","timestamp":1726570595770},"reference-count":89,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","issue":"2","license":[{"start":{"date-parts":[[2023,2,1]],"date-time":"2023-02-01T00:00:00Z","timestamp":1675209600000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/ieeexplore.ieee.org\/Xplorehelp\/downloads\/license-information\/IEEE.html"},{"start":{"date-parts":[[2023,2,1]],"date-time":"2023-02-01T00:00:00Z","timestamp":1675209600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,2,1]],"date-time":"2023-02-01T00:00:00Z","timestamp":1675209600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Trans. Visual. Comput. Graphics"],"published-print":{"date-parts":[[2023,2,1]]},"DOI":"10.1109\/tvcg.2021.3115902","type":"journal-article","created":{"date-parts":[[2021,9,28]],"date-time":"2021-09-28T21:30:39Z","timestamp":1632864639000},"page":"1400-1414","source":"Crossref","is-referenced-by-count":2,"title":["A Music-Driven Deep Generative Adversarial Model for Guzheng Playing Animation"],"prefix":"10.1109","volume":"29","author":[{"given":"Jiali","family":"Chen","sequence":"first","affiliation":[{"name":"Netease Fuxi AI Lab, Netease, Hangzhou, Zhejiang, China"}]},{"given":"Changjie","family":"Fan","sequence":"additional","affiliation":[{"name":"Netease Fuxi AI Lab, Netease, Hangzhou, Zhejiang, China"}]},{"ORCID":"http:\/\/orcid.org\/0000-0002-3695-1129","authenticated-orcid":false,"given":"Zhimeng","family":"Zhang","sequence":"additional","affiliation":[{"name":"Netease Fuxi AI Lab, Netease, Hangzhou, Zhejiang, China"}]},{"given":"Gongzheng","family":"Li","sequence":"additional","affiliation":[{"name":"Netease Fuxi AI Lab, Netease, Hangzhou, Zhejiang, China"}]},{"ORCID":"http:\/\/orcid.org\/0000-0002-7292-876X","authenticated-orcid":false,"given":"Zeng","family":"Zhao","sequence":"additional","affiliation":[{"name":"Netease Fuxi AI Lab, Netease, Hangzhou, Zhejiang, China"}]},{"ORCID":"http:\/\/orcid.org\/0000-0003-2571-5865","authenticated-orcid":false,"given":"Zhigang","family":"Deng","sequence":"additional","affiliation":[{"name":"Department of Computer Science, University of Houston, Houston, TX, USA"}]},{"ORCID":"http:\/\/orcid.org\/0000-0003-1834-4429","authenticated-orcid":false,"given":"Yu","family":"Ding","sequence":"additional","affiliation":[{"name":"Netease Fuxi AI Lab, Netease, Hangzhou, Zhejiang, China"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-540-24598-8_44"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1002\/cav.1477"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1525\/mp.2007.24.5.433"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1177\/030573569302100201"},{"key":"ref5","first-page":"28","article-title":"A fuzzy analyzer of emotional expression in music performance and body motion","volume-title":"Proc. Music Music Sci.","author":"Sundberg"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1609\/aimag.v30i3.2249"},{"key":"ref7","first-page":"71","article-title":"Gesture - Music","volume-title":"Proc. 
Trends Gestural Control Music","author":"Cadoz"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1177\/1029864911423457"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2018.00790"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1162\/neco.1997.9.8.1735"},{"key":"ref11","first-page":"933","article-title":"Language modeling with gated convolutional networks","volume-title":"Proc. Int. Conf. Mach. Learn.","author":"Dauphin"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-24574-4_28"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1109\/NAECON.2018.8556686"},{"key":"ref14","article-title":"Attention U-net: Learning where to look for the Pancreas","author":"Oktay","year":"2018"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.632"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.00361"},{"key":"ref17","first-page":"2672","article-title":"Generative adversarial nets","volume-title":"Proc. Neural Inf. Process. Syst.","author":"Goodfellow"},{"key":"ref18","article-title":"Conditional generative adversarial nets","author":"Mirza","year":"2014"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1111\/cgf.13586"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1109\/TPAMI.2019.2929257"},{"key":"ref21","article-title":"Personalized speech2video with 3D skeleton regularization and expressive body poses","author":"Liao","year":"2020"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1145\/311535.311537"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.1002\/cav.80"},{"key":"ref24","first-page":"251","article-title":"eFASE: Expressive facial animation synthesis and editing with phoneme-isomap controls","volume-title":"Proc. Symp. Comput. Animation","author":"Deng"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2006.885910"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1109\/TASL.2012.2201476"},{"key":"ref27","doi-asserted-by":"publisher","DOI":"10.1109\/TVCG.2012.74"},{"key":"ref28","first-page":"103","article-title":"Eyebrow motion synthesis driven by speech","volume-title":"Proc. Workshop Affect Compagnon Artificiel Interact. (WACAI)","author":"Ding"},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP.2013.6638360"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-642-40415-3_19"},{"key":"ref31","first-page":"773","article-title":"Laughter animation synthesis","volume-title":"Proc. Int. Conf. Auton. Agents Multiagent Syst.","author":"Ding"},{"key":"ref32","first-page":"78","article-title":"Lip animation synthesis: A unified framework for speaking and laughing virtual agent","volume-title":"Proc. AVSP","author":"Ding"},{"key":"ref33","first-page":"1817","article-title":"Laughing with a virtual agent","volume-title":"Proc. Int. Conf. Auton. 
Agents Multiagent Syst.","author":"Pecune"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-21996-7_16"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1145\/2897824.2925984"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1145\/3072959.3073658"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1145\/3072959.3073699"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1145\/2998571"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2017.287"},{"key":"ref40","doi-asserted-by":"publisher","DOI":"10.1109\/TAFFC.2017.2754365"},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-30808-1_190-1"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1145\/3242969.3243017"},{"key":"ref43","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2019.01034"},{"key":"ref44","doi-asserted-by":"publisher","DOI":"10.1145\/3308532.3329445"},{"key":"ref45","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v35i3.16286"},{"key":"ref46","doi-asserted-by":"publisher","DOI":"10.24963\/ijcai.2021\/152"},{"key":"ref47","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR46437.2021.00366"},{"key":"ref48","doi-asserted-by":"publisher","DOI":"10.1007\/978-1-84628-907-1_1"},{"key":"ref49","article-title":"Practice and theory of blendshape facial models","volume-title":"Proc. Eurographics- State Art Rep.","volume":"1","author":"Lewis"},{"key":"ref50","doi-asserted-by":"publisher","DOI":"10.1145\/3267851.3267898"},{"key":"ref51","doi-asserted-by":"publisher","DOI":"10.1145\/3308532.3329472"},{"key":"ref52","doi-asserted-by":"publisher","DOI":"10.1145\/3340250"},{"key":"ref53","doi-asserted-by":"publisher","DOI":"10.1016\/j.robot.2018.11.024"},{"key":"ref54","doi-asserted-by":"publisher","DOI":"10.1111\/j.1467-8659.2006.00964.x"},{"key":"ref55","doi-asserted-by":"publisher","DOI":"10.1007\/s11042-012-1288-5"},{"key":"ref56","doi-asserted-by":"publisher","DOI":"10.1145\/2663806.2663849"},{"key":"ref57","first-page":"2431","article-title":"Kinetic imaginations: Exploring the possibilities of combining AI and dance","volume-title":"Proc. 24th Int. Joint Conf. Artif. Intell.","author":"Berman"},{"key":"ref58","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3414005"},{"key":"ref59","doi-asserted-by":"publisher","DOI":"10.1145\/3450626.3459932"},{"key":"ref60","doi-asserted-by":"publisher","DOI":"10.1109\/TMM.2011.2181492"},{"issue":"17","key":"ref61","first-page":"26","article-title":"GrooveNet: Real-time music-driven dance movement generation using artificial neural networks","volume-title":"Proc. Workshop Mach. Learn. Creativity, 23rd ACM SIGKDD Conf. Knowl. Discov. Data Mining","volume":"8","author":"Alemi"},{"key":"ref62","doi-asserted-by":"publisher","DOI":"10.1145\/3240508.3240526"},{"key":"ref63","first-page":"3581","article-title":"Dancing to music","volume-title":"Proc. Neural Inf. Process. Syst.","author":"Lee"},{"key":"ref64","first-page":"894","article-title":"Automatic choreography generation with convolutional encoder-decoder network","volume-title":"Proc. ISMIR Conf.","author":"Lee"},{"key":"ref65","article-title":"Dance revolution: Long-term dance generation with music via curriculum learning","author":"Huang","year":"2020"},{"key":"ref66","article-title":"Towards movement generation with audio features","author":"Wallace","year":"2020"},{"key":"ref67","first-page":"218","article-title":"Skeleton plays piano: Online generation of pianist body movements from midi performance","volume-title":"Proc. 
ISMIR Conf.","author":"Li"},{"key":"ref68","doi-asserted-by":"publisher","DOI":"10.1109\/ICASSP40776.2020.9054463"},{"key":"ref69","doi-asserted-by":"publisher","DOI":"10.1145\/3395035.3425244"},{"key":"ref70","doi-asserted-by":"publisher","DOI":"10.1145\/3394171.3413848"},{"key":"ref71","article-title":"An empirical evaluation of generic convolutional and recurrent networks for sequence modeling","author":"Bai","year":"2018"},{"key":"ref72","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref73","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2017.243"},{"key":"ref74","doi-asserted-by":"publisher","DOI":"10.1109\/MSP.2017.2765202"},{"key":"ref75","doi-asserted-by":"publisher","DOI":"10.1155\/2011\/158970"},{"key":"ref76","doi-asserted-by":"publisher","DOI":"10.21437\/Interspeech.2018-1830"},{"key":"ref77","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-019-01245-6"},{"key":"ref78","article-title":"Quaternet: A quaternion-based recurrent model for human motion","author":"Pavllo","year":"2018"},{"key":"ref79","doi-asserted-by":"publisher","DOI":"10.1016\/0021-9045(76)90040-X"},{"key":"ref80","doi-asserted-by":"crossref","DOI":"10.23915\/distill.00003","article-title":"Deconvolution and checkerboard artifacts","volume-title":"Distill","author":"Odena","year":"2016"},{"key":"ref81","article-title":"Batch normalization: Accelerating deep network training by reducing internal covariate shift","author":"Ioffe","year":"2015"},{"key":"ref82","doi-asserted-by":"publisher","DOI":"10.5555\/3104322.3104425"},{"key":"ref83","article-title":"Adam: A method for stochastic optimization","author":"Kingma","year":"2014"},{"key":"ref84","doi-asserted-by":"publisher","DOI":"10.1017\/9781108924238.008"},{"key":"ref85","doi-asserted-by":"publisher","DOI":"10.1007\/s00138-020-01157-3"},{"key":"ref86","doi-asserted-by":"publisher","DOI":"10.1109\/CVPR.2015.7298958"},{"key":"ref87","doi-asserted-by":"publisher","DOI":"10.1109\/TASSP.1975.1162641"},{"key":"ref88","doi-asserted-by":"publisher","DOI":"10.1145\/322033.322044"},{"key":"ref89","doi-asserted-by":"publisher","DOI":"10.1002\/9780470479216.corpsy0524"}],"container-title":["IEEE Transactions on Visualization and Computer Graphics"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/2945\/10003072\/09551755.pdf?arnumber=9551755","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,1,11]],"date-time":"2024-01-11T22:24:36Z","timestamp":1705011876000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9551755\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,2,1]]},"references-count":89,"journal-issue":{"issue":"2"},"URL":"http:\/\/dx.doi.org\/10.1109\/tvcg.2021.3115902","relation":{},"ISSN":["1077-2626","1941-0506","2160-9306"],"issn-type":[{"value":"1077-2626","type":"print"},{"value":"1941-0506","type":"electronic"},{"value":"2160-9306","type":"electronic"}],"subject":[],"published":{"date-parts":[[2023,2,1]]}}}