Crossref REST API record for the article "Fast 2-step regularization on style optimization for real face morphing" (Neural Networks, 2022). Source: https://api.crossref.org/works/10.1016/J.NEUNET.2022.08.007
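
The JSON record below can be re-fetched directly from that endpoint. A minimal sketch in Python follows, assuming the third-party requests package is installed; the fetch_work helper name and the mailto address in the User-Agent header are illustrative placeholders, not part of Crossref's own tooling.

# Minimal sketch: fetch the Crossref "work" record shown below and check the
# response envelope. Assumes the third-party "requests" package.
import requests

CROSSREF_WORKS = "https://api.crossref.org/works/10.1016/J.NEUNET.2022.08.007"

def fetch_work(url: str) -> dict:
    # Crossref asks clients to identify themselves (e.g. a mailto in the
    # User-Agent) to be routed to its "polite" pool; the address here is a
    # placeholder.
    headers = {"User-Agent": "metadata-check/0.1 (mailto:you@example.org)"}
    resp = requests.get(url, headers=headers, timeout=30)
    resp.raise_for_status()
    return resp.json()

if __name__ == "__main__":
    record = fetch_work(CROSSREF_WORKS)
    # The payload of interest lives under "message", as in the record below.
    assert record["status"] == "ok" and record["message-type"] == "work"
    print(record["message"]["DOI"], "-", record["message"]["title"][0])
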
{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,15]],"date-time":"2024-09-15T05:53:28Z","timestamp":1726379608070},"reference-count":33,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2022,11,1]],"date-time":"2022-11-01T00:00:00Z","timestamp":1667260800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2022,11,1]],"date-time":"2022-11-01T00:00:00Z","timestamp":1667260800000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2022,11,1]],"date-time":"2022-11-01T00:00:00Z","timestamp":1667260800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2022,11,1]],"date-time":"2022-11-01T00:00:00Z","timestamp":1667260800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2022,11,1]],"date-time":"2022-11-01T00:00:00Z","timestamp":1667260800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2022,11,1]],"date-time":"2022-11-01T00:00:00Z","timestamp":1667260800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,11,1]],"date-time":"2022-11-01T00:00:00Z","timestamp":1667260800000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100006469","name":"Fundo para o Desenvolvimento das Ci\u00eancias e da Tecnologia","doi-asserted-by":"publisher","award":["0016\/2019\/A1"],"id":[{"id":"10.13039\/501100006469","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/100012543","name":"Shanghai Science and Technology Development Foundation","doi-asserted-by":"publisher","id":[{"id":"10.13039\/100012543","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Neural Networks"],"published-print":{"date-parts":[[2022,11]]},"DOI":"10.1016\/j.neunet.2022.08.007","type":"journal-article","created":{"date-parts":[[2022,8,15]],"date-time":"2022-08-15T14:34:52Z","timestamp":1660574092000},"page":"28-38","update-policy":"http:\/\/dx.doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":3,"special_numbering":"C","title":["Fast 2-step regularization on style optimization for real face morphing"],"prefix":"10.1016","volume":"155","author":[{"given":"Cheng","family":"Yu","sequence":"first","affiliation":[]},{"given":"Wenmin","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Honglei","family":"Li","sequence":"additional","affiliation":[]},{"given":"Roberto","family":"Bugiolacchi","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.neunet.2022.08.007_b1","doi-asserted-by":"crossref","unstructured":"Abdal,\u00a0R., Qin,\u00a0Y., & Wonka,\u00a0P. (2019). Image2StyleGAN: How to Embed Images Into the StyleGAN Latent Space?. In Proc. IEEE int. conf. comput. vis. (pp. 
4431\u20134440).","DOI":"10.1109\/ICCV.2019.00453"},{"issue":"3","key":"10.1016\/j.neunet.2022.08.007_b2","doi-asserted-by":"crossref","first-page":"21:1","DOI":"10.1145\/3447648","article-title":"StyleFlow: Attribute-conditioned exploration of styleGAN-generated images using conditional continuous normalizing flows","volume":"40","author":"Abdal","year":"2021","journal-title":"ACM Transactions on Graphics"},{"key":"10.1016\/j.neunet.2022.08.007_b3","unstructured":"Bau,\u00a0D., Zhu,\u00a0J., Strobelt,\u00a0H., Zhou,\u00a0B., Tenenbaum,\u00a0J. B., Freeman,\u00a0W. T., et al. (2019). GAN Dissection: Visualizing and Understanding Generative Adversarial Networks. In Int. conf. learn. represent.."},{"key":"10.1016\/j.neunet.2022.08.007_b4","unstructured":"Brock,\u00a0A., Donahue,\u00a0J., & Simonyan,\u00a0K. (2019). Large Scale GAN Training for High Fidelity Natural Image Synthesis. In Int. conf. learn. represent.."},{"issue":"3","key":"10.1016\/j.neunet.2022.08.007_b5","doi-asserted-by":"crossref","first-page":"27:1","DOI":"10.1145\/1961189.1961199","article-title":"LIBSVM: a library for support vector machines","volume":"2","author":"Chang","year":"2011","journal-title":"ACM Transactions on Intelligent Systems and Technology"},{"key":"10.1016\/j.neunet.2022.08.007_b6","series-title":"Proc. int. conf. mach. learn.","first-page":"1597","article-title":"A simple framework for contrastive learning of visual representations","volume":"vol. 119","author":"Chen","year":"2020"},{"issue":"1","key":"10.1016\/j.neunet.2022.08.007_b7","doi-asserted-by":"crossref","first-page":"1","DOI":"10.18637\/jss.v033.i01","article-title":"Regularization paths for generalized linear models via coordinate descent","volume":"33","author":"Friedman","year":"2010","journal-title":"Journal of Statistical Software"},{"key":"10.1016\/j.neunet.2022.08.007_b8","unstructured":"Goodfellow,\u00a0I., Pouget-Abadie,\u00a0J., Mirza,\u00a0M., Xu,\u00a0B., Warde-Farley,\u00a0D., Ozair,\u00a0S., et al. (2014). Generative Adversarial Nets. In Proc. int. conf. neural inf. process. syst. Vol. 27 (pp. 2672\u20132680)."},{"key":"10.1016\/j.neunet.2022.08.007_b9","unstructured":"H\u00e4rk\u00f6nen,\u00a0E., Hertzmann,\u00a0A., Lehtinen,\u00a0J., & Paris,\u00a0S. (2020). GANSpace: Discovering Interpretable GAN Controls. In Proc. int. conf. neural inf. process. syst.."},{"key":"10.1016\/j.neunet.2022.08.007_b10","doi-asserted-by":"crossref","unstructured":"He,\u00a0Z., Kan,\u00a0M., & Shan,\u00a0S. (2021). EigenGAN: Layer-Wise Eigen-Learning for GANs. In Proc. IEEE int. conf. comput. vis..","DOI":"10.1109\/ICCV48922.2021.01414"},{"key":"10.1016\/j.neunet.2022.08.007_b11","unstructured":"Heusel,\u00a0M., Ramsauer,\u00a0H., Unterthiner,\u00a0T., Nessler,\u00a0B., & Hochreiter,\u00a0S. (2017). GANs Trained by a Two Time-Scale Update Rule Converge to a Local Nash Equilibrium. In Proc. int. conf. neural inf. process. syst. (pp. 6626\u20136637)."},{"issue":"C","key":"10.1016\/j.neunet.2022.08.007_b12","doi-asserted-by":"crossref","first-page":"209","DOI":"10.1016\/j.neunet.2021.10.017","article-title":"GuidedStyle: Attribute knowledge guided style manipulation for semantic face editing","volume":"145","author":"Hou","year":"2022","journal-title":"Neural Networks"},{"key":"10.1016\/j.neunet.2022.08.007_b13","unstructured":"Karras,\u00a0T., Aila,\u00a0T., Laine,\u00a0S., & Lehtinen,\u00a0J. (2018). Progressive Growing of GANs for Improved Quality, Stability, and Variation. In Int. conf. learn. 
represent.."},{"key":"10.1016\/j.neunet.2022.08.007_b14","doi-asserted-by":"crossref","unstructured":"Karras,\u00a0T., Laine,\u00a0S., & Aila,\u00a0T. (2019). A Style-Based Generator Architecture for Generative Adversarial Networks. In Proc. IEEE conf. comput. vis. pattern recognit..","DOI":"10.1109\/CVPR.2019.00453"},{"key":"10.1016\/j.neunet.2022.08.007_b15","doi-asserted-by":"crossref","unstructured":"Karras,\u00a0T., Laine,\u00a0S., Aittala,\u00a0M., Hellsten,\u00a0J., Lehtinen,\u00a0J., & Aila,\u00a0T. (2020). Analyzing and Improving the Image Quality of StyleGAN. In Proc. IEEE conf. comput. vis. pattern recognit. (pp. 8107\u20138116).","DOI":"10.1109\/CVPR42600.2020.00813"},{"issue":"4","key":"10.1016\/j.neunet.2022.08.007_b16","doi-asserted-by":"crossref","first-page":"606","DOI":"10.1109\/JSTSP.2007.910971","article-title":"An interior-point method for large-scale \u21131-regularized least squares","volume":"1","author":"Kim","year":"2007","journal-title":"Journal on Selected Topics in Signal Processing"},{"key":"10.1016\/j.neunet.2022.08.007_b17","doi-asserted-by":"crossref","unstructured":"Liu,\u00a0Z., Luo,\u00a0P., Wang,\u00a0X., & Tang,\u00a0X. (2015). Deep Learning Face Attributes in the Wild. In Proc. IEEE int. conf. comput. vis. (pp. 3730\u20133738).","DOI":"10.1109\/ICCV.2015.425"},{"key":"10.1016\/j.neunet.2022.08.007_b18","series-title":"StyleGAN encoder for official TensorFlow implementation","author":"Nikitko","year":"2019"},{"key":"10.1016\/j.neunet.2022.08.007_b19","series-title":"ICCV","first-page":"2065","article-title":"StyleCLIP: Text-driven manipulation of styleGAN imagery","author":"Patashnik","year":"2021"},{"key":"10.1016\/j.neunet.2022.08.007_b20","doi-asserted-by":"crossref","unstructured":"Pidhorskyi,\u00a0S., Adjeroh,\u00a0D. A., & Doretto,\u00a0G. (2020). Adversarial Latent Autoencoders. In Proc. IEEE conf. comput. vis. pattern recognit..","DOI":"10.1109\/CVPR42600.2020.01411"},{"key":"10.1016\/j.neunet.2022.08.007_b21","series-title":"Proc. int. conf. mach. learn.","first-page":"8748","article-title":"Learning transferable visual models from natural language supervision","volume":"vol. 139","author":"Radford","year":"2021"},{"key":"10.1016\/j.neunet.2022.08.007_b22","unstructured":"Radford,\u00a0A., Metz,\u00a0L., & Chintala,\u00a0S. (2016). Unsupervised Representation Learning with Deep Convolutional Generative Adversarial Networks. In Int. conf. learn. represent.."},{"issue":"4","key":"10.1016\/j.neunet.2022.08.007_b23","doi-asserted-by":"crossref","first-page":"2004","DOI":"10.1109\/TPAMI.2020.3034267","article-title":"InterFaceGAN: Interpreting the disentangled face representation learned by GANs","volume":"44","author":"Shen","year":"2022","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI)"},{"key":"10.1016\/j.neunet.2022.08.007_b24","unstructured":"Vaswani,\u00a0A., Shazeer,\u00a0N., Parmar,\u00a0N., Uszkoreit,\u00a0J., Jones,\u00a0L., Gomez,\u00a0A. N., et al. (2017). Attention is All you Need. In Proc. int. conf. neural inf. process. syst. (pp. 
5998\u20136008)."},{"issue":"4","key":"10.1016\/j.neunet.2022.08.007_b25","doi-asserted-by":"crossref","first-page":"600","DOI":"10.1109\/TIP.2003.819861","article-title":"Image quality assessment: from error visibility to structural similarity","volume":"13","author":"Wang","year":"2004","journal-title":"IEEE Transactions on Image Processing"},{"issue":"01","key":"10.1016\/j.neunet.2022.08.007_b26","first-page":"1","article-title":"GAN inversion: A survey","author":"Xia","year":"2022","journal-title":"IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI)"},{"issue":"5","key":"10.1016\/j.neunet.2022.08.007_b27","doi-asserted-by":"crossref","first-page":"1451","DOI":"10.1007\/s11263-020-01429-5","article-title":"Semantic hierarchy emerges in deep generative representations for scene synthesis","volume":"129","author":"Yang","year":"2021","journal-title":"International Journal of Computer Vision"},{"key":"10.1016\/j.neunet.2022.08.007_b28","series-title":"Adaptable GAN encoders for image reconstruction via multi-type latent vectors with two-scale attentions","author":"Yu","year":"2021"},{"key":"10.1016\/j.neunet.2022.08.007_b29","doi-asserted-by":"crossref","first-page":"92","DOI":"10.1016\/j.patrec.2021.11.026","article-title":"Fast transformation of discriminators into encoders using pre-trained GANs","volume":"153","author":"Yu","year":"2022","journal-title":"Pattern Recognition Letters"},{"key":"10.1016\/j.neunet.2022.08.007_b30","series-title":"LSUN: construction of a large-scale image dataset using deep learning with humans in the loop","author":"Yu","year":"2015"},{"key":"10.1016\/j.neunet.2022.08.007_b31","doi-asserted-by":"crossref","unstructured":"Y\u00fcksel,\u00a0O. K., Simsar,\u00a0E., Er,\u00a0E. G., & Yanardag,\u00a0P. (2021). LatentCLR: A Contrastive Learning Approach for Unsupervised Discovery of Interpretable Directions. In Proc. IEEE int. conf. comput. vis. (pp. 14243\u201314252).","DOI":"10.1109\/ICCV48922.2021.01400"},{"key":"10.1016\/j.neunet.2022.08.007_b32","doi-asserted-by":"crossref","unstructured":"Zhang,\u00a0R., Isola,\u00a0P., Efros,\u00a0A. A., Shechtman,\u00a0E., & Wang,\u00a0O. (2018). The Unreasonable Effectiveness of Deep Features as a Perceptual Metric. In Proc. IEEE conf. comput. vis. pattern recognit..","DOI":"10.1109\/CVPR.2018.00068"},{"key":"10.1016\/j.neunet.2022.08.007_b33","doi-asserted-by":"crossref","unstructured":"Zhu,\u00a0J., Shen,\u00a0Y., li\u00a0Zhao,\u00a0D., & Zhou,\u00a0B. (2020). In-Domain GAN Inversion for Real Image Editing. In Europ. conf. comput. 
vis..","DOI":"10.1007\/978-3-030-58520-4_35"}],"container-title":["Neural Networks"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0893608022003070?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0893608022003070?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2024,4,22]],"date-time":"2024-04-22T13:54:10Z","timestamp":1713794050000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0893608022003070"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,11]]},"references-count":33,"alternative-id":["S0893608022003070"],"URL":"https:\/\/doi.org\/10.1016\/j.neunet.2022.08.007","relation":{"has-preprint":[{"id-type":"doi","id":"10.36227\/techrxiv.19105493","asserted-by":"object"},{"id-type":"doi","id":"10.36227\/techrxiv.19105493.v2","asserted-by":"object"}]},"ISSN":["0893-6080"],"issn-type":[{"value":"0893-6080","type":"print"}],"subject":[],"published":{"date-parts":[[2022,11]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Fast 2-step regularization on style optimization for real face morphing","name":"articletitle","label":"Article Title"},{"value":"Neural Networks","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.neunet.2022.08.007","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2022 Elsevier Ltd. All rights reserved.","name":"copyright","label":"Copyright"}]}}