Original content (Crossref REST API work record): https://api.crossref.org/works/10.1016/J.KNOSYS.2024.111898
{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,11,18]],"date-time":"2024-11-18T01:40:07Z","timestamp":1731894007352,"version":"3.28.0"},"reference-count":48,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/legal\/tdmrep-license"},{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2024,7,1]],"date-time":"2024-07-01T00:00:00Z","timestamp":1719792000000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100001348","name":"Agency for Science Technology and Research","doi-asserted-by":"publisher","id":[{"id":"10.13039\/501100001348","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Knowledge-Based Systems"],"published-print":{"date-parts":[[2024,7]]},"DOI":"10.1016\/j.knosys.2024.111898","type":"journal-article","created":{"date-parts":[[2024,5,4]],"date-time":"2024-05-04T02:04:02Z","timestamp":1714788242000},"page":"111898","update-policy":"http:\/\/dx.doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":1,"special_numbering":"C","title":["Intrinsic-style distribution matching for arbitrary style transfer"],"prefix":"10.1016","volume":"296","author":[{"ORCID":"http:\/\/orcid.org\/0000-0002-3242-783X","authenticated-orcid":false,"given":"Meichen","family":"Liu","sequence":"first","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0003-2979-090X","authenticated-orcid":false,"given":"Songnan","family":"Lin","sequence":"additional","affiliation":[]},{"given":"Hengmin","family":"Zhang","sequence":"additional","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0002-5515-5339","authenticated-orcid":false,"given":"Zhiyuan","family":"Zha","sequence":"additional","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0002-6874-6453","authenticated-orcid":false,"given":"Bihan","family":"Wen","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.knosys.2024.111898_b1","doi-asserted-by":"crossref","unstructured":"L.A. Gatys, A.S. Ecker, M. Bethge, Image style transfer using convolutional neural networks, in: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2016, pp. 
2414\u20132423.","DOI":"10.1109\/CVPR.2016.265"},{"key":"10.1016\/j.knosys.2024.111898_b2","doi-asserted-by":"crossref","unstructured":"X. Huang, S. Belongie, Arbitrary style transfer in real-time with adaptive instance normalization, in: Proceedings of the IEEE International Conference on Computer Vision, 2017, pp. 1501\u20131510.","DOI":"10.1109\/ICCV.2017.167"},{"volume":"vol. 30","article-title":"Universal style transfer via feature transforms","year":"2017","author":"Li","key":"10.1016\/j.knosys.2024.111898_b3"},{"journal-title":"IEEE Trans. Neural Netw. Learn. Syst.","article-title":"Exploring the temporal consistency of arbitrary style transfer: A channelwise perspective","year":"2023","author":"Kong","key":"10.1016\/j.knosys.2024.111898_b4"},{"key":"10.1016\/j.knosys.2024.111898_b5","doi-asserted-by":"crossref","unstructured":"L. Sheng, Z. Lin, J. Shao, X. Wang, Avatar-net: Multi-scale zero-shot style transfer by feature decoration, in: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2018, pp. 8242\u20138250.","DOI":"10.1109\/CVPR.2018.00860"},{"key":"10.1016\/j.knosys.2024.111898_b6","doi-asserted-by":"crossref","unstructured":"S. Liu, T. Lin, D. He, F. Li, M. Wang, X. Li, Z. Sun, Q. Li, E. Ding, Adaattn: Revisit attention mechanism in arbitrary neural style transfer, in: Proceedings of the IEEE\/CVF International Conference on Computer Vision, 2021, pp. 6649\u20136658.","DOI":"10.1109\/ICCV48922.2021.00658"},{"year":"2022","series-title":"MicroAST: Towards super-fast ultra-resolution arbitrary style transfer","author":"Wang","key":"10.1016\/j.knosys.2024.111898_b7"},{"key":"10.1016\/j.knosys.2024.111898_b8","doi-asserted-by":"crossref","unstructured":"X. Luo, Z. Han, L. Yang, Progressive Attentional Manifold Alignment for Arbitrary Style Transfer, in: Proceedings of the Asian Conference on Computer Vision, 2022, pp. 3206\u20133222.","DOI":"10.1007\/978-3-031-26293-7_9"},{"key":"10.1016\/j.knosys.2024.111898_b9","doi-asserted-by":"crossref","unstructured":"Y. Deng, F. Tang, W. Dong, C. Ma, X. Pan, L. Wang, C. Xu, Stytr2: Image style transfer with transformers, in: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 11326\u201311336.","DOI":"10.1109\/CVPR52688.2022.01104"},{"key":"10.1016\/j.knosys.2024.111898_b10","doi-asserted-by":"crossref","unstructured":"M. Zhu, X. He, N. Wang, X. Wang, X. Gao, All-to-key attention for arbitrary style transfer, in: Proceedings of the IEEE\/CVF International Conference on Computer Vision, 2023, pp. 23109\u201323119.","DOI":"10.1109\/ICCV51070.2023.02112"},{"key":"10.1016\/j.knosys.2024.111898_b11","doi-asserted-by":"crossref","unstructured":"L. Wen, C. Gao, C. Zou, CAP-VSTNet: Content Affinity Preserved Versatile Style Transfer, in: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 18300\u201318309.","DOI":"10.1109\/CVPR52729.2023.01755"},{"key":"10.1016\/j.knosys.2024.111898_b12","series-title":"Computer Vision\u2013ECCV 2022: 17th European Conference, Tel Aviv, Israel, October 23\u201327, 2022, Proceedings, Part XVI","first-page":"189","article-title":"CCPL: Contrastive coherence preserving loss for versatile style transfer","author":"Wu","year":"2022"},{"key":"10.1016\/j.knosys.2024.111898_b13","doi-asserted-by":"crossref","unstructured":"Y. Zhang, F. Tang, W. Dong, H. Huang, C. Ma, T.-Y. Lee, C. Xu, Domain enhanced arbitrary image style transfer via contrastive learning, in: ACM SIGGRAPH 2022 Conference Proceedings, 2022, pp. 
1\u20138.","DOI":"10.1145\/3528233.3530736"},{"key":"10.1016\/j.knosys.2024.111898_b14","first-page":"26561","article-title":"Artistic style transfer with internal-external learning and contrastive learning","volume":"34","author":"Chen","year":"2021","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.knosys.2024.111898_b15","doi-asserted-by":"crossref","first-page":"6761","DOI":"10.1109\/TIP.2022.3215899","article-title":"CLAST: Contrastive learning for arbitrary style transfer","volume":"31","author":"Wang","year":"2022","journal-title":"IEEE Trans. Image Process."},{"issue":"5","key":"10.1016\/j.knosys.2024.111898_b16","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1145\/3605548","article-title":"A unified arbitrary style transfer framework via adaptive contrastive learning","volume":"42","author":"Zhang","year":"2023","journal-title":"ACM Trans. Graph."},{"key":"10.1016\/j.knosys.2024.111898_b17","doi-asserted-by":"crossref","unstructured":"X. Li, S. Liu, J. Kautz, M.-H. Yang, Learning Linear Transformations for Fast Arbitrary Style Transfer, in: IEEE Conference on Computer Vision and Pattern Recognition, 2019.","DOI":"10.1109\/CVPR.2019.00393"},{"key":"10.1016\/j.knosys.2024.111898_b18","first-page":"1210","article-title":"Arbitrary video style transfer via multi-channel correlation","volume":"vol. 35","author":"Deng","year":"2021"},{"key":"10.1016\/j.knosys.2024.111898_b19","doi-asserted-by":"crossref","unstructured":"T. Karras, S. Laine, M. Aittala, J. Hellsten, J. Lehtinen, T. Aila, Analyzing and improving the image quality of stylegan, in: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2020, pp. 8110\u20138119.","DOI":"10.1109\/CVPR42600.2020.00813"},{"key":"10.1016\/j.knosys.2024.111898_b20","doi-asserted-by":"crossref","unstructured":"D.Y. Park, K.H. Lee, Arbitrary style transfer with style-attentional networks, in: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2019, pp. 5880\u20135888.","DOI":"10.1109\/CVPR.2019.00603"},{"key":"10.1016\/j.knosys.2024.111898_b21","doi-asserted-by":"crossref","unstructured":"W. Xu, C. Long, Y. Nie, Learning Dynamic Style Kernels for Artistic Style Transfer, in: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2023, pp. 10083\u201310092.","DOI":"10.1109\/CVPR52729.2023.00972"},{"key":"10.1016\/j.knosys.2024.111898_b22","first-page":"2292","article-title":"Shunit: Style harmonization for unpaired image-to-image translation","volume":"vol. 37","author":"Song","year":"2023"},{"key":"10.1016\/j.knosys.2024.111898_b23","first-page":"3190","article-title":"Contrastive multi-task dense prediction","volume":"vol. 37","author":"Yang","year":"2023"},{"key":"10.1016\/j.knosys.2024.111898_b24","doi-asserted-by":"crossref","unstructured":"F. Zhan, Y. Yu, R. Wu, J. Zhang, S. Lu, C. Zhang, Marginal contrastive correspondence for guided image generation, in: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 10663\u201310672.","DOI":"10.1109\/CVPR52688.2022.01040"},{"key":"10.1016\/j.knosys.2024.111898_b25","series-title":"Computer Vision\u2013ECCV 2020: 16th European Conference, Glasgow, UK, August 23\u201328, 2020, Proceedings, Part IX 16","first-page":"319","article-title":"Contrastive learning for unpaired image-to-image translation","author":"Park","year":"2020"},{"key":"10.1016\/j.knosys.2024.111898_b26","doi-asserted-by":"crossref","unstructured":"K. Baek, Y. Choi, Y. Uh, J. 
Yoo, H. Shim, Rethinking the truly unsupervised image-to-image translation, in: Proceedings of the IEEE\/CVF International Conference on Computer Vision, 2021, pp. 14154\u201314163.","DOI":"10.1109\/ICCV48922.2021.01389"},{"year":"2022","series-title":"Name your style: An arbitrary artist-aware image style transfer","author":"Liu","key":"10.1016\/j.knosys.2024.111898_b27"},{"year":"2023","series-title":"Fontdiffuser: One-shot font generation via denoising diffusion with multi-scale content aggregation and style contrastive learning","author":"Yang","key":"10.1016\/j.knosys.2024.111898_b28"},{"key":"10.1016\/j.knosys.2024.111898_b29","doi-asserted-by":"crossref","first-page":"146","DOI":"10.1016\/j.neunet.2023.04.037","article-title":"CSAST: Content self-supervised and style contrastive learning for arbitrary style transfer","volume":"164","author":"Zhang","year":"2023","journal-title":"Neural Netw."},{"year":"2023","series-title":"Zero-shot contrastive loss for text-guided diffusion image style transfer","author":"Yang","key":"10.1016\/j.knosys.2024.111898_b30"},{"article-title":"Very deep convolutional networks for large-scale image recognition","year":"2015","series-title":"3rd International Conference on Learning Representations","author":"Simonyan","key":"10.1016\/j.knosys.2024.111898_b31"},{"key":"10.1016\/j.knosys.2024.111898_b32","article-title":"Multi-mapping image-to-image translation via learning disentanglement","volume":"32","author":"Yu","year":"2019","journal-title":"Adv. Neural Inf. Process. Syst."},{"key":"10.1016\/j.knosys.2024.111898_b33","doi-asserted-by":"crossref","unstructured":"Y. Deng, F. Tang, W. Dong, W. Sun, F. Huang, C. Xu, Arbitrary style transfer via multi-adaptation network, in: Proceedings of the 28th ACM International Conference on Multimedia, 2020, pp. 2719\u20132727.","DOI":"10.1145\/3394171.3414015"},{"key":"10.1016\/j.knosys.2024.111898_b34","doi-asserted-by":"crossref","unstructured":"P. Chandran, G. Zoss, P. Gotardo, M. Gross, D. Bradley, Adaptive convolutions for structure-aware style transfer, in: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2021, pp. 7972\u20137981.","DOI":"10.1109\/CVPR46437.2021.00788"},{"journal-title":"IEEE Trans. Multimed.","article-title":"Multi-source style transfer via style disentanglement network","year":"2023","author":"Wang","key":"10.1016\/j.knosys.2024.111898_b35"},{"journal-title":"IEEE Trans. Circuits Syst. Video Technol.","article-title":"InfoStyler: Disentanglement information bottleneck for artistic style transfer","year":"2023","author":"Lyu","key":"10.1016\/j.knosys.2024.111898_b36"},{"key":"10.1016\/j.knosys.2024.111898_b37","doi-asserted-by":"crossref","unstructured":"J. Fu, J. Liu, H. Tian, Y. Li, Y. Bao, Z. Fang, H. Lu, Dual attention network for scene segmentation, in: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2019, pp. 3146\u20133154.","DOI":"10.1109\/CVPR.2019.00326"},{"key":"10.1016\/j.knosys.2024.111898_b38","doi-asserted-by":"crossref","unstructured":"J. An, S. Huang, Y. Song, D. Dou, W. Liu, J. Luo, Artflow: Unbiased image style transfer via reversible neural flows, in: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2021, pp. 
862\u2013871.","DOI":"10.1109\/CVPR46437.2021.00092"},{"key":"10.1016\/j.knosys.2024.111898_b39","doi-asserted-by":"crossref","first-page":"2245","DOI":"10.1109\/TMM.2021.3087026","article-title":"Neural style palette: A multimodal and interactive style transfer from a single style image","volume":"23","author":"Virtusio","year":"2021","journal-title":"IEEE Trans. Multimed."},{"key":"10.1016\/j.knosys.2024.111898_b40","doi-asserted-by":"crossref","unstructured":"Y. Alharbi, N. Smith, P. Wonka, Latent filter scaling for multimodal unsupervised image-to-image translation, in: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2019, pp. 1458\u20131466.","DOI":"10.1109\/CVPR.2019.00155"},{"key":"10.1016\/j.knosys.2024.111898_b41","series-title":"Computer Vision\u2013ECCV 2016: 14th European Conference, Amsterdam, the Netherlands, October 11-14, 2016, Proceedings, Part II 14","first-page":"694","article-title":"Perceptual losses for real-time style transfer and super-resolution","author":"Johnson","year":"2016"},{"key":"10.1016\/j.knosys.2024.111898_b42","doi-asserted-by":"crossref","unstructured":"Z. Wang, Z. Zhang, L. Zhao, Z. Zuo, A. Li, W. Xing, D. Lu, AesUST: Towards aesthetic-enhanced universal style transfer, in: Proceedings of the 30th ACM International Conference on Multimedia, 2022, pp. 1095\u20131106.","DOI":"10.1145\/3503161.3547939"},{"key":"10.1016\/j.knosys.2024.111898_b43","doi-asserted-by":"crossref","unstructured":"Y. Zhang, M. Li, R. Li, K. Jia, L. Zhang, Exact feature distribution matching for arbitrary style transfer and domain generalization, in: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 8035\u20138045.","DOI":"10.1109\/CVPR52688.2022.00787"},{"key":"10.1016\/j.knosys.2024.111898_b44","series-title":"Computer Vision\u2013ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13","first-page":"740","article-title":"Microsoft coco: Common objects in context","author":"Lin","year":"2014"},{"article-title":"Recognizing image style","year":"2014","series-title":"Proceedings of the British Machine Vision Conference","author":"Karayev","key":"10.1016\/j.knosys.2024.111898_b45"},{"year":"2014","series-title":"Adam: A method for stochastic optimization","author":"Kingma","key":"10.1016\/j.knosys.2024.111898_b46"},{"issue":"4","key":"10.1016\/j.knosys.2024.111898_b47","doi-asserted-by":"crossref","first-page":"600","DOI":"10.1109\/TIP.2003.819861","article-title":"Image quality assessment: From error visibility to structural similarity","volume":"13","author":"Wang","year":"2004","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.knosys.2024.111898_b48","doi-asserted-by":"crossref","unstructured":"R. Zhang, P. Isola, A.A. Efros, E. Shechtman, O. Wang, The unreasonable effectiveness of deep features as a perceptual metric, in: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2018, pp. 
586\u2013595.","DOI":"10.1109\/CVPR.2018.00068"}],"container-title":["Knowledge-Based Systems"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S095070512400532X?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S095070512400532X?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2024,11,18]],"date-time":"2024-11-18T01:06:28Z","timestamp":1731891988000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S095070512400532X"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,7]]},"references-count":48,"alternative-id":["S095070512400532X"],"URL":"http:\/\/dx.doi.org\/10.1016\/j.knosys.2024.111898","relation":{},"ISSN":["0950-7051"],"issn-type":[{"type":"print","value":"0950-7051"}],"subject":[],"published":{"date-parts":[[2024,7]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"Intrinsic-style distribution matching for arbitrary style transfer","name":"articletitle","label":"Article Title"},{"value":"Knowledge-Based Systems","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.knosys.2024.111898","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2024 Elsevier B.V. All rights reserved.","name":"copyright","label":"Copyright"}],"article-number":"111898"}}
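
The JSON above is a standard Crossref REST API "work" response, where the "message" object carries the bibliographic metadata. The following is a minimal Python sketch, using only the standard library, of how such a record might be fetched and read; the endpoint and the field names (message.title, message.author, message.DOI, message.references-count, and so on) are taken directly from the record above, while the script name and mailto address in the User-Agent header are placeholders reflecting Crossref's convention that API clients identify themselves with a contact address. Note that json.load decodes the \uXXXX and \/ escapes visible in the raw response.

import json
import urllib.request

# Crossref work record for doi:10.1016/j.knosys.2024.111898 (the JSON shown above).
URL = "https://api.crossref.org/works/10.1016/j.knosys.2024.111898"

def fetch_work(url: str) -> dict:
    # Placeholder contact address; Crossref asks clients to identify themselves.
    headers = {"User-Agent": "metadata-example/0.1 (mailto:you@example.org)"}
    req = urllib.request.Request(url, headers=headers)
    with urllib.request.urlopen(req) as resp:
        payload = json.load(resp)          # decodes \uXXXX and \/ escapes
    assert payload["status"] == "ok" and payload["message-type"] == "work"
    return payload["message"]              # the bibliographic record itself

work = fetch_work(URL)
authors = ", ".join(
    f"{a.get('given', '')} {a['family']}".strip() for a in work.get("author", [])
)
print(work["title"][0])                              # article title
print(authors)                                       # Meichen Liu, Songnan Lin, ...
print(work["container-title"][0], "vol.", work.get("volume"))
print("issued:", work["issued"]["date-parts"][0])    # [2024, 7]
print("DOI:", work["DOI"], "| cited references:", work["references-count"])

Run as-is, this should print the article title, the five authors, the journal and volume, the issue date, and the DOI with its count of 48 cited references, matching the fields in the record above.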