iBet uBet web content aggregator. Adding the entire web to your favor.



Link to original content: https://api.crossref.org/works/10.1007/S11760-023-02953-W
{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,3,19]],"date-time":"2024-03-19T20:41:43Z","timestamp":1710880903366},"reference-count":25,"publisher":"Springer Science and Business Media LLC","issue":"3","license":[{"start":{"date-parts":[[2024,1,18]],"date-time":"2024-01-18T00:00:00Z","timestamp":1705536000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"},{"start":{"date-parts":[[2024,1,18]],"date-time":"2024-01-18T00:00:00Z","timestamp":1705536000000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springernature.com\/gp\/researchers\/text-and-data-mining"}],"funder":[{"name":"Leading Technology of Jiangsu Basic Research Plan","award":["BK20192003"]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["30919011401, 30922010204"],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":["SIViP"],"published-print":{"date-parts":[[2024,4]]},"DOI":"10.1007\/s11760-023-02953-w","type":"journal-article","created":{"date-parts":[[2024,1,18]],"date-time":"2024-01-18T14:02:00Z","timestamp":1705586520000},"page":"2829-2839","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":0,"title":["Visible-infrared image patch matching based on attention mechanism"],"prefix":"10.1007","volume":"18","author":[{"given":"Wuxin","family":"Li","sequence":"first","affiliation":[]},{"given":"Junqi","family":"Bai","sequence":"additional","affiliation":[]},{"given":"Qian","family":"Chen","sequence":"additional","affiliation":[]},{"given":"Guohua","family":"Gu","sequence":"additional","affiliation":[]},{"given":"Xiubao","family":"Sui","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2024,1,18]]},"reference":[{"key":"2953_CR1","doi-asserted-by":"publisher","first-page":"7127","DOI":"10.1109\/TIP.2021.3101414","volume":"30","author":"D Quan","year":"2021","unstructured":"Quan, D., Wang, S., Li, Y., Yang, B., Huyan, N., Chanussot, J., Hou, B., Jiao, L.: Multi-relation attention network for image patch matching. IEEE Trans. Image Process. 30, 7127\u20137142 (2021)","journal-title":"IEEE Trans. Image Process."},{"key":"2953_CR2","doi-asserted-by":"crossref","unstructured":"Melekhov, I., Kannala, J., Rahtu, E.: Image patch matching using convolutional descriptors with euclidean distance. In: Asian Conference on Computer Vision (ACCV), pp. 638\u2013653 (2016)","DOI":"10.1007\/978-3-319-54526-4_46"},{"key":"2953_CR3","doi-asserted-by":"publisher","first-page":"54","DOI":"10.1016\/j.patrec.2019.01.005","volume":"120","author":"MS Hanif","year":"2019","unstructured":"Hanif, M.S.: Patch match networks: Improved two-channel and siamese networks for image patch matching. Pattern Recogn. Lett. 120, 54\u201361 (2019)","journal-title":"Pattern Recogn. Lett."},{"key":"2953_CR4","doi-asserted-by":"publisher","first-page":"148","DOI":"10.1016\/j.isprsjprs.2017.12.012","volume":"145","author":"S Wang","year":"2018","unstructured":"Wang, S., Quan, D., Liang, X., Ning, M., Guo, Y., Jiao, L.: A deep learning framework for remote sensing image registration. ISPRS J. Photogramm. Remote. Sens. 145, 148\u2013164 (2018)","journal-title":"ISPRS J. Photogramm. Remote. Sens."},{"issue":"5","key":"2953_CR5","doi-asserted-by":"publisher","first-page":"2941","DOI":"10.1109\/TGRS.2017.2656380","volume":"55","author":"Y Ye","year":"2017","unstructured":"Ye, Y., Shan, J., Bruzzone, L., Shen, L.: Robust registration of multimodal remote sensing images based on structural similarity. IEEE Trans. Geosci. Remote Sens. 55(5), 2941\u20132958 (2017)","journal-title":"IEEE Trans. Geosci. Remote Sens."},{"issue":"10","key":"2953_CR6","doi-asserted-by":"publisher","first-page":"1850","DOI":"10.1109\/LGRS.2017.2738632","volume":"14","author":"CFG Nunes","year":"2017","unstructured":"Nunes, C.F.G., P\u00e1dua, F.L.C.: A local feature descriptor based on log-gabor filters for keypoint matching in multispectral images. IEEE Geosci. Remote Sens. Lett. 14(10), 1850\u20131854 (2017)","journal-title":"IEEE Geosci. Remote Sens. Lett."},{"issue":"23","key":"2953_CR7","doi-asserted-by":"publisher","first-page":"2836","DOI":"10.3390\/rs11232836","volume":"11","author":"R Zhu","year":"2019","unstructured":"Zhu, R., Dawen, Yu., Ji, S., Meng, L.: Matching RGB and infrared remote sensing images with densely-connected convolutional neural networks. Remote Sens. 11(23), 2836 (2019)","journal-title":"Remote Sens."},{"issue":"4","key":"2953_CR8","doi-asserted-by":"publisher","first-page":"2188","DOI":"10.1007\/s10489-020-01996-7","volume":"51","author":"Y Mao","year":"2021","unstructured":"Mao, Y., He, Z.: Dual-y network: infrared-visible image patches matching via semi-supervised transfer learning. Appl. Intell. 51(4), 2188\u20132197 (2021)","journal-title":"Appl. Intell."},{"key":"2953_CR9","doi-asserted-by":"crossref","unstructured":"Hu, J., Shen, L., Sun, G.: Squeeze-and-excitation networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 7132\u20137141 (2018)","DOI":"10.1109\/CVPR.2018.00745"},{"issue":"1","key":"2953_CR10","doi-asserted-by":"publisher","first-page":"85","DOI":"10.1007\/s10489-020-01723-2","volume":"51","author":"Z Liu","year":"2021","unstructured":"Liu, Z., Huang, J., Zhu, C., Peng, X., Xinyu, D.: Residual attention network using multi-channel dense connections for image super-resolution. Appl. Intell. 51(1), 85\u201399 (2021)","journal-title":"Appl. Intell."},{"issue":"1","key":"2953_CR11","doi-asserted-by":"publisher","first-page":"652","DOI":"10.1007\/s10489-021-02489-x","volume":"52","author":"F Hao","year":"2022","unstructured":"Hao, F., Zhang, T., Zhao, L., Tang, Y.: Efficient residual attention network for single image super-resolution. Appl. Intell. 52(1), 652\u2013661 (2022)","journal-title":"Appl. Intell."},{"key":"2953_CR12","doi-asserted-by":"crossref","unstructured":"Huang, Z., Wang, X., Huang, L., Huang, C., Wei, Y., Liu, W.: Ccnet: Criss-cross attention for semantic segmentation. In: Proceedings of the IEEE\/CVF International Conference on Computer Vision, pp. 603\u2013612 (2019)","DOI":"10.1109\/ICCV.2019.00069"},{"key":"2953_CR13","doi-asserted-by":"crossref","unstructured":"Li, W., Chen, Q., Gu, G., Sui, X.: Object matching between visible and infrared images using a siamese network. Appl. Intell. 1\u201313 (2021)","DOI":"10.1007\/s10489-021-02841-1"},{"key":"2953_CR14","doi-asserted-by":"crossref","unstructured":"Woo, S., Park, J., Lee, J.-Y., Kweon, I.S.: Cbam: Convolutional block attention module. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 3\u201319 (2018)","DOI":"10.1007\/978-3-030-01234-2_1"},{"key":"2953_CR15","doi-asserted-by":"crossref","unstructured":"Lin, T.-Y., Goyal, P., Girshick, R., He, K., Doll\u00e1r, P.: Focal loss for dense object detection. In: Proceedings of the IEEE International Conference on Computer Vision, pp. 2980\u20132988 (2017)","DOI":"10.1109\/ICCV.2017.324"},{"key":"2953_CR16","doi-asserted-by":"publisher","DOI":"10.1016\/j.patcog.2019.106977","volume":"96","author":"C Li","year":"2019","unstructured":"Li, C., Liang, X., Yijuan, L., Zhao, N., Tang, J.: RGB-T object tracking: benchmark and baseline. Pattern Recogn. 96, 106977 (2019)","journal-title":"Pattern Recogn."},{"key":"2953_CR17","doi-asserted-by":"crossref","unstructured":"Bertinetto, L., Valmadre, J., Henriques, J.F., Vedaldi, A., Torr, P.H.S.: Fully-convolutional siamese networks for object tracking. In: European Conference on Computer Vision, pp. 850\u2013865. Springer, Berlin (2016)","DOI":"10.1007\/978-3-319-48881-3_56"},{"key":"2953_CR18","unstructured":"Kristan, M., Leonardis, A., Matas, J., Felsberg, M., Pflugfelder, R., \u010cehovin\u00a0Zajc, L., Vojir, T., Bhat, G., Lukezic, A., Eldesokey, A., et\u00a0al.: The sixth visual object tracking vot2018 challenge results. In: 5th European Conference on Computer Vision (ECCV), vol. 11129, pp. 3\u201353 (2018)"},{"key":"2953_CR19","doi-asserted-by":"crossref","unstructured":"Li, Y., Zhu, J., Hoi, S.C.H., Song, W., Wang, Z., Liu, H.: Robust estimation of similarity transformation for visual object tracking. In: Proceedings of the AAAI Conference on Artificial Intelligence, vol.\u00a033, pp. 8666\u20138673 (2019)","DOI":"10.1609\/aaai.v33i01.33018666"},{"key":"2953_CR20","doi-asserted-by":"crossref","unstructured":"Bertinetto, L., Valmadre, J., Golodetz, S., Miksik, O., Torr, P.H.S.: Staple: complementary learners for real-time tracking. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 1401\u20131409 (2016)","DOI":"10.1109\/CVPR.2016.156"},{"key":"2953_CR21","doi-asserted-by":"crossref","unstructured":"Zhu, Z., Wang, Q., Li, B., Wu, W., Yan, J., Hu, W.: Distractor-aware siamese networks for visual object tracking. In: Proceedings of the European Conference on Computer Vision (ECCV), pp. 101\u2013117 (2018)","DOI":"10.1007\/978-3-030-01240-3_7"},{"key":"2953_CR22","doi-asserted-by":"crossref","unstructured":"Danelljan, M., Bhat, G., Shahbaz\u00a0Khan, F., Felsberg, M.: Eco: efficient convolution operators for tracking. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 6638\u20136646 (2017)","DOI":"10.1109\/CVPR.2017.733"},{"issue":"3","key":"2953_CR23","doi-asserted-by":"publisher","first-page":"583","DOI":"10.1109\/TPAMI.2014.2345390","volume":"37","author":"JF Henriques","year":"2014","unstructured":"Henriques, J.F., Caseiro, R., Martins, P., Batista, J.: High-speed tracking with kernelized correlation filters. IEEE Trans. Pattern Anal. Mach. Intell. 37(3), 583\u2013596 (2014)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"2953_CR24","doi-asserted-by":"crossref","unstructured":"Guo, D., Wang, J., Cui, Y., Wang, Z., Chen, S.: Siamcar: Siamese fully convolutional classification and regression for visual tracking. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6269\u20136277 (2020)","DOI":"10.1109\/CVPR42600.2020.00630"},{"key":"2953_CR25","doi-asserted-by":"crossref","unstructured":"Chen, Z., Zhong, B., Li, G., Zhang, S., Ji, R.: Siamese box adaptive network for visual tracking. In: Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, pp. 6668\u20136677 (2020)","DOI":"10.1109\/CVPR42600.2020.00670"}],"container-title":["Signal, Image and Video Processing"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-023-02953-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/article\/10.1007\/s11760-023-02953-w\/fulltext.html","content-type":"text\/html","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/s11760-023-02953-w.pdf","content-type":"application\/pdf","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,3,19]],"date-time":"2024-03-19T20:05:28Z","timestamp":1710878728000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/s11760-023-02953-w"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,1,18]]},"references-count":25,"journal-issue":{"issue":"3","published-print":{"date-parts":[[2024,4]]}},"alternative-id":["2953"],"URL":"https:\/\/doi.org\/10.1007\/s11760-023-02953-w","relation":{},"ISSN":["1863-1703","1863-1711"],"issn-type":[{"value":"1863-1703","type":"print"},{"value":"1863-1711","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,1,18]]},"assertion":[{"value":"10 October 2023","order":1,"name":"received","label":"Received","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"24 November 2023","order":2,"name":"revised","label":"Revised","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"10 December 2023","order":3,"name":"accepted","label":"Accepted","group":{"name":"ArticleHistory","label":"Article History"}},{"value":"18 January 2024","order":4,"name":"first_online","label":"First Online","group":{"name":"ArticleHistory","label":"Article History"}},{"order":1,"name":"Ethics","group":{"name":"EthicsHeading","label":"Declarations"}},{"value":"The authors declare that they have no competing interests as defined by Springer, or other interests that might be perceived to influence the results and\/or discussion reported in this paper.","order":2,"name":"Ethics","group":{"name":"EthicsHeading","label":"Conflict of interest"}},{"value":"This paper does not contain any studies with human participants or animals performed by any of the authors.","order":3,"name":"Ethics","group":{"name":"EthicsHeading","label":"Ethical and informed consent for data used"}}]}}