Link to original content: https://api.crossref.org/works/10.1007/978-3-031-20077-9_9
{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,8]],"date-time":"2024-10-08T04:12:02Z","timestamp":1728360722833},"publisher-location":"Cham","reference-count":60,"publisher":"Springer Nature Switzerland","isbn-type":[{"type":"print","value":"9783031200762"},{"type":"electronic","value":"9783031200779"}],"license":[{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"},{"start":{"date-parts":[[2022,1,1]],"date-time":"2022-01-01T00:00:00Z","timestamp":1640995200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/www.springer.com\/tdm"}],"content-domain":{"domain":["link.springer.com"],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022]]},"DOI":"10.1007\/978-3-031-20077-9_9","type":"book-chapter","created":{"date-parts":[[2022,11,5]],"date-time":"2022-11-05T16:21:52Z","timestamp":1667665312000},"page":"139-158","update-policy":"http:\/\/dx.doi.org\/10.1007\/springer_crossmark_policy","source":"Crossref","is-referenced-by-count":53,"title":["Multimodal Object Detection via\u00a0Probabilistic Ensembling"],"prefix":"10.1007","author":[{"given":"Yi-Ting","family":"Chen","sequence":"first","affiliation":[]},{"given":"Jinghao","family":"Shi","sequence":"additional","affiliation":[]},{"given":"Zelin","family":"Ye","sequence":"additional","affiliation":[]},{"given":"Christoph","family":"Mertz","sequence":"additional","affiliation":[]},{"given":"Deva","family":"Ramanan","sequence":"additional","affiliation":[]},{"given":"Shu","family":"Kong","sequence":"additional","affiliation":[]}],"member":"297","published-online":{"date-parts":[[2022,11,6]]},"reference":[{"key":"9_CR1","unstructured":"Akiba, T., Kerola, T., Niitani, Y., Ogawa, T., Sano, S., Suzuki, S.: PFDet: 2nd place solution to open images challenge 2018 object detection track. arXiv:1809.00778 (2018)"},{"key":"9_CR2","doi-asserted-by":"crossref","unstructured":"Albaba, B.M., Ozer, S.: SyNet: an ensemble network for object detection in UAV images. In: 2020 25th International Conference on Pattern Recognition (ICPR). pp. 10227\u201310234. IEEE (2021)","DOI":"10.1109\/ICPR48806.2021.9412847"},{"issue":"1","key":"9_CR3","doi-asserted-by":"publisher","first-page":"105","DOI":"10.1023\/A:1007515423169","volume":"36","author":"E Bauer","year":"1999","unstructured":"Bauer, E., Kohavi, R.: An empirical comparison of voting classification algorithms: Bagging, boosting, and variants. Mach. Learn. 36(1), 105\u2013139 (1999)","journal-title":"Mach. Learn."},{"key":"9_CR4","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"546","DOI":"10.1007\/978-3-030-58542-6_33","volume-title":"Computer Vision \u2013 ECCV 2020","author":"M Kieu","year":"2020","unstructured":"Kieu, M., Bagdanov, A.D., Bertini, M., del Bimbo, A.: Task-conditioned domain adaptation for pedestrian detection in thermal imagery. In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12367, pp. 546\u2013562. 
Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58542-6_33"},{"key":"9_CR5","doi-asserted-by":"crossref","unstructured":"Bodla, N., Singh, B., Chellappa, R., Davis, L.S.: Soft-NMS-improving object detection with one line of code. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.593"},{"key":"9_CR6","doi-asserted-by":"crossref","unstructured":"Bolya, D., Zhou, C., Xiao, F., Lee, Y.J.: YOLACT: real-time instance segmentation. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00925"},{"key":"9_CR7","doi-asserted-by":"crossref","unstructured":"Caesar, H., et al.: nuScenes a multimodal dataset for autonomous driving. In: CVPR (2020)","DOI":"10.1109\/CVPR42600.2020.01164"},{"key":"9_CR8","doi-asserted-by":"crossref","unstructured":"Cao, Y., Zhou, T., Zhu, X., Su, Y.: Every feature counts: an improved one-stage detector in thermal imagery. In: IEEE International Conference on Computer and Communications (ICCC) (2019)","DOI":"10.1109\/ICCC47050.2019.9064036"},{"key":"9_CR9","unstructured":"Choi, H., Kim, S., Park, K., Sohn, K.: Multi-spectral pedestrian detection based on accumulated object proposal with fully convolutional networks. In: International Conference on Pattern Recognition (ICPR) (2016)"},{"key":"9_CR10","unstructured":"Dalal, N., Triggs, B.: Histograms of oriented gradients for human detection. In: CVPR (2005)"},{"issue":"1","key":"9_CR11","doi-asserted-by":"crossref","first-page":"1","DOI":"10.1111\/j.2517-6161.1979.tb01052.x","volume":"41","author":"AP Dawid","year":"1979","unstructured":"Dawid, A.P.: Conditional independence in statistical theory. J. Roy. Stat. Soc.: Ser. B (Methodol.) 41(1), 1\u201315 (1979)","journal-title":"J. Roy. Stat. Soc.: Ser. B (Methodol.)"},{"key":"9_CR12","doi-asserted-by":"crossref","unstructured":"Devaguptapu, C., Akolekar, N., M Sharma, M., N Balasubramanian, V.: Borrow from anywhere: pseudo multi-modal object detection in thermal imagery. In: CVPR Workshops (2019)","DOI":"10.1109\/CVPRW.2019.00135"},{"key":"9_CR13","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"1","DOI":"10.1007\/3-540-45014-9_1","volume-title":"Multiple Classifier Systems","author":"TG Dietterich","year":"2000","unstructured":"Dietterich, T.G.: Ensemble methods in machine learning. In: Kittler, J., Roli, F. (eds.) MCS 2000. LNCS, vol. 1857, pp. 1\u201315. Springer, Heidelberg (2000). https:\/\/doi.org\/10.1007\/3-540-45014-9_1"},{"key":"9_CR14","doi-asserted-by":"crossref","unstructured":"Doll\u00e1r, P., Wojek, C., Schiele, B., Perona, P.: Pedestrian detection: A benchmark. In: CVPR (2009)","DOI":"10.1109\/CVPR.2009.5206631"},{"issue":"4","key":"9_CR15","doi-asserted-by":"publisher","first-page":"743","DOI":"10.1109\/TPAMI.2011.155","volume":"34","author":"P Dollar","year":"2011","unstructured":"Dollar, P., Wojek, C., Schiele, B., Perona, P.: Pedestrian detection: An evaluation of the state of the art. IEEE Trans. Pattern Anal. Mach. Intell. 34(4), 743\u2013761 (2011)","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"issue":"1","key":"9_CR16","doi-asserted-by":"publisher","first-page":"98","DOI":"10.1007\/s11263-014-0733-5","volume":"111","author":"M Everingham","year":"2015","unstructured":"Everingham, M., Eslami, S.A., Van Gool, L., Williams, C.K., Winn, J., Zisserman, A.: The pascal visual object classes challenge: a retrospective. Int. J. Comput. Vision 111(1), 98\u2013136 (2015)","journal-title":"Int. J. Comput. Vision"},{"key":"9_CR17","unstructured":"FLIR: Flir thermal dataset for algorithm training (2018). 
https:\/\/www.flir.in\/oem\/adas\/adas-dataset-form"},{"key":"9_CR18","unstructured":"Freund, Y., et al.: Experiments with a new boosting algorithm. In: ICML, vol. 96, pp. 148\u2013156. Citeseer (1996)"},{"key":"9_CR19","doi-asserted-by":"crossref","unstructured":"Geiger, A., Lenz, P., Urtasun, R.: Are we ready for autonomous driving? The KITTI vision benchmark suite. In: Conference on Computer Vision and Pattern Recognition (CVPR) (2012)","DOI":"10.1109\/CVPR.2012.6248074"},{"key":"9_CR20","doi-asserted-by":"publisher","first-page":"148","DOI":"10.1016\/j.inffus.2018.11.017","volume":"50","author":"D Guan","year":"2019","unstructured":"Guan, D., Cao, Y., Yang, J., Cao, Y., Yang, M.Y.: Fusion of multispectral data through illumination-aware deep neural networks for pedestrian detection. Inf. Fusion 50, 148\u2013157 (2019)","journal-title":"Inf. Fusion"},{"key":"9_CR21","unstructured":"Guo, C., Pleiss, G., Sun, Y., Weinberger, K.Q.: On calibration of modern neural networks. arXiv:1706.04599 (2017)"},{"key":"9_CR22","unstructured":"Guo, R., et al.: 2nd place solution in google ai open images object detection track 2019. arXiv:1911.07171 (2019)"},{"key":"9_CR23","doi-asserted-by":"crossref","unstructured":"He, K., Gkioxari, G., Doll\u00e1r, P., Girshick, R.: Mask R-CNN. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.322"},{"key":"9_CR24","doi-asserted-by":"crossref","unstructured":"Hosang, J., Benenson, R., Schiele, B.: Learning non-maximum suppression. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pp. 4507\u20134515 (2017)","DOI":"10.1109\/CVPR.2017.685"},{"key":"9_CR25","unstructured":"Huang, Z., Chen, Z., Li, Q., Zhang, H., Wang, N.: 1st place solutions of waymo open dataset challenge 2020\u20132D object detection track. arXiv:2008.01365 (2020)"},{"key":"9_CR26","doi-asserted-by":"crossref","unstructured":"Hwang, S., Park, J., Kim, N., Choi, Y., So Kweon, I.: Multispectral pedestrian detection: Benchmark dataset and baseline. In: CVPR (2015)","DOI":"10.1109\/CVPR.2015.7298706"},{"key":"9_CR27","doi-asserted-by":"crossref","unstructured":"Kiew, M.Y., Bagdanov, A.D., Bertini, M.: Bottom-up and layer-wise domain adaptation for pedestrian detection in thermal images. ACM Transactions on Multimedia Computing Communications and Applications (2020)","DOI":"10.1145\/3418213"},{"issue":"4","key":"9_CR28","doi-asserted-by":"publisher","first-page":"7846","DOI":"10.1109\/LRA.2021.3099870","volume":"6","author":"J Kim","year":"2021","unstructured":"Kim, J., Kim, H., Kim, T., Kim, N., Choi, Y.: MLPD: multi-label pedestrian detector in multispectral domain. IEEE Rob. Auto. Lett. 6(4), 7846\u20137853 (2021)","journal-title":"IEEE Rob. Auto. Lett."},{"key":"9_CR29","doi-asserted-by":"crossref","unstructured":"Kittler, J., Hatef, M., Duin, R.P., Matas, J.: On combining classifiers. IEEE Trans. Pattern Anal. Mach. Intell. 20(3), 226\u2013239 (1998)","DOI":"10.1109\/34.667881"},{"key":"9_CR30","doi-asserted-by":"crossref","unstructured":"Konig, D., Adam, M., Jarvers, C., Layher, G., Neumann, H., Teutsch, M.: Fully convolutional region proposal networks for multispectral person detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 49\u201356 (2017)","DOI":"10.1109\/CVPRW.2017.36"},{"key":"9_CR31","first-page":"1097","volume":"25","author":"A Krizhevsky","year":"2012","unstructured":"Krizhevsky, A., Sutskever, I., Hinton, G.E.: ImageNet classification with deep convolutional neural networks. Adv. Neural. Inf. Process. Syst. 
25, 1097\u20131105 (2012)","journal-title":"Adv. Neural. Inf. Process. Syst."},{"key":"9_CR32","unstructured":"Li, C., Song, D., Tong, R., Tang, M.: Multispectral pedestrian detection via simultaneous detection and segmentation. arXiv:1808.04818 (2018)"},{"key":"9_CR33","doi-asserted-by":"publisher","first-page":"161","DOI":"10.1016\/j.patcog.2018.08.005","volume":"85","author":"C Li","year":"2019","unstructured":"Li, C., Song, D., Tong, R., Tang, M.: Illumination-aware faster r-CNN for robust multispectral pedestrian detection. Pattern Recogn. 85, 161\u2013171 (2019)","journal-title":"Pattern Recogn."},{"key":"9_CR34","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"740","DOI":"10.1007\/978-3-319-10602-1_48","volume-title":"Computer Vision \u2013 ECCV 2014","author":"TY Lin","year":"2014","unstructured":"Lin, T.Y., et al.: Microsoft COCO: common objects in context. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 740\u2013755. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_48"},{"key":"9_CR35","unstructured":"Liu, J., Zhang, S., Wang, S., Metaxas, D.: Improved annotations of test set of KAIST (2018)"},{"key":"9_CR36","doi-asserted-by":"crossref","unstructured":"Liu, J., Zhang, S., Wang, S., Metaxas, D.N.: Multispectral deep neural networks for pedestrian detection. In: BMVC (2016)","DOI":"10.5244\/C.30.73"},{"key":"9_CR37","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"21","DOI":"10.1007\/978-3-319-46448-0_2","volume-title":"Computer Vision \u2013 ECCV 2016","author":"W Liu","year":"2016","unstructured":"Liu, W., Anguelov, D., Erhan, D., Szegedy, C., Reed, S., Fu, C.-Y., Berg, A.C.: SSD: single shot multibox detector. In: Leibe, B., Matas, J., Sebe, N., Welling, M. (eds.) ECCV 2016. LNCS, vol. 9905, pp. 21\u201337. Springer, Cham (2016). https:\/\/doi.org\/10.1007\/978-3-319-46448-0_2"},{"key":"9_CR38","unstructured":"Munir, F., Azam, S., Rafique, M.A., Sheri, A.M., Jeon, M.: Thermal object detection using domain adaptation through style consistency. arXiv:2006.00821 (2020)"},{"key":"9_CR39","doi-asserted-by":"crossref","unstructured":"Nix, D.A., Weigend, A.S.: Estimating the mean and variance of the target probability distribution. In: Proceedings of 1994 IEEE international conference on neural networks (ICNN 1994), vol. 1, pp. 55\u201360. IEEE (1994)","DOI":"10.1109\/ICNN.1994.374138"},{"key":"9_CR40","unstructured":"Paszke, A., et al.: Automatic differentiation in Pytorch (2017)"},{"key":"9_CR41","unstructured":"Pearl, J.: Probabilistic Reasoning in Intelligent Systems: Networks of Plausible Inference. Elsevier, San Mateo (2014)"},{"key":"9_CR42","unstructured":"Quigley, M., et al.: ROS: an open-source robot operating system. In: ICRA Workshop on Open Source Software, vol. 3, p. 5. Kobe, Japan (2009)"},{"key":"9_CR43","doi-asserted-by":"crossref","unstructured":"Redmon, J., Divvala, S., Girshick, R., Farhadi, A.: You only look once: Unified, real-time object detection. In: CVPR (2016)","DOI":"10.1109\/CVPR.2016.91"},{"key":"9_CR44","doi-asserted-by":"crossref","unstructured":"Redmon, J., Farhadi, A.: Yolo9000: better, faster, stronger. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.690"},{"key":"9_CR45","unstructured":"Ren, S., He, K., Girshick, R., Sun, J.: Faster R-CNN: towards real-time object detection with region proposal networks. 
In: NeurIPS (2015)"},{"issue":"3","key":"9_CR46","doi-asserted-by":"publisher","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","volume":"115","author":"O Russakovsky","year":"2015","unstructured":"Russakovsky, O., et al.: ImageNet large scale visual recognition challenge. Int. J. Comput. Vis. 115(3), 211\u2013252 (2015)","journal-title":"Int. J. Comput. Vis."},{"key":"9_CR47","doi-asserted-by":"publisher","DOI":"10.1016\/j.imavis.2021.104117","volume":"107","author":"R Solovyev","year":"2021","unstructured":"Solovyev, R., Wang, W., Gabruseva, T.: Weighted boxes fusion: ensembling boxes from different object detection models. Image Vis. Comput. 107, 104117 (2021)","journal-title":"Image Vis. Comput."},{"key":"9_CR48","doi-asserted-by":"crossref","unstructured":"Valverde, F.R., Hurtado, J.V., Valada, A.: There is more than meets the eye: self-supervised multi-object detection and tracking with sound by distilling multimodal knowledge. In: CVPR (2021)","DOI":"10.1109\/CVPR46437.2021.01144"},{"key":"9_CR49","unstructured":"Wagner, J., Fischer, V., Herman, M., Behnke, S.: Multispectral pedestrian detection using deep fusion convolutional neural networks. In: Proceedings of European Symposium on Artificial Neural Networks (2016)"},{"key":"9_CR50","unstructured":"Wu, Y., Kirillov, A., Massa, F., Lo, W.Y., Girshick, R.: Detectron2. https:\/\/github.com\/facebookresearch\/detectron2 (2019)"},{"key":"9_CR51","doi-asserted-by":"crossref","unstructured":"Xu, D., Ouyang, W., Ricci, E., Wang, X., Sebe, N.: Learning cross-modal deep representations for robust pedestrian detection. In: CVPR (2017)","DOI":"10.1109\/CVPR.2017.451"},{"key":"9_CR52","doi-asserted-by":"crossref","unstructured":"Xu, P., Davoine, F., Denoeux, T.: Evidential combination of pedestrian detectors. In: British Machine Vision Conference, pp. 1\u201314 (2014)","DOI":"10.5244\/C.28.2"},{"key":"9_CR53","unstructured":"Zhang, H., Dana, K.: Multi-style generative network for real-time transfer. arXiv:1703.06953 (2017)"},{"key":"9_CR54","doi-asserted-by":"crossref","unstructured":"Zhang, H., Fromont, E., Lef\u00e8vre, S., Avignon, B.: Multispectral fusion for object detection with cyclic fuse-and-refine blocks. In: IEEE International Conference on Image Processing (ICIP) (2020)","DOI":"10.1109\/ICIP40778.2020.9191080"},{"key":"9_CR55","doi-asserted-by":"crossref","unstructured":"Zhang, H., Fromont, E., Lef\u00e8vre, S., Avignon, B.: Guided attentive feature fusion for multispectral pedestrian detection. In: WACV (2021)","DOI":"10.1109\/WACV48630.2021.00012"},{"key":"9_CR56","doi-asserted-by":"publisher","first-page":"20","DOI":"10.1016\/j.inffus.2018.09.015","volume":"50","author":"L Zhang","year":"2019","unstructured":"Zhang, L., et al.: Cross-modality interactive attention network for multispectral pedestrian detection. Inf. Fus. 50, 20\u201329 (2019)","journal-title":"Inf. Fus."},{"key":"9_CR57","doi-asserted-by":"crossref","unstructured":"Zhang, L., Zhu, X., Chen, X., Yang, X., Lei, Z., Liu, Z.: Weakly aligned cross-modal learning for multispectral pedestrian detection. In: ICCV (2019)","DOI":"10.1109\/ICCV.2019.00523"},{"key":"9_CR58","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"787","DOI":"10.1007\/978-3-030-58523-5_46","volume-title":"Computer Vision \u2013 ECCV 2020","author":"K Zhou","year":"2020","unstructured":"Zhou, K., Chen, L., Cao, X.: Improving multispectral pedestrian detection by addressing modality imbalance problems. 
In: Vedaldi, A., Bischof, H., Brox, T., Frahm, J.-M. (eds.) ECCV 2020. LNCS, vol. 12363, pp. 787\u2013803. Springer, Cham (2020). https:\/\/doi.org\/10.1007\/978-3-030-58523-5_46"},{"key":"9_CR59","doi-asserted-by":"crossref","unstructured":"Zhu, J.Y., Park, T., Isola, P., Efros, A.A.: Unpaired image-to-image translation using cycle-consistent adversarial networks. In: ICCV (2017)","DOI":"10.1109\/ICCV.2017.244"},{"key":"9_CR60","series-title":"Lecture Notes in Computer Science","doi-asserted-by":"publisher","first-page":"391","DOI":"10.1007\/978-3-319-10602-1_26","volume-title":"Computer Vision \u2013 ECCV 2014","author":"CL Zitnick","year":"2014","unstructured":"Zitnick, C.L., Doll\u00e1r, P.: Edge Boxes: locating object proposals from edges. In: Fleet, D., Pajdla, T., Schiele, B., Tuytelaars, T. (eds.) ECCV 2014. LNCS, vol. 8693, pp. 391\u2013405. Springer, Cham (2014). https:\/\/doi.org\/10.1007\/978-3-319-10602-1_26"}],"container-title":["Lecture Notes in Computer Science","Computer Vision \u2013 ECCV 2022"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/link.springer.com\/content\/pdf\/10.1007\/978-3-031-20077-9_9","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,10,7]],"date-time":"2024-10-07T10:07:07Z","timestamp":1728295627000},"score":1,"resource":{"primary":{"URL":"https:\/\/link.springer.com\/10.1007\/978-3-031-20077-9_9"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022]]},"ISBN":["9783031200762","9783031200779"],"references-count":60,"URL":"https:\/\/doi.org\/10.1007\/978-3-031-20077-9_9","relation":{},"ISSN":["0302-9743","1611-3349"],"issn-type":[{"type":"print","value":"0302-9743"},{"type":"electronic","value":"1611-3349"}],"subject":[],"published":{"date-parts":[[2022]]},"assertion":[{"value":"6 November 2022","order":1,"name":"first_online","label":"First Online","group":{"name":"ChapterHistory","label":"Chapter History"}},{"value":"ECCV","order":1,"name":"conference_acronym","label":"Conference Acronym","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"European Conference on Computer Vision","order":2,"name":"conference_name","label":"Conference Name","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Tel Aviv","order":3,"name":"conference_city","label":"Conference City","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"Israel","order":4,"name":"conference_country","label":"Conference Country","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"2022","order":5,"name":"conference_year","label":"Conference Year","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"23 October 2022","order":7,"name":"conference_start_date","label":"Conference Start Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"27 October 2022","order":8,"name":"conference_end_date","label":"Conference End Date","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"17","order":9,"name":"conference_number","label":"Conference Number","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"eccv2022","order":10,"name":"conference_id","label":"Conference ID","group":{"name":"ConferenceInfo","label":"Conference Information"}},{"value":"https:\/\/eccv2022.ecva.net\/","order":11,"name":"conference_url","label":"Conference URL","group":{"name":"ConferenceInfo","label":"Conference 
Information"}},{"value":"Double-blind","order":1,"name":"type","label":"Type","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"CMT","order":2,"name":"conference_management_system","label":"Conference Management System","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"5804","order":3,"name":"number_of_submissions_sent_for_review","label":"Number of Submissions Sent for Review","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"1645","order":4,"name":"number_of_full_papers_accepted","label":"Number of Full Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"0","order":5,"name":"number_of_short_papers_accepted","label":"Number of Short Papers Accepted","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"28% - The value is computed by the equation \"Number of Full Papers Accepted \/ Number of Submissions Sent for Review * 100\" and then rounded to a whole number.","order":6,"name":"acceptance_rate_of_full_papers","label":"Acceptance Rate of Full Papers","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.21","order":7,"name":"average_number_of_reviews_per_paper","label":"Average Number of Reviews per Paper","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"3.91","order":8,"name":"average_number_of_papers_per_reviewer","label":"Average Number of Papers per Reviewer","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}},{"value":"Yes","order":9,"name":"external_reviewers_involved","label":"External Reviewers Involved","group":{"name":"ConfEventPeerReviewInformation","label":"Peer Review Information (provided by the conference organizers)"}}]}}