Link to original content: https://api.crossref.org/works/10.1016/J.INS.2023.03.058
{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,6]],"date-time":"2024-09-06T08:46:25Z","timestamp":1725612385687},"reference-count":43,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2023,7,1]],"date-time":"2023-07-01T00:00:00Z","timestamp":1688169600000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"},{"start":{"date-parts":[[2023,7,1]],"date-time":"2023-07-01T00:00:00Z","timestamp":1688169600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-017"},{"start":{"date-parts":[[2023,7,1]],"date-time":"2023-07-01T00:00:00Z","timestamp":1688169600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"},{"start":{"date-parts":[[2023,7,1]],"date-time":"2023-07-01T00:00:00Z","timestamp":1688169600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-012"},{"start":{"date-parts":[[2023,7,1]],"date-time":"2023-07-01T00:00:00Z","timestamp":1688169600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2023,7,1]],"date-time":"2023-07-01T00:00:00Z","timestamp":1688169600000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-004"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61803072","61873044","62062034","62272081"],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100005047","name":"Natural Science Foundation of Liaoning Province","doi-asserted-by":"publisher","award":["2021-MS-111"],"id":[{"id":"10.13039\/501100005047","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012226","name":"Fundamental Research Funds for the Central Universities","doi-asserted-by":"publisher","award":["DUT22YG128"],"id":[{"id":"10.13039\/501100012226","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Information Sciences"],"published-print":{"date-parts":[[2023,7]]},"DOI":"10.1016\/j.ins.2023.03.058","type":"journal-article","created":{"date-parts":[[2023,3,15]],"date-time":"2023-03-15T07:15:55Z","timestamp":1678864555000},"page":"264-279","update-policy":"http:\/\/dx.doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":15,"special_numbering":"C","title":["A novel two-level interactive action recognition model based on inertial data 
fusion"],"prefix":"10.1016","volume":"633","author":[{"given":"Sen","family":"Qiu","sequence":"first","affiliation":[]},{"ORCID":"http:\/\/orcid.org\/0000-0003-1750-3221","authenticated-orcid":false,"given":"Tianqi","family":"Fan","sequence":"additional","affiliation":[]},{"given":"Junhan","family":"Jiang","sequence":"additional","affiliation":[]},{"given":"Zhelong","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Yongzhen","family":"Wang","sequence":"additional","affiliation":[]},{"given":"Junnan","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Tao","family":"Sun","sequence":"additional","affiliation":[]},{"given":"Nan","family":"Jiang","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"key":"10.1016\/j.ins.2023.03.058_br0010","doi-asserted-by":"crossref","first-page":"305","DOI":"10.1016\/j.ins.2014.12.017","article-title":"Extensive assessment and evaluation methodologies on assistive social robots for modelling human\u2013robot interaction \u2013 a review","volume":"301","author":"Sim","year":"2015","journal-title":"Inf. Sci."},{"issue":"1","key":"10.1016\/j.ins.2023.03.058_br0020","doi-asserted-by":"crossref","first-page":"32","DOI":"10.3390\/technologies6010032","article-title":"Socially assistive robotics: robot exercise trainer for older adults","volume":"6","author":"Lotfi","year":"2018","journal-title":"Technologies"},{"issue":"3","key":"10.1016\/j.ins.2023.03.058_br0030","doi-asserted-by":"crossref","first-page":"657","DOI":"10.1007\/s10514-016-9598-5","article-title":"An autonomous robotic exercise tutor for elderly people","volume":"41","author":"G\u00f6rer","year":"2017","journal-title":"Auton. Robots"},{"issue":"4\u20135","key":"10.1016\/j.ins.2023.03.058_br0040","doi-asserted-by":"crossref","first-page":"691","DOI":"10.1177\/0278364921990671","article-title":"AIR-Act2Act: human\u2013human interaction dataset for teaching non-verbal social behaviors to robots","volume":"40","author":"Ko","year":"2021","journal-title":"Int. J. Robot. Res."},{"issue":"4","key":"10.1016\/j.ins.2023.03.058_br0050","doi-asserted-by":"crossref","first-page":"988","DOI":"10.1109\/TRO.2016.2588880","article-title":"Data-driven HRI: learning social behaviors by example from human-human interaction","volume":"32","author":"Liu","year":"2016","journal-title":"IEEE Trans. Robot."},{"key":"10.1016\/j.ins.2023.03.058_br0060","doi-asserted-by":"crossref","DOI":"10.1016\/j.asoc.2021.107728","article-title":"DanHAR: dual attention network for multimodal human activity recognition using wearable sensors","volume":"111","author":"Gao","year":"2021","journal-title":"Appl. Soft Comput."},{"key":"10.1016\/j.ins.2023.03.058_br0070","doi-asserted-by":"crossref","first-page":"165","DOI":"10.1016\/j.comcom.2020.01.012","article-title":"Improved 1D-CNNs for behavior recognition using wearable sensor network","volume":"151","author":"Xu","year":"2020","journal-title":"Comput. Commun."},{"key":"10.1016\/j.ins.2023.03.058_br0080","doi-asserted-by":"crossref","DOI":"10.1016\/j.patcog.2020.107561","article-title":"Sensor-based and vision-based human activity recognition: a comprehensive survey","volume":"108","author":"Minh Dang","year":"2020","journal-title":"Pattern Recognit."},{"issue":"6","key":"10.1016\/j.ins.2023.03.058_br0090","doi-asserted-by":"crossref","DOI":"10.1007\/s10916-018-0948-z","article-title":"Human activity recognition from body sensor data using deep learning","volume":"42","author":"Hassan","year":"2018","journal-title":"J. Med. 
Syst."},{"key":"10.1016\/j.ins.2023.03.058_br0100","series-title":"Proceedings of the IEEE International Conference on Computer Vision 2017","first-page":"2166","article-title":"Lattice long short-term memory for human action recognition","author":"Sun","year":"2017"},{"issue":"24","key":"10.1016\/j.ins.2023.03.058_br0110","doi-asserted-by":"crossref","first-page":"32275","DOI":"10.1007\/s11042-018-6260-6","article-title":"Time-varying LSTM networks for action recognition","volume":"77","author":"Ma","year":"2018","journal-title":"Multimed. Tools Appl."},{"key":"10.1016\/j.ins.2023.03.058_br0120","article-title":"Deformable convolutional networks for multimodal human activity recognition using wearable sensors","volume":"71","author":"Xu","year":"2022","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"10.1016\/j.ins.2023.03.058_br0130","doi-asserted-by":"crossref","DOI":"10.1109\/TIM.2021.3091990","article-title":"Shallow convolutional neural networks for human activity recognition using wearable sensors","volume":"70","author":"Huang","year":"2021","journal-title":"IEEE Trans. Instrum. Meas."},{"key":"10.1016\/j.ins.2023.03.058_br0140","doi-asserted-by":"crossref","DOI":"10.1016\/j.engappai.2020.103679","article-title":"Improving physical activity recognition using a new deep learning architecture and post-processing techniques","volume":"92","author":"Gil-Mart\u00edn","year":"2020","journal-title":"Eng. Appl. Artif. Intell."},{"issue":"5","key":"10.1016\/j.ins.2023.03.058_br0150","doi-asserted-by":"crossref","first-page":"8553","DOI":"10.1109\/JIOT.2019.2920283","article-title":"IoT wearable sensor and deep learning: an integrated approach for personalized human activity recognition in a smart home environment","volume":"6","author":"Bianchi","year":"2019","journal-title":"IEEE Int. Things J."},{"key":"10.1016\/j.ins.2023.03.058_br0160","author":"Szegedy"},{"key":"10.1016\/j.ins.2023.03.058_br0170","first-page":"1","article-title":"A multichannel CNN-GRU model for human activity recognition","volume":"10","author":"Lu","year":"2022","journal-title":"IEEE Access"},{"issue":"6","key":"10.1016\/j.ins.2023.03.058_br0180","doi-asserted-by":"crossref","first-page":"6164","DOI":"10.1109\/JSEN.2022.3148431","article-title":"A novel deep learning bi-GRU-I model for real-time human activity recognition using inertial sensors","volume":"22","author":"Tong","year":"2022","journal-title":"IEEE Sens. J."},{"key":"10.1016\/j.ins.2023.03.058_br0190","doi-asserted-by":"crossref","first-page":"195","DOI":"10.1016\/j.neucom.2018.09.060","article-title":"A two-level attention-based interaction model for multi-person activity recognition","volume":"322","author":"Lu","year":"2018","journal-title":"Neurocomputing"},{"key":"10.1016\/j.ins.2023.03.058_br0200","doi-asserted-by":"crossref","DOI":"10.1016\/j.cviu.2019.102898","article-title":"Cascade multi-head attention networks for action recognition","volume":"192","author":"Wang","year":"2020","journal-title":"Comput. Vis. Image Underst."},{"issue":"10","key":"10.1016\/j.ins.2023.03.058_br0210","doi-asserted-by":"crossref","first-page":"4291","DOI":"10.1109\/TNNLS.2020.3019893","article-title":"Attention in natural language processing","volume":"32","author":"Galassi","year":"2021","journal-title":"IEEE Trans. Neural Netw. Learn. 
Syst."},{"key":"10.1016\/j.ins.2023.03.058_br0220","doi-asserted-by":"crossref","first-page":"820","DOI":"10.1016\/j.future.2021.06.045","article-title":"Human action recognition using attention based LSTM network with dilated CNN features","volume":"125","author":"Muhammad Mustaqeem","year":"2021","journal-title":"Future Gener. Comput. Syst."},{"issue":"2","key":"10.1016\/j.ins.2023.03.058_br0230","doi-asserted-by":"crossref","first-page":"663","DOI":"10.1109\/TNNLS.2020.2978942","article-title":"Host-parasite: graph LSTM-in-LSTM for group activity recognition","volume":"32","author":"Shu","year":"2021","journal-title":"IEEE Trans. Neural Netw. Learn. Syst."},{"issue":"2","key":"10.1016\/j.ins.2023.03.058_br0240","doi-asserted-by":"crossref","first-page":"549","DOI":"10.1109\/TCSVT.2019.2894161","article-title":"StagNet: an attentive semantic RNN for group activity and individual action recognition","volume":"30","author":"Qi","year":"2020","journal-title":"IEEE Trans. Circuits Syst. Video Technol."},{"key":"10.1016\/j.ins.2023.03.058_br0250","doi-asserted-by":"crossref","first-page":"317","DOI":"10.1016\/j.ins.2016.06.016","article-title":"Interactive activity recognition using pose-based spatio-temporal relation features and four-level pachinko allocation model","volume":"369","author":"Thien","year":"2016","journal-title":"Inf. Sci."},{"issue":"13","key":"10.1016\/j.ins.2023.03.058_br0260","doi-asserted-by":"crossref","first-page":"17595","DOI":"10.1007\/s11042-022-11987-0","article-title":"A two-step shapelets based framework for interactional activities recognition","volume":"81","author":"Yang","year":"2022","journal-title":"Multimed. Tools Appl."},{"issue":"8","key":"10.1016\/j.ins.2023.03.058_br0270","doi-asserted-by":"crossref","DOI":"10.3390\/s20082346","article-title":"Human interaction recognition based on whole-individual detection","volume":"20","author":"Ye","year":"2020","journal-title":"Sensors (Switzerland)"},{"key":"10.1016\/j.ins.2023.03.058_br0280","doi-asserted-by":"crossref","first-page":"287","DOI":"10.1016\/j.ins.2018.12.050","article-title":"Action recognition for depth video using multi-view dynamic images","volume":"480","author":"Xiao","year":"2019","journal-title":"Inf. Sci."},{"key":"10.1016\/j.ins.2023.03.058_br0290","doi-asserted-by":"crossref","first-page":"275","DOI":"10.1016\/j.ins.2020.01.002","article-title":"Human behavior recognition from multiview videos","volume":"517","author":"Hsueh","year":"2020","journal-title":"Inf. Sci."},{"issue":"C","key":"10.1016\/j.ins.2023.03.058_br0300","doi-asserted-by":"crossref","first-page":"241","DOI":"10.1016\/j.inffus.2021.11.006","article-title":"Multi-sensor information fusion based on machine learning for real applications in human activity recognition: state-of-the-art and research challenges","volume":"80","author":"Qiu","year":"2022","journal-title":"Inf. Fusion"},{"issue":"2","key":"10.1016\/j.ins.2023.03.058_br0310","doi-asserted-by":"crossref","first-page":"393","DOI":"10.1007\/s11263-019-01248-3","article-title":"Semantic image networks for human action recognition","volume":"128","author":"Khowaja","year":"2020","journal-title":"Int. J. Comput. Vis."},{"issue":"4","key":"10.1016\/j.ins.2023.03.058_br0320","doi-asserted-by":"crossref","first-page":"510","DOI":"10.1109\/LSP.2016.2611485","article-title":"Beyond frame-level CNN: saliency-aware 3-D CNN with LSTM for video action recognition","volume":"24","author":"Wang","year":"2017","journal-title":"IEEE Signal Process. 
Lett."},{"issue":"4","key":"10.1016\/j.ins.2023.03.058_br0330","doi-asserted-by":"crossref","first-page":"1289","DOI":"10.1016\/j.patcog.2014.10.012","article-title":"Similar gait action recognition using an inertial sensor","volume":"48","author":"Ngo","year":"2015","journal-title":"Pattern Recognit."},{"key":"10.1016\/j.ins.2023.03.058_br0340","doi-asserted-by":"crossref","first-page":"108","DOI":"10.1016\/j.neucom.2015.08.096","article-title":"From action to activity: sensor-based activity recognition","volume":"181","author":"Liu","year":"2016","journal-title":"Neurocomputing"},{"issue":"2","key":"10.1016\/j.ins.2023.03.058_br0350","doi-asserted-by":"crossref","first-page":"1646","DOI":"10.1002\/int.22689","article-title":"Sensor network oriented human motion capture via wearable intelligent system","volume":"37","author":"Qiu","year":"2022","journal-title":"Int. J. Intell. Syst."},{"key":"10.1016\/j.ins.2023.03.058_br0360","first-page":"1","article-title":"Inception-LSTM human motion recognition with channel attention mechanism","volume":"2022","author":"Xu","year":"2022","journal-title":"Comput. Math. Methods Med."},{"key":"10.1016\/j.ins.2023.03.058_br0370","doi-asserted-by":"crossref","first-page":"864","DOI":"10.1016\/j.ins.2022.05.092","article-title":"Spatial-temporal interaction learning based two-stream network for action recognition","volume":"606","author":"Liu","year":"2022","journal-title":"Inf. Sci."},{"issue":"8","key":"10.1016\/j.ins.2023.03.058_br0380","doi-asserted-by":"crossref","first-page":"3703","DOI":"10.1109\/TIP.2019.2901707","article-title":"Deep attention network for egocentric action recognition","volume":"28","author":"Lu","year":"2019","journal-title":"IEEE Trans. Image Process."},{"key":"10.1016\/j.ins.2023.03.058_br0390","doi-asserted-by":"crossref","first-page":"338","DOI":"10.1016\/j.neucom.2021.06.088","article-title":"Nesting spatiotemporal attention networks for action recognition","volume":"459","author":"Li","year":"2021","journal-title":"Neurocomputing"},{"issue":"23","key":"10.1016\/j.ins.2023.03.058_br0400","doi-asserted-by":"crossref","first-page":"16439","DOI":"10.1007\/s00521-021-06239-5","article-title":"Local-aware spatio-temporal attention network with multi-stage feature fusion for human action recognition","volume":"33","author":"Hou","year":"2021","journal-title":"Neural Comput. Appl."},{"key":"10.1016\/j.ins.2023.03.058_br0410","doi-asserted-by":"crossref","first-page":"65689","DOI":"10.1109\/ACCESS.2020.2979742","article-title":"Group activity recognition by using effective multiple modality relation representation with temporal-spatial attention","volume":"8","author":"Xu","year":"2020","journal-title":"IEEE Access"},{"issue":"12","key":"10.1016\/j.ins.2023.03.058_br0420","doi-asserted-by":"crossref","first-page":"7574","DOI":"10.1109\/TNNLS.2021.3085567","article-title":"Position-aware participation-contributed temporal dynamic model for group activity recognition","volume":"33","author":"Yan","year":"2022","journal-title":"IEEE Trans. Neural Netw. Learn. 
Syst."},{"key":"10.1016\/j.ins.2023.03.058_br0430","author":"Vaswani"}],"container-title":["Information Sciences"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0020025523003535?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S0020025523003535?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2024,1,13]],"date-time":"2024-01-13T14:13:36Z","timestamp":1705155216000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S0020025523003535"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2023,7]]},"references-count":43,"alternative-id":["S0020025523003535"],"URL":"http:\/\/dx.doi.org\/10.1016\/j.ins.2023.03.058","relation":{},"ISSN":["0020-0255"],"issn-type":[{"value":"0020-0255","type":"print"}],"subject":[],"published":{"date-parts":[[2023,7]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"A novel two-level interactive action recognition model based on inertial data fusion","name":"articletitle","label":"Article Title"},{"value":"Information Sciences","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.ins.2023.03.058","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2023 Published by Elsevier Inc.","name":"copyright","label":"Copyright"}]}}