Link to original content: https://api.crossref.org/works/10.1016/J.COMNET.2019.06.006
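The record below is the raw JSON returned by the Crossref REST API /works endpoint for this DOI. As a minimal sketch of how the same document can be retrieved programmatically (assuming the public api.crossref.org endpoint and the third-party Python requests package, neither of which is part of the record itself):

import requests  # third-party HTTP client; assumed to be available in the environment

DOI = "10.1016/j.comnet.2019.06.006"
resp = requests.get(f"https://api.crossref.org/works/{DOI}", timeout=30)
resp.raise_for_status()

record = resp.json()      # envelope: {"status": "ok", ..., "message": {...}}
work = record["message"]  # the work metadata reproduced below
print(work["title"][0])   # "A Q-learning algorithm for task scheduling based on improved SVM ..."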
{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,9,15]],"date-time":"2024-09-15T19:06:00Z","timestamp":1726427160207},"reference-count":20,"publisher":"Elsevier BV","license":[{"start":{"date-parts":[[2019,10,1]],"date-time":"2019-10-01T00:00:00Z","timestamp":1569888000000},"content-version":"tdm","delay-in-days":0,"URL":"https:\/\/www.elsevier.com\/tdm\/userlicense\/1.0\/"}],"funder":[{"DOI":"10.13039\/501100001809","name":"National Natural Science Foundation of China","doi-asserted-by":"publisher","award":["61806067"],"id":[{"id":"10.13039\/501100001809","id-type":"DOI","asserted-by":"publisher"}]},{"DOI":"10.13039\/501100012326","name":"International Science & Technology Cooperation Program of China","doi-asserted-by":"publisher","award":["2015DFI12950"],"id":[{"id":"10.13039\/501100012326","id-type":"DOI","asserted-by":"publisher"}]}],"content-domain":{"domain":["elsevier.com","sciencedirect.com"],"crossmark-restriction":true},"short-container-title":["Computer Networks"],"published-print":{"date-parts":[[2019,10]]},"DOI":"10.1016\/j.comnet.2019.06.006","type":"journal-article","created":{"date-parts":[[2019,6,19]],"date-time":"2019-06-19T03:51:46Z","timestamp":1560916306000},"page":"138-149","update-policy":"http:\/\/dx.doi.org\/10.1016\/elsevier_cm_policy","source":"Crossref","is-referenced-by-count":30,"special_numbering":"C","title":["A Q-learning algorithm for task scheduling based on improved SVM in wireless sensor networks"],"prefix":"10.1016","volume":"161","author":[{"given":"Zhenchun","family":"Wei","sequence":"first","affiliation":[]},{"given":"Fei","family":"Liu","sequence":"additional","affiliation":[]},{"given":"Yan","family":"Zhang","sequence":"additional","affiliation":[]},{"given":"Juan","family":"Xu","sequence":"additional","affiliation":[]},{"given":"Jianjun","family":"Ji","sequence":"additional","affiliation":[]},{"given":"Zengwei","family":"Lyu","sequence":"additional","affiliation":[]}],"member":"78","reference":[{"issue":"2","key":"10.1016\/j.comnet.2019.06.006_bib0001","doi-asserted-by":"crossref","first-page":"714","DOI":"10.1109\/TVT.2014.2322356","article-title":"Maximum lifetime scheduling for target coverage and data collection in wireless sensor networks","volume":"64","author":"Lu","year":"2015","journal-title":"IEEE Trans. Veh. Technol."},{"issue":"10","key":"10.1016\/j.comnet.2019.06.006_bib0002","first-page":"1155","article-title":"Performance analysis of resource-aware task scheduling methods in wireless sensor networks","volume":"2014","author":"Khan","year":"2014","journal-title":"Int. J. Distrib. Sens.Netw."},{"issue":"1","key":"10.1016\/j.comnet.2019.06.006_bib0003","doi-asserted-by":"crossref","first-page":"141","DOI":"10.1016\/j.comnet.2017.06.005","article-title":"A task scheduling algorithm based on q-learning and shared value function for WSNs","volume":"126","author":"Wei","year":"2017","journal-title":"Comput. 
Netw."},{"issue":"4","key":"10.1016\/j.comnet.2019.06.006_bib0004","doi-asserted-by":"crossref","first-page":"244","DOI":"10.1504\/IJSNET.2017.087899","article-title":"Energy-aware task scheduling by a true online reinforcement learning in wireless sensor networks","volume":"25","author":"Khan","year":"2017","journal-title":"IJSNet"},{"key":"10.1016\/j.comnet.2019.06.006_bib0005","series-title":"IEEE International Conference on Mobile Adhoc and Sensor Systems","first-page":"1","article-title":"Distributed independent reinforcement learning (DIRL) approach to resource management in wireless sensor networks","author":"Shan","year":"2008"},{"key":"10.1016\/j.comnet.2019.06.006_bib0006","series-title":"IEEE International Conference on Networked Embedded Systems for Every Application","first-page":"1","article-title":"Resource coordination in wireless sensor networks by combinatorial auction based method","author":"Khan","year":"2013"},{"key":"10.1016\/j.comnet.2019.06.006_bib0007","series-title":"IEEE International Conference on Communications Workshops (ICC)","first-page":"871","article-title":"Energy-aware task scheduling in wireless sensor networks based on cooperative reinforcement learning","author":"Khan","year":"2014"},{"key":"10.1016\/j.comnet.2019.06.006_bib0008","series-title":"IEEE International Conference on Pervasive Computing and Communications Workshops","first-page":"895","article-title":"Resource coordination in wireless sensor networks based on cooperative reinforcement learning","author":"Khan","year":"2012"},{"key":"10.1016\/j.comnet.2019.06.006_bib0009","series-title":"International Conference on Micro-Electronics, Electromagnetics and Telecommunications","first-page":"603","article-title":"Improved process scheduling in real-time operating systems using support vector machines","author":"Satyanarayana","year":"2018"},{"issue":"4","key":"10.1016\/j.comnet.2019.06.006_bib0011","doi-asserted-by":"crossref","first-page":"2111","DOI":"10.1016\/j.jfranklin.2018.11.021","article-title":"Calibration of insensitive loss in support vector machines regression","volume":"356","author":"Tong","year":"2019","journal-title":"J. Frankl. Inst."},{"issue":"3","key":"10.1016\/j.comnet.2019.06.006_bib0012","doi-asserted-by":"crossref","first-page":"381","DOI":"10.1007\/s10462-012-9383-6","article-title":"Application of reinforcement learning to routing in distributed wireless networks: a review","volume":"43","author":"AI-Rawi","year":"2015","journal-title":"Artif. Intell. Rev."},{"issue":"2","key":"10.1016\/j.comnet.2019.06.006_bib0013","doi-asserted-by":"crossref","first-page":"160","DOI":"10.15837\/ijccc.2014.2.1016","article-title":"A multi-objective optimization algorithm of task scheduling in WSN","volume":"9","author":"Dai","year":"2014","journal-title":"Int. J. Comput. Commun. Control"},{"issue":"1","key":"10.1016\/j.comnet.2019.06.006_bib0014","doi-asserted-by":"crossref","first-page":"467","DOI":"10.1142\/S1469026815500054","article-title":"SVM-Based segmentation-verification of handwritten connected digits using the oriented sliding window","volume":"14","author":"Gattal","year":"2015","journal-title":"Int. J. Comput. Intell. Appl."},{"issue":"3","key":"10.1016\/j.comnet.2019.06.006_bib0015","doi-asserted-by":"crossref","first-page":"211","DOI":"10.1007\/s11263-015-0816-y","article-title":"Imagenet large scale visual recognition challenge","volume":"115","author":"Russakovsky","year":"2014","journal-title":"Int. J. Comput. 
Vis."},{"issue":"11","key":"10.1016\/j.comnet.2019.06.006_bib0016","doi-asserted-by":"crossref","first-page":"1765","DOI":"10.3724\/SP.J.1004.2012.01765","article-title":"A hybrid transfer algorithm for reinforcement learning based on spectral method","volume":"38","author":"Zhu","year":"2012","journal-title":"Acta Autom. Sin."},{"issue":"4","key":"10.1016\/j.comnet.2019.06.006_bib0017","doi-asserted-by":"crossref","first-page":"1996","DOI":"10.1109\/COMST.2014.2320099","article-title":"Machine learning in wireless sensor networks: algorithms, strategies, and applications","volume":"16","author":"Alsheikh","year":"2014","journal-title":"IEEE Commun. Surv. Tutor."},{"issue":"4","key":"10.1016\/j.comnet.2019.06.006_bib0018","doi-asserted-by":"crossref","first-page":"244","DOI":"10.1504\/IJSNET.2017.087899","article-title":"Energy-aware task scheduling by a true online reinforcement learning in wireless sensor networks","volume":"25","author":"Khan","year":"2017","journal-title":"Int. J. Sens. Netw."},{"issue":"9","key":"10.1016\/j.comnet.2019.06.006_bib0019","doi-asserted-by":"crossref","first-page":"1470","DOI":"10.1109\/TSMC.2017.2671848","article-title":"Model learning for multistep backward prediction in dyna-q learning","volume":"48","author":"Hwang","year":"2018","journal-title":"IEEE Trans. Syst. Man Cybern."},{"issue":"5","key":"10.1016\/j.comnet.2019.06.006_bib0020","first-page":"514","article-title":"Reinforcement learning based on FNN and its application in robot navigation","volume":"22","author":"Duan","year":"2007","journal-title":"Control Decis."},{"issue":"169","key":"10.1016\/j.comnet.2019.06.006_bib0021","first-page":"1","article-title":"A generalized reinforcement learning scheme for random neural networks","volume":"30","author":"Lent","year":"2017","journal-title":"Neural Comput. Appl."}],"container-title":["Computer Networks"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1389128618309289?httpAccept=text\/xml","content-type":"text\/xml","content-version":"vor","intended-application":"text-mining"},{"URL":"https:\/\/api.elsevier.com\/content\/article\/PII:S1389128618309289?httpAccept=text\/plain","content-type":"text\/plain","content-version":"vor","intended-application":"text-mining"}],"deposited":{"date-parts":[[2019,11,8]],"date-time":"2019-11-08T16:52:14Z","timestamp":1573231934000},"score":1,"resource":{"primary":{"URL":"https:\/\/linkinghub.elsevier.com\/retrieve\/pii\/S1389128618309289"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2019,10]]},"references-count":20,"alternative-id":["S1389128618309289"],"URL":"http:\/\/dx.doi.org\/10.1016\/j.comnet.2019.06.006","relation":{},"ISSN":["1389-1286"],"issn-type":[{"value":"1389-1286","type":"print"}],"subject":[],"published":{"date-parts":[[2019,10]]},"assertion":[{"value":"Elsevier","name":"publisher","label":"This article is maintained by"},{"value":"A Q-learning algorithm for task scheduling based on improved SVM in wireless sensor networks","name":"articletitle","label":"Article Title"},{"value":"Computer Networks","name":"journaltitle","label":"Journal Title"},{"value":"https:\/\/doi.org\/10.1016\/j.comnet.2019.06.006","name":"articlelink","label":"CrossRef DOI link to publisher maintained version"},{"value":"article","name":"content_type","label":"Content Type"},{"value":"\u00a9 2019 Elsevier B.V. All rights reserved.","name":"copyright","label":"Copyright"}]}}