Link to original content: https://api.crossref.org/works/10.1109/IJCNN55064.2022.9892256
{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,10,30]],"date-time":"2024-10-30T05:27:34Z","timestamp":1730266054989,"version":"3.28.0"},"reference-count":29,"publisher":"IEEE","license":[{"start":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T00:00:00Z","timestamp":1658102400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-029"},{"start":{"date-parts":[[2022,7,18]],"date-time":"2022-07-18T00:00:00Z","timestamp":1658102400000},"content-version":"stm-asf","delay-in-days":0,"URL":"https:\/\/doi.org\/10.15223\/policy-037"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":[],"published-print":{"date-parts":[[2022,7,18]]},"DOI":"10.1109\/ijcnn55064.2022.9892256","type":"proceedings-article","created":{"date-parts":[[2022,9,30]],"date-time":"2022-09-30T19:56:04Z","timestamp":1664567764000},"page":"1-8","source":"Crossref","is-referenced-by-count":5,"title":["Learning Intrinsic Symbolic Rewards in Reinforcement Learning"],"prefix":"10.1109","author":[{"given":"Hassam Ullah","family":"Sheikh","sequence":"first","affiliation":[{"name":"Intel Labs,United States"}]},{"given":"Shauharda","family":"Khadka","sequence":"additional","affiliation":[{"name":"Microsoft,United States"}]},{"given":"Santiago","family":"Miret","sequence":"additional","affiliation":[{"name":"Intel Labs,United States"}]},{"given":"Somdeb","family":"Majumdar","sequence":"additional","affiliation":[{"name":"Intel Labs,United States"}]},{"given":"Mariano","family":"Phielipp","sequence":"additional","affiliation":[{"name":"Intel Labs,United States"}]}],"member":"263","reference":[{"key":"ref10","first-page":"4644","article-title":"On learning intrinsic rewards for policy gradient methods","author":"zheng","year":"2018","journal-title":"Advances in neural information processing systems"},{"journal-title":"Learning gentle object manipulation with curiosity-driven deep reinforcement learning","year":"2019","author":"huang","key":"ref11"},{"key":"ref12","first-page":"4442","article-title":"Learning equations for extrapolation and control","author":"sahoo","year":"0","journal-title":"International Conference on Machine Learning"},{"journal-title":"ArXiv Preprint","article-title":"Symbolic regression methods for reinforcement learning","year":"2019","author":"kubal\u00edk","key":"ref13"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.engappai.2018.09.007"},{"journal-title":"Evolutionary Computation Toward a New Philosophy of Machine Intelligence","year":"1995","author":"fogel","key":"ref15"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.1007\/3-540-56602-3_163"},{"journal-title":"Evolved policy gradients","year":"2018","author":"houthooft","key":"ref17"},{"key":"ref18","first-page":"3341","article-title":"Collaborative evolutionary reinforcement learning","author":"khadka","year":"0","journal-title":"International Conference on Machine Learning"},{"journal-title":"Deep reinforcement learning","article-title":"Soft actor-critic: Off-policy maximum entropy deep reinforcement learning with a stochastic actor","year":"0","author":"haarnoja","key":"ref19"},{"journal-title":"CoRR","article-title":"Proximal policy optimization algorithms","year":"2017","author":"schulman","key":"ref28"},{"key":"ref4","first-page":"278","article-title":"Policy invariance under reward transformations: Theory and application to reward shaping","author":"ng","year":"0","journal-title":"Proceedings of 
the Sixteenth International Conference on Machine Learning"},{"key":"ref27","first-page":"1407","article-title":"Impala: Scalable distributed deep-rl with importance weighted actor-learner architectures","author":"espeholt","year":"0","journal-title":"International Conference on Machine Learning"},{"key":"ref3","first-page":"6550","article-title":"Towards generalization and simplicity in continuous control","volume":"30","author":"rajeswaran","year":"2017","journal-title":"Advances in neural information processing systems"},{"key":"ref6","first-page":"4403","article-title":"Liir: Learning individual intrinsic reward in multi-agent reinforcement learning","volume":"32","author":"du","year":"2019","journal-title":"Advances in neural information processing systems"},{"journal-title":"International Conference on Learning Representations","article-title":"Distributed prioritized experience replay","year":"0","author":"horgan","key":"ref29"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.1109\/CVPRW.2017.70"},{"key":"ref8","first-page":"3675","article-title":"Hierarchical deep reinforcement learning: Integrating temporal abstraction and intrinsic motivation","author":"kulkarni","year":"2016","journal-title":"Advances in neural information processing systems"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1109\/TAMD.2014.2362682"},{"journal-title":"Proc of the 3rd Int'l Conf on Learning Representations (ICLR-2015)","article-title":"Continuous control with deep reinforcement learning","year":"0","author":"lillicrap","key":"ref2"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1109\/TNNLS.2019.2891792"},{"key":"ref1","doi-asserted-by":"crossref","first-page":"529","DOI":"10.1038\/nature14236","article-title":"Human-level control through deep reinforcement learning","volume":"518","author":"mnih","year":"2015","journal-title":"Nature"},{"journal-title":"Proceeding of the International Conference on Learning Representations (ICLR-2020)","article-title":"Maxmin Q-learning: Controlling the estimation bias of Q-learning","year":"0","author":"lan","key":"ref20"},{"journal-title":"Gym compatible games for reinforcenment learning","year":"2019","author":"qingfeng","key":"ref22"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1109\/IROS.2012.6386109"},{"journal-title":"Modeling Purposeful Adaptive Behavior with the Principle of Maximum Causal Entropy","year":"2010","author":"ziebart","key":"ref24"},{"journal-title":"OpenAI Gym","year":"2016","author":"brockman","key":"ref23"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1609\/aaai.v34i04.5878"},{"journal-title":"ArXiv Preprint","article-title":"Reducing overestimation bias by increasing representation dissimilarity in ensemble based deep q-learning","year":"2020","author":"sheikh","key":"ref25"}],"event":{"name":"2022 International Joint Conference on Neural Networks (IJCNN)","start":{"date-parts":[[2022,7,18]]},"location":"Padua, Italy","end":{"date-parts":[[2022,7,23]]}},"container-title":["2022 International Joint Conference on Neural Networks 
(IJCNN)"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx7\/9891857\/9889787\/09892256.pdf?arnumber=9892256","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2022,11,3]],"date-time":"2022-11-03T22:59:30Z","timestamp":1667516370000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/9892256\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2022,7,18]]},"references-count":29,"URL":"https:\/\/doi.org\/10.1109\/ijcnn55064.2022.9892256","relation":{},"subject":[],"published":{"date-parts":[[2022,7,18]]}}}