@inproceedings{antonelo2006,
  title     = {Modular Neural Network and Classical Reinforcement Learning for Autonomous Robot Navigation: Inhibiting Undesirable Behaviors},
  booktitle = {Proceedings of the International Joint Conference on Neural Networks (IJCNN)},
  year      = {2006},
  month     = {Jul.},
  pages     = {498--505},
  publisher = {IEEE},
  address   = {Vancouver, BC},
  abstract  = {

Classical reinforcement learning mechanisms and a modular neural network are unified to conceive an intelligent autonomous system for mobile robot navigation. The conception aims at inhibiting two common navigation deficiencies: the generation of unsuitable cyclic trajectories and ineffectiveness in risky configurations. Several design mechanisms are combined to tackle these navigation difficulties, for instance: 1) a neuron parameter that simultaneously memorizes neuron activity and functions as a learning factor, 2) reinforcement learning mechanisms that adjust neuron parameters (not only synapse weights), and 3) an inner-triggered reinforcement. Simulation results show that the proposed system circumvents difficulties caused by specific environment configurations, improving the relation between collisions and captures.

},
  keywords  = {reinforcement learning},
  isbn      = {0-7803-9490-9},
  doi       = {10.1109/IJCNN.2006.246723},
  url       = {https://ieeexplore.ieee.org/document/1716134/},
  author    = {Eric A. Antonelo and Albert-Jan Baerveldt and Thorsteinn Rognvaldsson and Mauricio Figueiredo}
}

@mastersthesis{antonelo2006thesis,
  title    = {A Neural Reinforcement Learning Approach for Behavior Acquisition in Intelligent Autonomous Systems},
  volume   = {Master of Science with a major in Computer Systems Engineering},
  year     = {2006},
  school   = {Halmstad University},
  type     = {masters},
  abstract = {

In this work, new artificial learning and innate control mechanisms are proposed for application in autonomous behavioral systems for mobile robots. An autonomous system (for mobile robots) existing in the literature is enhanced with respect to its capacity to explore the environment and to avoid risky configurations (which lead to collisions with obstacles even after learning). This autonomous system is based on modular hierarchical neural networks. Initially, the autonomous system has no knowledge suitable for exploring the environment (and capturing targets, i.e., foraging). After a period of learning, the system generates efficient obstacle-avoidance and target-seeking behaviors. Two particular deficiencies of the former autonomous system (a tendency to generate unsuitable cyclic trajectories and ineffectiveness in risky configurations) are discussed, and the new learning and control techniques (applied to the autonomous system) are verified through simulations. The effectiveness of the proposals is shown: the autonomous system is able to detect unsuitable behaviors (cyclic trajectories) and decrease their probability of appearing in the future, and the number of collisions in risky situations is significantly reduced. Experiments also consider maze environments (with targets distant from each other) and dynamic environments (with moving objects).

},
  keywords = {reinforcement learning},
  url      = {http://hh.diva-portal.org/smash/record.jsf?pid=diva2\%3A237466\&dswid=7091},
  author   = {Eric A. Antonelo}
}