@article{Falotico2017,
  author    = {Falotico, Egidio and Vannucci, Lorenzo and Ambrosano, Alessandro and Albanese, Ugo and Ulbrich, Stefan and Vasquez Tieck, Juan Camilo and Hinkel, Georg and Kirtay, Murat and Peric, Igor and Denninger, Oliver and Cauli, Nino and Roennau, Arne and Klinker, Gudrun and von Arnim, Axel and Guyot, Luc and Peppicelli, Daniel and Martinez-Canada, Pablo and Ros, Eduardo and Maier, Patrick and Weber, Sandro and Huber, Manuel and Plecher, David and R{\"{o}}hrbein, Florian and Deser, Stefan and Roitberg, Alina and van der Smagt, Patrick and Dillmann, R{\"{u}}diger and Levi, Paul and Laschi, Cecilia and Knoll, Alois and Gewaltig, Marc-Oliver},
  title     = {Connecting Artificial Brains to Robots in a Comprehensive Simulation Framework: The Neurorobotics Platform},
  journal   = {Frontiers in Neurorobotics},
  year      = {2017},
  month     = jan,
  abstract  = {Combined efforts in the fields of neuroscience, computer science, and biology have made it possible to design biologically realistic models of the brain based on spiking neural networks. For a proper validation of these models, an embodiment in a dynamic and rich sensory environment, where the model is exposed to a realistic sensory-motor task, is needed. Because these brain models are, at the current stage, too complex to meet real-time constraints, it is not possible to embed them into a real-world task. Rather, the embodiment has to be simulated as well. While adequate tools exist to simulate either complex neural networks or robots and their environments, there is so far no tool that makes it easy to establish communication between brain and body models. The Neurorobotics Platform is a new web-based environment that aims to fill this gap by offering scientists and technology developers a software infrastructure that allows them to connect brain models to detailed simulations of robot bodies and environments and to use the resulting neurorobotic systems for in silico experimentation. In order to simplify the workflow and reduce the level of required programming skills, the platform provides editors for the specification of experimental sequences and conditions, environments, robots, and brain–body connectors. In addition, a variety of existing robots and environments are provided. This work presents the architecture of the first release of the Neurorobotics Platform, developed in subproject 10 “Neurorobotics” of the Human Brain Project (HBP). In its current state, the Neurorobotics Platform allows researchers to design and run basic experiments in neurorobotics using simulated robots and simulated environments linked to simplified versions of brain models. We illustrate the capabilities of the platform with three example experiments: a Braitenberg task implemented on a mobile robot, a sensory-motor learning task based on a robotic controller, and a visual tracking task embedding a retina model on the iCub humanoid robot.
These use cases allow us to assess the applicability of the Neurorobotics Platform to robotic tasks as well as to neuroscientific experiments.},
  doi       = {10.3389/fnbot.2017.00002},
  keywords  = {robotics, software architectures, robot programming, web technologies, human brain project, HBP, neurorobotics, neuromorphics, brain simulation, spiking neural networks, NRP, robot simulation},
  url       = {https://www.frontiersin.org/articles/10.3389/fnbot.2017.00002/full},
}

@inproceedings{Roitberg2015a,
  author    = {Roitberg, Alina and Somani, Nikhil and Perzylo, Alexander and Rickert, Markus and Knoll, Alois},
  title     = {Multimodal Human Activity Recognition for Industrial Manufacturing Processes in Robotic Workcells},
  booktitle = {Proceedings of the {ACM} International Conference on Multimodal Interaction ({ICMI})},
  pages     = {259--266},
  year      = {2015},
  month     = nov,
  address   = {Seattle, WA, USA},
  abstract  = {We present an approach for monitoring and interpreting human activities based on a novel multimodal vision-based interface, aiming at improving the efficiency of human-robot interaction (HRI) in industrial environments. Multi-modality is an important concept in this design, where we combine inputs from several state-of-the-art sensors to provide a variety of information, e.g. skeleton and fingertip poses. Based on typical industrial workflows, we derived multiple levels of human activity labels, including large-scale activities (e.g. assembly) and simpler sub-activities (e.g. hand gestures), creating a duration- and complexity-based hierarchy. We train supervised generative classifiers for each activity level and combine the output of this stage with a trained Hierarchical Hidden Markov Model (HHMM), which models not only the temporal aspects between activities on the same level, but also the hierarchical relationships between the levels.},
  doi       = {10.1145/2818346.2820738},
  keywords  = {robotics, smerobotics},
}

@inproceedings{Roitberg2014,
  author    = {Roitberg, Alina and Perzylo, Alexander and Somani, Nikhil and Giuliani, Manuel and Rickert, Markus and Knoll, Alois},
  title     = {Human Activity Recognition in the Context of Industrial Human-Robot Interaction},
  booktitle = {Proceedings of the Asia-Pacific Signal and Information Processing Association Annual Summit and Conference ({APSIPA} {ASC})},
  pages     = {1--10},
  year      = {2014},
  month     = dec,
  address   = {Siem Reap, Cambodia},
  abstract  = {Human activity recognition is crucial for intuitive cooperation between humans and robots. We present an approach for activity recognition for applications in the context of human-robot interaction in industrial settings. The approach is based on spatial and temporal features derived from skeletal data of human workers performing assembly tasks. These features were used to train a machine learning framework, which classifies discrete time frames with Random Forests and subsequently models temporal dependencies between the resulting states with a Hidden Markov Model. We considered the following three groups of activities: Movement, Gestures, and Object handling. A dataset was collected that comprises 24 recordings of several human workers performing such activities in a human-robot interaction environment, as typically seen at small and medium-sized enterprises. The evaluation shows that the approach achieves a recognition accuracy of up to 88% for some activities and an average accuracy of 73%.},
  doi       = {10.1109/APSIPA.2014.7041588},
  keywords  = {robotics, james, smerobotics},
}