@article{Amaya2023a, author = {Amaya, Camilo and von Arnim, Axel}, title = {Neurorobotic reinforcement learning for domains with parametrical uncertainty}, publisher = {Frontiers}, journal = {Frontiers in Neurorobotics}, volume = {17}, year = {2023}, month = oct, timestamp = {2023.10.25}, location = {Lausanne, Switzerland}, abstract = {Neuromorphic hardware paired with brain-inspired learning strategies have enormous potential for robot control. Explicitly, these advantages include low energy consumption, low latency, and adaptability. Therefore, developing and improving learning strategies, algorithms, and neuromorphic hardware integration in simulation is a key to moving the state-of-the-art forward. In this study, we used the neurorobotics platform (NRP) simulation framework to implement spiking reinforcement learning control for a robotic arm. We implemented a force-torque feedback-based classic object insertion task (``peg-in-hole'') and controlled the robot for the first time with neuromorphic hardware in the loop. We therefore provide a solution for training the system in uncertain environmental domains by using randomized simulation parameters. 
This leads to policies that are robust to real-world parameter variations in the target domain, filling the sim-to-real gap. To the best of our knowledge, it is the first neuromorphic implementation of the peg-in-hole task in simulation with the neuromorphic Loihi chip in the loop, and with scripted accelerated interactive training in the Neurorobotics Platform, including randomized domains.}, issn = {1662-5218}, doi = {10.3389/fnbot.2023.1239581}, keywords = {domain randomization, neuromorphic computing, neurorobotics, reinforcement learning, robot control, spiking neural networks}, } @inproceedings{Amaya2023b, author = {Amaya, Camilo and Palinauskas, Gintautas and Eames, Evan and Neumeier, Michael and von Arnim, Axel}, title = {Generating Event-Based Datasets for Robotic Applications Using MuJoCo-ESIM}, booktitle = {Proceedings of the 2023 International Conference on Neuromorphic Systems}, publisher = {Association for Computing Machinery}, series = {ICONS 23}, volume = {1}, number = {11}, pages = {7}, year = {2023}, month = aug, timestamp = {2023.08.28}, address = {New York, NY, USA}, location = {Santa Fe, NM, USA}, abstract = {Event-based cameras are cameras with high dynamic range that measure changes in light intensity at each pixel instead of capturing frames like traditional cameras. There are several event-based camera simulators for generating event-based datasets, each specialized towards a particular domain. However, none are designed specifically for robotic use-cases. This work addresses this issue using MuJoCo, a high performance physics engine; in combination with ESIM, an established event-generation method. 
To the authors' knowledge, this is the first robotic simulator tool for generating event-based datasets specifically designed for the robotics domain. Furthermore, to demonstrate its capabilities we generate an event-based visual dataset of industrial sockets, which is then used to train a SNN classifier.}, howpublished = {Proceedings}, isbn = {979-8-4007-0175-7}, doi = {10.1145/3589737.3605984}, keywords = {Neurorobotics, neuromorphic vision, ESIM, spiking classification, SLAYER, event-based dataset generation, industrial sockets, MuJoCO, spiking neural networks}, url = {https://doi.org/10.1145/3589737.3605984}, } @inproceedings{Lin2021-PCTMA-NET, author = {Lin, Jianjie and Rickert, Markus and Perzylo, Alexander and Knoll, Alois}, title = {PCTMA-Net: Point Cloud Transformer with Morphing Atlas-based Point Generation Network for Dense Point Cloud Completion}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, year = {2021}, month = sep, address = {Prague, Czech Republic}, abstract = {Inferring a complete 3D geometry given an incomplete point cloud is essential in many vision and robotics applications. Previous work mainly relies on a global feature extracted by a Multi-layer Perceptron (MLP) for predicting the shape geometry. This suffers from a loss of structural details, as its point generator fails to capture the detailed topology and structure of point clouds using only the global features. The irregular nature of point clouds makes this task more challenging. This paper presents a novel method for shape completion to address this problem. The Transformer structure is currently a standard approach for natural language processing tasks and its inherent nature of permutation invariance makes it well suited for learning point clouds. 
Furthermore, the Transformer's attention mechanism can effectively capture the local context within a point cloud and efficiently exploit its incomplete local structure details. A morphing-atlas-based point generation network further fully utilizes the extracted point Transformer feature to predict the missing region using charts defined on the shape. Shape completion is achieved via the concatenation of all predicting charts on the surface. Extensive experiments on the Completion3D and KITTI data sets demonstrate that the proposed PCTMA-Net outperforms the state-of-the-art shape completion approaches and has a 10% relative improvement over the next best-performing method.}, keywords = {robotics}, } @inproceedings{empress-rss-2021, author = {Huchler, Norbert and Kessler, Ingmar and Lay, Florian and Perzylo, Alexander and Seidler, Michael and Steinmetz, Franz}, title = {Empowering Workers in a Mixed Skills Concept for Collaborative Robot Systems}, booktitle = {Workshop on Accessibility of Robot Programming and Work of the Future, Robotics: Science and Systems (RSS)}, year = {2021}, month = jul, abstract = {One aspect of digital transformation in manufacturing is the trend toward mass customization, which requires a more flexible production paradigm. Human-robot collaboration and knowledge-based engineering are approaches that meet these requirements. In our work, we combine them in our mixed skills concept that incorporates the strengths of human workers and robots. We assume that while the workplaces of many workers may change, they will continue to play a vital role due to their experience and flexibility. They can perform various types of tasks that are still beyond the capabilities of robots. Yet, their responsibilities may shift towards decision makers and problem solvers for robots. 
Our approach to facilitate such collaboration is to apply insights from social science regarding empowerment in the work context to determine design goals and potential solutions for collaborative robot systems. The technical implementation is based on semantic descriptions of relevant aspects of automation using OWL ontologies and intuitive user interfaces.}, keywords = {robotics, empress}, url = {https://mediatum.ub.tum.de/doc/1616960/1616960.pdf}, } @incollection{Kessler2021, author = {Kessler, Ingmar and Perzylo, Alexander and Rickert, Markus}, editor = {Garoufallou, Emmanouel and Ovalle-Perandones, Maria-Antonia}, title = {Ontology-Based Decision Support System for the Nitrogen Fertilization of Winter Wheat}, booktitle = {Metadata and Semantics Research (MTSR 2020)}, publisher = {Springer International Publishing}, pages = {245--256}, year = {2021}, month = mar, address = {Cham}, abstract = {Digital technologies are already used in several aspects of agriculture. However, decision-making in crop production is still often a manual process that relies on various heterogeneous data sources. Small-scale farmers and their local consultants are particularly burdened by increasingly complex requirements. Regional circumstances and regulations play an essential role and need to be considered. This paper presents an ontology-based decision support system for the nitrogen fertilization of winter wheat in Bavaria, Germany. Semantic Web and Linked Data technologies were employed to both reuse and model new common semantic structures for interrelated knowledge. Many relevant general and regional data sources from multiple domains were not yet available in RDF. Hence, we used several tools to transform relevant data into corresponding OWL ontologies and combined them in a central knowledge base. The GUI application of the decision support system queries it to parameterize requests to external web services and to show relevant information in an integrated view. 
It further uses SPARQL queries to automatically generate recommendations for farmers and their consultants.}, isbn = {978-3-030-71903-6}, doi = {10.1007/978-3-030-71903-6_24}, keywords = {robotics, farmexpert}, } @article{Angelidis2021a, author = {Angelidis, Emmanouil and Buchholz, Emanuel and Arreguit O'Neil, Jonathan Patrick and Roug{\'{e}}, Alexis and Stewart, Terrence and von Arnim, Axel and Knoll, Alois and Ijspeert, Auke}, title = {A Spiking Central Pattern Generator for the control of a simulated lamprey robot running on SpiNNaker and Loihi neuromorphic boards}, journal = {arXiv preprint}, eprint = {2101.07001}, archiveprefix = {arXiv}, year = {2021}, month = jan, abstract = {Central Pattern Generators (CPGs) models have been long used to investigate both the neural mechanisms that underlie animal locomotion as well as a tool for robotic research. In this work we propose a spiking CPG neural network and its implementation on neuromorphic hardware as a means to control a simulated lamprey model. To construct our CPG model, we employ the naturally emerging dynamical systems that arise through the use of recurrent neural populations in the Neural Engineering Framework (NEF). We define the mathematical formulation behind our model, which consists of a system of coupled abstract oscillators modulated by high-level signals, capable of producing a variety of output gaits. We show that with this mathematical formulation of the Central Pattern Generator model, the model can be turned into a Spiking Neural Network (SNN) that can be easily simulated with Nengo, an SNN simulator. The spiking CPG model is then used to produce the swimming gaits of a simulated lamprey robot model in various scenarios. We show that by modifying the input to the network, which can be provided by sensory information, the robot can be controlled dynamically in direction and pace. The proposed methodology can be generalized to other types of CPGs suitable for both engineering applications and scientific research. 
We test our system on two neuromorphic platforms, SpiNNaker and Loihi. Finally, we show that this category of spiking algorithms shows a promising potential to exploit the theoretical advantages of neuromorphic hardware in terms of energy efficiency and computational speed.}, keywords = {neurorobotics, neuromorphic, neuromorphic computing, HBP, NRP, virtual, robotics Neuromorphic Computing, HBP, Human Brain Project, Neurorobotics, Neurorobotics Platform, Neuroscience, Artificial Intelligence, KI, Spiking Neural Networks}, url = {https://arxiv.org/abs/2101.07001}, } @inproceedings{Lin2020a, author = {Lin, Jianjie and Rickert, Markus and Knoll, Alois}, title = {{6D} Pose Estimation for Flexible Production with Small Lot Sizes based on {CAD} Models using {G}aussian Process Implicit Surfaces}, booktitle = {Proceedings of the {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems ({IROS})}, year = {2020}, month = oct, address = {Las Vegas, NV, USA}, abstract = {We propose a surface-to-surface (S2S) point registration algorithm by exploiting the Gaussian Process Implicit Surfaces for partially overlapping 3D surfaces to estimate the 6D pose transformation. Unlike traditional approaches, that separate the corresponding search and update steps in the inner loop, we formulate the point registration as a nonlinear non-constraints optimization problem which does not explicitly use any corresponding points between two point sets. According to the implicit function theorem, we form one point set as a Gaussian Process Implicit Surfaces utilizing the signed distance function, which implicitly creates three manifolds. Points on the same manifold share the same function value, indicated as \{1, 0, -1\}. The problem is thus converted into finding a rigid transformation that minimizes the inherent function value. This can be solved by using a Gauss-Newton (GN) or Levenberg-Marquardt (LM) solver. 
In the case of a partially overlapping 3D surface, the Fast Point Feature Histogram (FPFH) algorithm is applied to both point sets and a Principal Component Analysis (PCA) is performed on the result. Based on this, the initial transformation can then be computed. We conduct experiments on multiple point sets to evaluate the effectiveness of our proposed approach against existing state-of-the-art methods.}, keywords = {robotics}, } @inproceedings{Weser2020a, author = {Weser, Michael and Bock, J{\"{u}}rgen and Schmitt, Siwara and Perzylo, Alexander and Evers, Kathrin}, title = {An Ontology-based Metamodel for Capability Descriptions}, booktitle = {Proceedings of the IEEE International Conference on Emerging Technologies And Factory Automation (ETFA)}, pages = {1679--1685}, year = {2020}, month = sep, address = {Vienna, Austria}, abstract = {This paper presents an approach to describe abilities of manufacturing resources by a formal description of capabilities using Semantic Web technologies. A hierarchical ontology architecture is proposed to represent, publish, and extend knowledge on capabilities for different application domains and use cases. Furthermore, the paper describes patterns of how the underlying formal logic can be used in taxonomy modeling and the inference of implicit capability facts. The usability and performance of the approach was validated by formalizing capability knowledge of related work and evaluated in benchmarking a prototypical implemented tool for managing and querying catalogs of resources and their capabilities. The proposed concept is intended to be used as a foundation for a future multi-layered feasibility checking, which evaluates the compatibility of resources and their offered skills with the requirements of manufacturing tasks at symbolic and subsymbolic levels. 
Extended evaluations might be based on parameters, analytics, simulation, and other means.}, doi = {10.1109/ETFA46521.2020.9212104}, keywords = {robotics, basys 4.0}, } @inproceedings{Perzylo2020a, author = {Perzylo, Alexander and Kessler, Ingmar and Profanter, Stefan and Rickert, Markus}, title = {Toward a Knowledge-Based Data Backbone for Seamless Digital Engineering in Smart Factories}, booktitle = {Proceedings of the IEEE International Conference on Emerging Technologies And Factory Automation (ETFA)}, pages = {164--171}, year = {2020}, month = sep, address = {Vienna, Austria}, abstract = {Digital transformation efforts in manufacturing companies bear the potential to reduce product costs and increase the flexibility of production systems. The semantic integration of data and information along the value chain enables the automated interpretation of interrelations between its different aspects such as product design, production process and manufacturing resources. These interrelations can be used to automatically generate semantic process descriptions and execute corresponding robot motions. An initial one-time effort to model the required knowledge of a particular application domain can make the manufacturing of high-variant products in small batches or even lot size one production more efficient. This paper introduces a knowledge-based digital engineering concept to automate engineering and production activities without human involvement. 
The concept was integrated and evaluated in a physical robot workcell where automotive fuse boxes are autonomously fitted with different fuse configurations.}, doi = {10.1109/ETFA46521.2020.9211943}, keywords = {robotics, data backbone}, url = {https://youtu.be/PtPd3YvTTzw}, } @article{AllegraMascaro2020a, author = {Allegra Mascaro, Anna Letizia and Falotico, Egidio and Petkoski, Spase and Pasquini, Maria and Vannucci, Lorenzo and Tort-Colet, Nuria and Conti, Emilia and Resta, Francesco and Spalletti, Cristina and Tata Ramalingasetty, Shravan and von Arnim, Axel and Formento, Emanuele and Angelidis, Emmanouil and Blixhavn, Camilla and Leergaard, Trygve and Caleo, Matteo and Destexhe, Alain and Ijspeert, Auke and Micera, Silvestro and Laschi, Cecilia and Jirsa, Viktor and Gewaltig, Marc-Oliver and Pavone, Francesco}, title = {Experimental and Computational Study on Motor Control and Recovery After Stroke: Toward a Constructive Loop Between Experimental and Virtual Embodied Neuroscience}, journal = {Frontiers in Systems Neuroscience}, year = {2020}, month = jul, timestamp = {2020.07.07}, abstract = {Being able to replicate real experiments with computational simulations is a unique opportunity to refine and validate models with experimental data and redesign the experiments based on simulations. However, since it is technically demanding to model all components of an experiment, traditional approaches to modeling reduce the experimental setups as much as possible. In this study, our goal is to replicate all the relevant features of an experiment on motor control and motor rehabilitation after stroke. To this aim, we propose an approach that allows continuous integration of new experimental data into a computational modeling framework. First, results show that we could reproduce experimental object displacement with high accuracy via the simulated embodiment in the virtual world by feeding a spinal cord model with experimental registration of the cortical activity. 
Second, by using computational models of multiple granularities, our preliminary results show the possibility of simulating several features of the brain after stroke, from the local alteration in neuronal activity to long-range connectivity remodeling. Finally, strategies are proposed to merge the two pipelines. We further suggest that additional models could be integrated into the framework thanks to the versatility of the proposed approach, thus allowing many researchers to achieve continuously improved experimental design.}, doi = {10.3389/fnsys.2020.00031}, keywords = {Neuromorphic Computing, HBP, Human Brain Project, Neurorobotics, Neurorobotics Platform, Neuroscience, Artificial Intelligence, KI, Spiking Neural Networks, neurorobotics, neuromorphic, neuromorphic computing, HBP, NRP, virtual, robotics}, url = {https://www.frontiersin.org/articles/10.3389/fnsys.2020.00031/full}, } @article{Weckesser2020a, author = {Weckesser, Fabian and Peisl, Sebastian and Beck, Michael and Hartmann, Anja and R{\"{o}}hrl, Gerhard and Kessler, Ingmar and Perzylo, Alexander and Rickert, Markus}, title = {Daten vernetzen, unabh{\"{a}}ngig beraten}, journal = {Bayerisches Landwirtschaftliches Wochenblatt}, volume = {16}, year = {2020}, month = apr, abstract = {Mit einem Forschungsprojekt steigt das Landeskuratorium f{\"{u}}r pflanzliche Erzeugung in die digitale Beratung ein. FarmExpert 4.0 soll eine F{\"{u}}lle an Daten in einer Softwarearchitektur zum Nutzen der Landwirte vernetzen. Landwirte und Berater m{\"{u}}ssen bei pflanzenbaulichen Fragestellungen oft von Hand Fachwissen in Datenquellen nachschlagen und miteinander kombinieren. Hinzu kommt, dass die gesetzlichen Rahmenbedingungen komplexer werden. Ein aktuelles Beispiel daf{\"{u}}r ist die D{\"{u}}ngeverordnung (D{\"{u}}V). Sonder- und Ausnahmeregelungen machen die Entscheidungsfindung f{\"{u}}r den Landwirt nicht einfacher. 
Der Betriebsleiter w{\"{u}}nscht sich Unterst{\"{u}}tzung beim Management seines Betriebes und bei der Erf{\"{u}}llung seiner Dokumentationspflichten. Es w{\"{a}}re viel gewonnen, wenn es hierf{\"{u}}r eine neutrale Softwarearchitektur g{\"{a}}be, die zu den Fragen des Landwirts die wichtigsten Informationen auf all seinen mobilen Endger{\"{a}}ten bereitstellt.}, keywords = {robotics,farmexpert}, url = {http://www.hswt.de/fileadmin/download/Forschung/Forschungsprojekte/1093_FarmExpert/WB_Daten_vernetzen_unabhaengig_beraten.pdf}, } @article{Perzylo2019b, author = {Perzylo, Alexander and Grothoff, Julian and L{\'{u}}cio, Levi and Weser, Michael and Malakuti, Somayeh and Venet, Pierre and Aravantinos, Vincent and Deppe, Torben}, title = {Capability-based semantic interoperability of manufacturing resources: A {BaSys} 4.0 perspective}, journal = {IFAC-PapersOnLine}, volume = {52}, number = {13}, pages = {1590--1596}, year = {2019}, month = dec, note = {{IFAC} Conference on Manufacturing Modeling, Management, and Control ({MIM})}, abstract = {In distributed manufacturing systems, the level of interoperability of hardware and software components depends on the quality and flexibility of their information models. Syntactic descriptions of input and output parameters, e.g., using interface description languages (IDL), are not suffcient when it comes to evaluating whether a manufacturing resource provides the capabilities that are required for performing a particular process step on a product. The semantics of capabilities needs to be explicitly modelled and must be provided together with manufacturing resources. 
In this paper, we introduce concepts developed by the German BaSys 4.0 initiative dealing with semantically describing manufacturing skills, orchestrating higher-level skills from basic skills, and using them in a cognitive manufacturing framework.}, doi = {10.1016/j.ifacol.2019.11.427}, keywords = {robotics, basys 4.0, model-based systems engineering, MbSE}, } @inproceedings{Wildgrube2019a, author = {Wildgrube, Fabian and Perzylo, Alexander and Rickert, Markus and Knoll, Alois}, title = {Semantic Mates: Intuitive Geometric Constraints for Efficient Assembly Specifications}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, pages = {6180--6187}, year = {2019}, month = nov, address = {Macao, China}, abstract = {In this paper, we enhance our knowledge-based and constraint-based approach of robot programming with the concept of Semantic Mates. They describe intended mechanical connections between parts of an assembly. This allows deriving appropriate assembly poses from the type of connection and the geometric properties of the involved parts. The paper presents an ontology-based representation of Semantic Mates that is used to augment object models with additional information regarding their potential use in an assembly. Such semantically annotated object models can be used in our instruction framework to program a robot to perform assembly tasks through simple drag-and-drop operations in a graphical user interface. We conducted a user study with 21 participants in order to evaluate the efficiency and usability of the Semantic Mates concept based on a use-case from the domain of mechanical assembly. 
Across different experience levels in robotics, the participants achieved a significantly faster workflow and improved perceived usability compared to the manual specification of constraint-based assembly operations.}, doi = {10.1109/IROS40897.2019.8968041}, keywords = {robotics, smecobot}, url = {https://youtu.be/o5EiAut3N2c}, } @inproceedings{Perzylo2019c, author = {Perzylo, Alexander and Profanter, Stefan and Rickert, Markus and Knoll, Alois}, title = {{OPC} {UA} NodeSet Ontologies as a Pillar of Representing Semantic Digital Twins of Manufacturing Resources}, booktitle = {Proceedings of the IEEE International Conference on Emerging Technologies And Factory Automation ({ETFA})}, pages = {1085--1092}, year = {2019}, month = sep, address = {Zaragoza, Spain}, abstract = {The effectiveness of cognitive manufacturing systems in agile production environments heavily depend on the automatic assessment of various levels of interoperability between manufacturing resources. For taking informed decisions, a semantically rich representation of all resources in a workcell or production line is required. OPC UA provides means for communication and information exchange in such distributed settings. This paper proposes a semantic representation of a resource's properties, in which we use OWL ontologies to encode the information models that can be found in OPC UA NodeSet specifications. We further combine these models with an OWL-based description of the resource's geometry and -- if applicable -- its kinematic model. This leads to a comprehensive semantic representation of hardware and software features of a manufacturing resource, which we call semantic digital twin. Among other things, it reduces costs through virtual prototyping and enables the automatic deployment of manufacturing tasks in production lines. As a result, small-batch assemblies become financially viable. 
In order to minimize the effort of creating OWL-based UA NodeSet descriptions, we provide a software tool for the automatic transformation of XML-based NodeSet specifications that adhere to the OPC Foundation's NodeSet2 XML schema.}, doi = {10.1109/ETFA.2019.8868954}, keywords = {robotics, data backbone, basys 4.0}, } @inproceedings{Profanter2019b, author = {Profanter, Stefan and Breitkreuz, Ari and Rickert, Markus and Knoll, Alois}, title = {A Hardware-Agnostic OPC UA Skill Model for Robot Manipulators and Tools}, booktitle = {Proceedings of the {IEEE} International Conference on Emerging Technologies And Factory Automation ({ETFA})}, year = {2019}, month = sep, address = {Zaragoza, Spain}, abstract = {The current trend to lot-size-one production requires reduced integration effort and easy reuse of available devices inside the production line. These devices have to offer a uniform interface to fulfill these requirements. This paper presents a hardware-agnostic skill model using the semantic modeling capabilities of OPC~UA. The model provides a standardized interface to hardware or software functionality while offering an intuitive way of grouping multiple skills to a higher hierarchical abstraction. Our skill model is based on OPC~UA Programs and modeled as an open source NodeSet. We hereby focus on the reusability of the skills for many different domains. The model is evaluated by controlling three different industrial robots and their tools through the same skill interface. The evaluation shows that our generic OPC~UA skill model can be used as a standardized control interface for device and software components in industrial manufacturing. With our solution new components can easily be exchanged without changing the interface. 
This is not only true for industrial robots, but for any device which provides a controllable functionality.}, keywords = {robotics, data backbone}, } @article{VasquezTieck2019a, author = {Vasquez Tieck, Juan Camilo and Kaiser, Jacques and Steffen, Lea and Schulze, Martin and von Arnim, Axel and Reichard, Daniel and Roennau, Arne and Dillmann, R{\"{u}}diger}, title = {The Neurorobotics Platform for Teaching -- Embodiment Experiments with Spiking Neural Networks and Virtual Robots}, publisher = {IEEE}, year = {2019}, month = sep, timestamp = {2019.09.20}, institution = {2019 IEEE International Conference on Cyborg and Bionic Systems and HBP Workshop}, abstract = {Understanding the brain is an interdisciplinary effort spanning over the fields of computational neuroscience, machine learning and robotics. The collaboration between researchers across these fields should be encouraged by comprehensive simulation platforms. The Neurorobotics Platform (NRP) developed by the Human Brain Project enables such collaboration by allowing researchers to define virtual experiments in which brain models are connected to simulated robots. In this paper, we present how we use the NRP as an education tool to introduce master students to neurorobotics. The students are given the task to define, implement and solve three virtual neurorobotics challenges related to perception, arm motion and locomotion. Without any previous knowledge on neurorobotics, the students completed this task within the course of one semester. We present the challenges, which are now open-source benchmarks available online, as well as example solutions from the students to these challenges. This paper gives a glimpse of what new users are capable of by using the NRP to simulate their neurorobotics experiment. Aside from educating the students, this initiative also allowed to collect their direct feedback on the NRP. 
This feedback is valuable for the Human Brain Project as a whole since it helps identify how new users interact with the platform.}, doi = {10.1109/CBS46900.2019.9114395}, keywords = {neurorobotics, neuromorphic, neuromorphic computing, HBP, NRP, virtual, robotics}, url = {https://ieeexplore.ieee.org/document/9114395}, } @article{Capolei2019a, author = {Capolei, Marie-Claire and Angelidis, Emmanouil and Falotico, Egidio and Lund, Henrik Hautop and Tolu, Silvia}, title = {A Biomimetic Control Method Increases the Adaptability of a Humanoid Robot Acting in a Dynamic Environment}, journal = {Frontiers in Neurorobotics}, year = {2019}, month = aug, doi = {10.3389/fnbot.2019.00070}, keywords = {neuromorphic, neurorobotics, icub, robotics, nrp, hbp}, url = {https://www.frontiersin.org/articles/10.3389/fnbot.2019.00070/full}, } @inproceedings{Kraemmer2019a, author = {Kr{\"{a}}mmer, Annkathrin and Sch{\"{o}}ller, Christoph and Gulati, Dhiraj and Knoll, Alois}, title = {Providentia - A Large Scale Sensing System for the Assistance of Autonomous Vehicles}, booktitle = {Robotics Science and Systems Workshops ({RSS} Workshops)}, publisher = {RSS Foundation}, year = {2019}, month = jun, address = {Freiburg, Germany}, abstract = {The environmental perception of autonomous vehicles is not only limited by physical sensor ranges and algorithmic performance, but also occlusions degrade their understanding of the current traffic situation. This poses a great threat for safety, limits their driving speed and can lead to inconvenient maneuvers that decrease their acceptance. Intelligent Transportation Systems can help to alleviate these problems. By providing autonomous vehicles with additional detailed information about the current traffic in form of a digital model of their world, i.e. a digital twin, an Intelligent Transportation System can fill in the gaps in the vehicle's perception and enhance its field of view. 
However, detailed descriptions of implementations of such a system and working prototypes demonstrating its feasibility are scarce. In this work, we propose a hardware and software architecture to build such a reliable Intelligent Transportation System. We have implemented this system in the real world and show that it is able to create an accurate digital twin of an extended highway stretch. Furthermore, we provide this digital twin to an autonomous vehicle and demonstrate how it extends the vehicle's perception beyond the limits of its on-board sensors.}, keywords = {Intelligent Transportation Systems, Autonomous Driving, Robotics}, url = {https://sites.google.com/view/uad2019/accepted-posters}, } @article{Bornet2019a, author = {Bornet, Alban and Kaiser, Jacques and Kroner, Alexander and Falotico, Egidio and Ambrosano, Alessandro and Cantero, Kepa and Herzog, Michael and Francis, Gregory}, title = {Running Large-Scale Simulations on the Neurorobotics Platform to Understand Vision -- The Case of Visual Crowding}, journal = {Frontiers in Neurorobotics}, year = {2019}, month = may, abstract = {Traditionally, human vision research has focused on specific paradigms and proposed models to explain very specific properties of visual perception. However, the complexity and scope of modern psychophysical paradigms undermine the success of this approach. For example, perception of an element strongly deteriorates when neighboring elements are presented in addition (visual crowding). As it was shown recently, the magnitude of deterioration depends not only on the directly neighboring elements but on almost all elements and their specific configuration. Hence, to fully explain human visual perception, one needs to take large parts of the visual field into account and combine all the aspects of vision that become relevant at such scale. These efforts require sophisticated and collaborative modeling. 
The Neurorobotics Platform (NRP) of the Human Brain Project offers a unique opportunity to connect models of all sorts of visual functions, even those developed by different research groups, into a coherently functioning system. Here, we describe how we used the NRP to connect and simulate a segmentation model, a retina model, and a saliency model to explain complex results about visual perception. The combination of models highlights the versatility of the NRP and provides novel explanations for inward-outward anisotropy in visual crowding.}, howpublished = {Journal}, doi = {10.3389/fnbot.2019.00033}, keywords = {robotics, human brain project, HBP, neurorobotics, neuromorphics, brain simulation, spiking neural networks, NRP, robot simulation}, url = {https://www.frontiersin.org/articles/10.3389/fnbot.2019.00033/full}, } @article{Perzylo2019a, author = {Perzylo, Alexander and Rickert, Markus and Kahl, Bj{\"{o}}rn and Somani, Nikhil and Lehmann, Christian and Kuss, Alexander and Profanter, Stefan and Billeso Beck, Anders and Haage, Mathias and Rath Hansen, Mikkel and Tofveson Nibe, Malene and Roa, M{\'{a}}ximo A. and S{\"{o}}rnmo, Olof and Gesteg{\aa}rd Robertz, Sven and Thomas, Ulrike and Veiga, Germano and Topp, Elin Anna and Kessler, Ingmar and Danzer, Marinus}, title = {{SME}robotics: Smart Robots for Flexible Manufacturing}, journal = {{IEEE} Robotics \& Automation Magazine}, volume = {26}, number = {1}, pages = {78--90}, year = {2019}, month = mar, abstract = {Current market demands require an increasingly agile production environment throughout many manufacturing branches. Traditional automation systems and industrial robots, on the other hand, are often too inflexible to provide an economically viable business case for companies with rapidly changing products. The introduction of cognitive abilities into robotic and automation systems is, therefore, a necessary step toward lean changeover and seamless human–robot collaboration. 
In this article, we introduce the European Union (EU)-funded research project SMErobotics, which focuses on facilitating the use of robot systems in small and medium-sized enterprises (SMEs). We analyze open challenges for this target audience and develop multiple efficient technologies to address related issues. Real-world demonstrators of several end users and from multiple application domains show the impact these smart robots can have on SMEs. This article intends to give a broad overview of the research conducted in SMErobotics. Specific details of individual topics are provided through references to our previous publications.}, doi = {10.1109/MRA.2018.2879747}, keywords = {robotics, smerobotics}, } @article{Vandersompele2019a, author = {Vandersompele, Alexander and Urbain, Gabriel and Mahmud, Hossain and Wyffels, Francis and Dambre, Joni}, title = {Body Randomization Reduces the Sim-to-Real Gap for Compliant Quadruped Locomotion}, journal = {Frontiers in Neurorobotics}, year = {2019}, month = mar, abstract = {Designing controllers for compliant, underactuated robots is challenging and usually requires a learning procedure. Learning robotic control in simulated environments can speed up the process whilst lowering risk of physical damage. Since perfect simulations are unfeasible, several techniques are used to improve transfer to the real world. Here, we investigate the impact of randomizing body parameters during learning of CPG controllers in simulation. The controllers are evaluated on our physical quadruped robot. 
We find that body randomization in simulation increases chances of finding gaits that function well on the real robot.}, howpublished = {Journal}, doi = {10.3389/fnbot.2019.00009}, keywords = {robotics, human brain project, HBP, neurorobotics, neuromorphics, brain simulation, spiking neural networks, NRP, robot simulation}, url = {https://www.frontiersin.org/articles/10.3389/fnbot.2019.00009/full}, } @inproceedings{Weissker2019a, author = {Weissker, Tim and Angelidis, Emmanouil and Kulik, Alexander and Beck, Stephan and Kunert, Andre and Frolov, Anton and Weber, Sandro and Kreskowski, Adrian and Froehlich, Bernd}, title = {The Collaborative Virtual Reality Neurorobotics Lab}, booktitle = {Proceedings of the {IEEE} Conference on Virtual Reality and 3D User Interfaces (VR)}, pages = {1671--1674}, year = {2019}, month = mar, address = {Osaka, Japan}, abstract = {We present the collaborative Virtual Reality Neurorobotics Lab, which allows multiple collocated and remote users to experience, discuss and participate in neurorobotic experiments in immersive virtual reality. We describe the coupling of the Neurorobotics Platform of the Human Brain Project with our collaborative virtual reality and 3D telepresence infrastructure and highlight future opportunities arising from our work for research on direct human interaction with simulated robots and brains.}, doi = {10.1109/VR.2019.8798289}, keywords = {robotics, human brain project, HBP, neurorobotics, neuromorphics, brain simulation, spiking neural networks, NRP, robot simulation}, } @incollection{Nafissi2019a, author = {Nafissi, Anahita and Weckesser, Fabian and Kessler, Ingmar and Rickert, Markus and Pfaff, Matthias and Peisl, Sebastian and Beck, Michael}, editor = {Meyer-Aurich, Andreas and Gandorfer, Markus and Barta, Norbert and Gronauer, Andreas and Kantelhardt, Jochen and Floto, Helga}, title = {{W}issensbasierte digitale {U}nterst{\"{u}}tzung in der {P}flanzenbauberatung}, booktitle = {Referate der 39. 
GIL-Jahrestagung in Wien: Informatik in der Land-, Forst- und Ern{\"{a}}hrungswirtschaft Fokus: Digitalisierung f{\"{u}}r landwirtschaftliche Betriebe in kleinstrukturierten Regionen -- ein Widerspruch in sich?}, publisher = {Gesellschaft f{\"{u}}r Informatik}, series = {Lecture Notes in Informatics}, volume = {287}, pages = {145--150}, year = {2019}, month = feb, address = {Bonn, Germany}, abstract = {Obwohl die Landwirtschaft schon immer technologische Neuerungen in der Produktion einsetzt, ist gerade die landwirtschaftliche Beratung bisher noch verh{\"{a}}ltnism{\"{a}}{\ss}ig wenig digitalisiert. Oft ist es g{\"{a}}ngige Praxis f{\"{u}}r Landwirte und Berater, gesetzliche Regelungen, Fachliteratur und Betriebsdaten in Papierform nachzuschlagen und in unstrukturierten, digitalen Dokumenten einzutragen. Zur Unterst{\"{u}}tzung der Landwirte und der Pflanzenbauberatung wird ein Entscheidungshilfesystem entwickelt, welches die Beratung in der Landwirtschaft digital unterst{\"{u}}tzen und erleichtern soll, indem es aktuelles Fach- und Expertenwissen sowie individuelle Betriebsdaten abruft, aufbereitet und zweckgebunden auswertet. Daf{\"{u}}r ist es notwendig, das entsprechende Fachwissen aus vielf{\"{a}}ltigen heterogenen Datenquellen in einer einheitlichen Wissensbasis verf{\"{u}}gbar zu machen. Der hier beschriebene Ansatz verwendet Semantic-Web-Technologien wie OWL-Ontologien und SPARQL-Abfragen, um diese Daten hinsichtlich ihrer Bedeutung, d.h. 
semantisch, zu modellieren und abzufragen.}, isbn = {9783885796817}, keywords = {farmexpert, robotics}, url = {https://www.gil-net.de/Publikationen/139_145.pdf}, } @inproceedings{Profanter2019a, author = {Profanter, Stefan and Tekat, Ayhun and Dorofeev, Kirill and Rickert, Markus and Knoll, Alois}, title = {OPC UA versus ROS, DDS, and MQTT: Performance Evaluation of Industry 4.0 Protocols}, booktitle = {Proceedings of the {IEEE} International Conference on Industrial Technology ({ICIT})}, year = {2019}, month = feb, address = {Melbourne, Australia}, abstract = {Ethernet-based protocols are getting more and more important for Industry 4.0 and the Internet of Things. In this paper, we compare the features, package overhead, and performance of some of the most important protocols in this area. First, we present a general feature comparison of OPC UA, ROS, DDS, and MQTT, followed by a more detailed wire protocol evaluation, which gives an overview over the protocol overhead for establishing a connection and sending data. In the performance tests we evaluate open-source implementations of these protocols by measuring the round trip time of messages in different system states: idle, high CPU load, and high network load. 
The performance analysis concludes with a test measuring the round trip time for 500 nodes on the same host.}, doi = {10.1109/ICIT.2019.8755050}, keywords = {robotics, smecobot}, } @inproceedings{Lin2018a, author = {Lin, Jianjie and Somani, Nikhil and Hu, Biao and Rickert, Markus and Knoll, Alois}, title = {An Efficient and Time-Optimal Trajectory Generation Approach for Waypoints under Kinematic Constraints and Error Bounds}, booktitle = {Proceedings of the {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems ({IROS})}, year = {2018}, month = oct, address = {Madrid, Spain}, abstract = {This paper presents an approach to generate the time-optimal trajectory for a robot manipulator under certain kinematic constraints such as joint position, velocity, acceleration, and jerk limits. This problem of generating a trajectory that takes the minimum time to pass through specified waypoints is formulated as a nonlinear constraint optimization problem. Unlike prior approaches that model the motion of consecutive waypoints as a Cubic Spline, we model this motion with a seven-segment acceleration profile, as this trajectory results in a shorter overall motion time while staying within the bounds of the robot manipulator's constraints. The optimization bottleneck lies in the complexity that increases exponentially with the number of waypoints. To make the optimization scale well with the number of waypoints, we propose an approach that has linear complexity. This approach first divides all waypoints to consecutive batches, each with an overlap of two waypoints. The overlapping waypoints then act as a bridge to concatenate the optimization results of two consecutive batches. The whole trajectory is effectively optimized by successively optimizing every batch. 
We conduct experiments on practical scenarios and trajectories generated by motion planners to evaluate the effectiveness of our proposed approach over existing state-of-the-art approaches.}, doi = {10.1109/IROS.2018.8593577}, keywords = {robotics, trajectory generation}, } @incollection{Rickert2018a, author = {Rickert, Markus and Gaschler, Andre and Knoll, Alois}, editor = {Goswami, Ambarish and Vadakkepat, Prahlad}, title = {Applications in {HHI}: Physical Cooperation}, booktitle = {Humanoid Robotics: A Reference}, publisher = {Springer}, year = {2018}, month = oct, abstract = {Humans critically depend on permanent verbal and nonverbal interaction - for aligning their mental states, for synchronizing their intentions and goals, and also for performing joint tasks, such as carrying a heavy object together, manipulating of objects in a common workspace, or handing over components and building or assembling larger structures in teams. Typically, physical interaction is initiated by a short joint planning dialog and then further accompanied by a stream of verbal utterances. For obtaining a smooth interaction flow in a given situation, humans typically use all their communication modalities and senses, and this often happens even unconsciously. As we move toward the introduction of robotic co-workers that serve humans - some of them will be humanoids; others will be of a different shape - humans will expect them to be integrated into the execution of the task at hand, just as well as if a human co-worker was involved. Such a flawless replacement will only be possible if these robots provide a number of basic action primitives, for example, handover from human to robot and vice versa. The robots must also recognize and anticipate the intention of the human by analyzing and understanding the scene as far as necessary for jointly working on the task. 
Most importantly, the robotic co-worker must be able to carry on a verbal and nonverbal dialog with the human partner, in parallel with and relating to the physical interaction process. In this chapter, we give an overview of the ingredients of an integrated physical interaction scenario. This includes methods to plan activities, to produce safe and human-interpretable motion, to interact through multimodal communication, to schedule actions for a joint task, and to align and synchronize the interaction by understanding human intentions. We summarize the state of the art in physical human-humanoid interaction systems and conclude by presenting three humanoid systems as case studies.}, isbn = {9789400760455}, doi = {10.1007/978-94-007-6046-2_129}, keywords = {robotics, human-robot interaction, humanoid robotics}, url = {https://link.springer.com/content/pdf/10.1007%2F978-94-007-7194-9_129-1.pdf}, } @article{Thuerauf2018a, author = {Th{\"{u}}rauf, Sabine and Hornung, Oliver and K{\"{o}}rner, Mario and Vogt, Florian and Knoll, Alois and Nasseri, M. Ali}, title = {Model-Based Calibration of a Robotic C-Arm System Using X-Ray Imaging}, journal = {Journal of Medical Robotics Research}, year = {2018}, month = apr, abstract = {In interventional radiology or surgery, C-arm systems are typical imaging modalities. Apart from 2D X-ray images, C-arm systems are able to perform 2D/3D overlays. For this application, a previously recorded 3D volume is projected on a 2D X-ray image for providing additional information to the clinician. The required accuracy for this application is 1.5 mm. Such a spatial accuracy is only achievable with C-arms, if a calibration is performed. State-of-the-art approaches interpolate between values of lookup tables of a sampled Cartesian volume. However, due to the non-linear system behavior in Cartesian space, a trade-off between the calibration effort and the calibrated volume is necessary. 
This leads to the calibration of the most relevant subvolume and high calibration times. We discuss a new model-based calibration approach for C-arm systems which potentially leads to a smaller calibration effort and simultaneously to an increased calibrated volume. In this work, we demonstrate that it is possible to calibrate a robotic C-arm system using X-ray images and that a static model of the system is required to achieve the desired accuracy for 2D/3D overlays, if re-orientations of the system are performed.}, doi = {10.1142/S2424905X18410027}, keywords = {robotics, roboterr{\"{o}}ntgen}, } @inproceedings{Cheng2018a, author = {Cheng, Chih-Hong and Diehl, Frederik and Hinz, Gereon Michael and Hamza, Yassine and N{\"{u}}hrenberg, Georg and Rickert, Markus and Rue{\ss}, Harald and Truong Le, Michael}, title = {Neural Networks for Safety-Critical Applications - {C}hallenges, Experiments and Perspectives}, booktitle = {Proceedings of the Design, Automation \& Test in Europe Conference \& Exhibition (DATE)}, pages = {1005--1006}, year = {2018}, month = mar, address = {Dresden, Germany}, abstract = {We propose a methodology for designing dependable Artificial Neural Networks (ANNs) by extending the concepts of understandability, correctness, and validity that are crucial ingredients in existing certification standards. 
We apply the concept in a concrete case study for designing a highway ANN-based motion predictor to guarantee safety properties such as impossibility for the ego vehicle to suggest moving to the right lane if there exists another vehicle on its right.}, doi = {10.23919/DATE.2018.8342158}, keywords = {autonomous driving, robotics, neural networks, safety}, } @misc{Tsakiridou2018a, author = {Tsakiridou, Evdoxia}, title = {Roboter mit Hirn}, publisher = {Blog Innovations Report}, year = {2018}, month = mar, abstract = {fortiss hat f{\"{u}}r ein Teilprojekt des „Human Brain Project“ einen neuen Simulator entwickelt, mit dem Neurowissenschaftler ihre Hirnmodelle in virtuelle Roboter "verpflanzen" k{\"{o}}nnen. Das Besondere dabei: Die virtuellen Roboter sind mit einem Gehirn ausgestattet, das mit so genannten gepulsten neuronalen Netzen arbeitet. Diese sind ihrem biologischen Vorbild n{\"{a}}her als die aus dem maschinellen Lernen bekannten neuronalen Netze der ersten Generation. Sie versprechen eine bessere Kodierung von Nervenimpulsen und somit eine feinere Abstimmung von Bewegungen. Die Idee: Wenn Roboter sich {\"{a}}hnlich wie Menschen bewegen, k{\"{o}}nnen sie in Zukunft leichter gebaut und sicherer gesteuert werden.}, keywords = {human brain project, HBP, neurorobotics, neuromorphics, brain simulation, spiking neural networks, NRP, robot simulation}, url = {https://www.innovations-report.de/html/berichte/informationstechnologie/roboter-mit-hirn.html}, } @article{Gaschler2018a, author = {Gaschler, Andre and Petrick, Ronald P. A. and Khatib, Oussama and Knoll, Alois}, title = {{KAB}ou{M}: Knowledge-Level Action and Bounding Geometry Motion Planner}, journal = {Journal of Artificial Intelligence Research}, volume = {61}, pages = {323--362}, year = {2018}, month = feb, abstract = {For robots to solve real world tasks, they often require the ability to reason about both symbolic and geometric knowledge. 
We present a framework, called KABouM, for integrating knowledge-level task planning and motion planning in a bounding geometry. By representing symbolic information at the knowledge level, we can model incomplete information, sensing actions and information gain; by representing all geometric entities--objects, robots and swept volumes of motions--by sets of convex polyhedra, we can efficiently plan manipulation actions and raise reasoning about geometric predicates, such as collisions, to the symbolic level. At the geometric level, we take advantage of our bounded convex decomposition and swept volume computation with quadratic convergence, and fast collision detection of convex bodies. We evaluate our approach on a wide set of problems using real robots, including tasks with multiple manipulators, sensing and branched plans, and mobile manipulation.}, doi = {10.1613/jair.5560}, keywords = {robotics, smerobotics, james}, } @inproceedings{Rickert2017a, author = {Rickert, Markus and Gaschler, Andre}, title = {{R}obotics {L}ibrary: An Object-Oriented Approach to Robot Applications}, booktitle = {Proceedings of the {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems (IROS)}, year = {2017}, month = sep, address = {Vancouver, BC, Canada}, abstract = {We discuss the architecture and software engineering principles of the Robotics Library (RL). Driven by requirements of robot systems, research projects, industrial applications, and education, we identify relevant design requirements and present an approach to manage hardware and real-time, provide a user-friendly, object-oriented interface to powerful kinematics and dynamics calculations, and support various platforms. 
After over ten years of development that started in 2004 and evaluating many variants of the architecture, we discuss the design choices for the components of the library in its current version.}, doi = {10.1109/IROS.2017.8202232}, keywords = {robotics, robotics library}, url = {https://www.roboticslibrary.org/}, } @inproceedings{Profanter2017a, author = {Profanter, Stefan and Dorofeev, Kirill and Zoitl, Alois and Knoll, Alois}, title = {OPC UA for Plug \& Produce: Automatic Device Discovery using LDS-ME}, booktitle = {Proceedings of the {IEEE} International Conference on Emerging Technologies And Factory Automation ({ETFA})}, year = {2017}, month = sep, address = {Limassol, Cyprus}, abstract = {Current manufacturing and production are changing more and more into a flexible and adaptable factory layout that requires rapid changeover and short reconfiguration times of machines. Additionally the setup time for new devices should be as short as possible. In this paper we propose a hierarchical architecture for a multi-level Plug \& Produce system and evaluate the proposed structure using open source OPC UA implementations for easy integration of new devices into an existing system. 
Aside from the requirements for such a system, basic concepts of the OPC UA Discovery Service Set are described and different open source OPC UA implementations for C/C++ and Java are compared.}, keywords = {robotics, openmos}, } @inproceedings{Kraft2017a, author = {Kraft, Martin and Rickert, Markus}, title = {How to Teach Your Robot in 5 Minutes: Applying {UX} Paradigms to Human-Robot-Interaction}, booktitle = {Proceedings of the {IEEE} International Symposium on Robot and Human Interactive Communication ({RO-MAN})}, year = {2017}, month = aug, address = {Lisbon, Portugal}, abstract = {When creating modern and visually appealing user experiences for the interaction with industrial robots, previously known and universally applicable paradigms in app and web design can be utilized to increase accessibility and usability of the to be created service. This is especially the case when the expected user group consists of untrained and inexperienced users and therefore system interaction focus is laid more on build progress overview, safety for human and robot, as well as overall simplification of complicated features. In this paper, we present four of the most important paradigms of modern graphical user experiences in web and app design that can be used to forward the concept of interacting with an industrial robot without any experience-related thresholds. By redesigning an existing interaction concept of a working robot cell system for assembly tasks in a small and medium-sized enterprise environment the presented paradigms are being utilized. 
The achieved improvements are then examined in a before-after user study to analyze the paradigm's success in suiting the user's expectation and anticipation using the redesigned service.}, doi = {10.1109/ROMAN.2017.8172416}, keywords = {robotics, smerobotics}, } @article{Foster2017a, author = {Foster, Mary Ellen and Gaschler, Andre and Giuliani, Manuel}, title = {Automatically Classifying User Engagement for Dynamic Multi-party Human–Robot Interaction}, journal = {International Journal of Social Robotics}, pages = {1--16}, year = {2017}, month = jul, abstract = {A robot agent designed to engage in real-world human–robot joint action must be able to understand the social states of the human users it interacts with in order to behave appropriately. In particular, in a dynamic public space, a crucial task for the robot is to determine the needs and intentions of all of the people in the scene, so that it only interacts with people who intend to interact with it. We address the task of estimating the engagement state of customers for a robot bartender based on the data from audiovisual sensors. We begin with an offline experiment using hidden Markov models, confirming that the sensor data contains the information necessary to estimate user state. We then present two strategies for online state estimation: a rule-based classifier based on observed human behaviour in real bars, and a set of supervised classifiers trained on a labelled corpus. These strategies are compared in offline cross-validation, in an online user study, and through validation against a separate test corpus. These studies show that while the trained classifiers are best in a cross-validation setting, the rule-based classifier performs best with novel data; however, all classifiers also change their estimate too frequently for practical use. 
To address this issue, we present a final classifier based on Conditional Random Fields: this model has comparable performance on the test data, with increased stability. In summary, though, the rule-based classifier shows competitive performance with the trained classifiers, suggesting that for this task, such a simple model could actually be a preferred option, providing useful online performance while avoiding the implementation and data-scarcity issues involved in using machine learning for this task.}, doi = {10.1007/s12369-017-0414-y}, keywords = {robotics, james}, } @inproceedings{Thuerauf2017a, author = {Th{\"{u}}rauf, Sabine and K{\"{o}}rner, Mario and Vogt, Florian and Hornung, Oliver and Nasseri, M. Ali and Knoll, Alois}, title = {Environment Effects at Phantom-Based {X}-Ray Pose Measurements}, booktitle = {Proceedings of the International Conference of the {IEEE} Engineering in Medicine and Biology Society ({EMBC})}, pages = {1836--1839}, year = {2017}, month = jul, address = {Seogwipo, South Korea}, abstract = {Image-based pose measurements relative to phantoms are used for various applications. Some examples are: tracking, registration or calibration. If highly precise measurements are needed, even changes of environment factors influence the measurements. This work evaluates how humidity and room temperature affect an image based pose measurements using a phantom. The pose measurement is used for the specific use case of an absolute accurate calibration of a C-arm X-ray system. However, the results are transferable to other applications, too. We describe the effects on different measurement parameters and experimentally evaluate the imprecisions caused by water absorption and thermal expansion of the phantom. 
The real world results show, that it is needed to monitor the environment effects if measurement precisions in the submillimeter scale are necessary.}, doi = {10.1109/EMBC.2017.8037203}, keywords = {robotics, roboterr{\"{o}}ntgen}, } @inproceedings{Thuerauf2017b, author = {Th{\"{u}}rauf, Sabine and Hornung, Oliver and K{\"{o}}rner, Mario and Vogt, Florian and Nasseri, M. Ali and Knoll, Alois}, title = {Absolute Accurate Calibration of a Robotic {C}-Arm System based on {X}-Ray Observations using a Kinematic Model}, booktitle = {Proceedings of the Workshop on Surgical Robots: Compliant, Continuum, Cognitive, and Collaborative, {IEEE} International Conference on Robotics and Automation ({ICRA})}, year = {2017}, month = jun, address = {Singapore, Singapore}, abstract = {C-arm X-ray systems are commonly used imaging modalities in surgery or interventional radiology. In addition to typical 2D X-ray images, 2D/3D overlays can be performed by these systems. For the 2D/3D overlay a high spatial accuracy of 1.5 mm is needed. This accuracy is only achievable, if a spatial calibration of the system is performed. We introduce a new calibration technique for C-arm systems based on an absolute accurate robot calibration to speed up the calibration process and increase the calibrated working volume in future. This work is a proof of concept and shows that the accuracy needed for 2D/3D overlays is achievable with an absolute accurate robot calibration based on X-ray images. 
However, a deformation model is needed if the C-arm system is reorientated.}, keywords = {robotics, roboterr{\"{o}}ntgen}, } @inproceedings{Chen2017a, author = {Chen, Chao and Rickert, Markus and Knoll, Alois}, title = {Motion Planning under Perception and Control Uncertainties with Space Exploration Guided Heuristic Search}, booktitle = {Proceedings of the {IEEE} Intelligent Vehicles Symposium}, year = {2017}, month = jun, address = {Redondo Beach, CA, USA}, abstract = {Reliability and safety are extremely important for autonomous driving in real traffic scenarios. However, due to imperfect control and sensing, the actual state of the vehicle cannot be flawlessly predicted or measured, but estimated with uncertainty. Therefor, it is important to consider the execution risk advance in motion planning for a solution with a high success rate. The Space Exploration Guided Heuristic Search (SEHS) method is extended to deal with perception and control uncertainty in its two planning stages. First, the localization uncertainty is evaluated with a simple probabilistic robot model by the Space Exploration to find a path corridor with sufficient localization quality for the desired motion accuracy. Then, a trajectory controller is modeled with nonholonomic kinematics for the belief propagation of a robot state with primitive motions. The dynamic model and the control feedback are approximated in a close neighborhood of the reference trajectory. In this case, the Heuristic Search can propagate the state uncertainty as a normal distribution in the search tree to guarantee a high probability of safety and to achieve the required final accuracy. The belief-based SEHS is evaluated in several simulated scenarios. Compared to the basic SEHS method that assumes perfection, motions with higher execution successful rate are produced, especially the human-like behaviors for driving through narrow passages and precise parking. 
This confirms the major contribution of this work in exploiting the uncertainties for motion planning in autonomous driving.}, doi = {10.1109/IVS.2017.7995801}, keywords = {robotics, autonomous driving, path planning}, } @article{Somani2017a, author = {Somani, Nikhil and Rickert, Markus and Knoll, Alois}, title = {An Exact Solver for Geometric Constraints with Inequalities}, journal = {{IEEE} Robotics and Automation Letters}, volume = {2}, number = {2}, pages = {1148--1155}, year = {2017}, month = apr, note = {Accepted for presentation at ICRA 2017}, abstract = {CAD/CAM approaches have been used in the manufacturing industry for a long time, and their use in robotic systems is becoming more popular. One common element in these approaches is the use of geometric constraints to define relative object poses. Hence, approaches for solving these geometric constraints are critical to their performance. In this work, we present an exact solver for geometric constraints. Our approach is based on mathematical models of constraints and geometric properties of constraint nullspaces. Our constraint solver supports non-linear constraints with inequalities, and also mixed transformation manifolds, i.e., cases where the rotation and translation components of the constraints are not independent. Through several applications, we show how inequality constraints and mixed transformation manifolds increase the expressive power of constraint-based task definitions. 
The exact solver provides repeatable solutions with deterministic runtimes and our experiments show that it is also much faster than comparable iterative solvers.}, doi = {10.1109/LRA.2017.2655113}, keywords = {robotics}, } @article{Falotico2017a, author = {Falotico, Egidio and Vannucci, Lorenzo and Ambrosano, Alessandro and Albanese, Ugo and Ulbrich, Stefan and Vasquez Tieck, Juan Camilo and Hinkel, Georg and Kirtay, Murat and Peric, Igor and Denninger, Oliver and Cauli, Nino and Roennau, Arne and Klinker, Gudrun and von Arnim, Axel and Guyot, Luc and Peppicelli, Daniel and Martinez-Canada, Pablo and Ros, Eduardo and Maier, Patrick and Weber, Sandro and Huber, Manuel and Plecher, David and R{\"{o}}hrbein, Florian and Deser, Stefan and Roitberg, Alina and van der Smagt, Patrick and Dillmann, R{\"{u}}diger and Levi, Paul and Laschi, Cecilia and Knoll, Alois and Gewaltig, Marc-Oliver}, title = {Connecting Artificial Brains to Robots in a Comprehensive Simulation Framework: The Neurorobotics Platform}, journal = {Frontiers in Neurorobotics}, year = {2017}, month = jan, abstract = {Combined efforts in the fields of neuroscience, computer science, and biology allowed to design biologically realistic models of the brain based on spiking neural networks. For a proper validation of these models, an embodiment in a dynamic and rich sensory environment, where the model is exposed to a realistic sensory-motor task, is needed. Due to the complexity of these brain models that, at the current stage, cannot deal with real-time constraints, it is not possible to embed them into a real-world task. Rather, the embodiment has to be simulated as well. While adequate tools exist to simulate either complex neural networks or robots and their environments, there is so far no tool that allows to easily establish a communication between brain and body models. 
The Neurorobotics Platform is a new web-based environment that aims to fill this gap by offering scientists and technology developers a software infrastructure allowing them to connect brain models to detailed simulations of robot bodies and environments and to use the resulting neurorobotic systems for in silico experimentation. In order to simplify the workflow and reduce the level of the required programming skills, the platform provides editors for the specification of experimental sequences and conditions, environments, robots, and brain–body connectors. In addition to that, a variety of existing robots and environments are provided. This work presents the architecture of the first release of the Neurorobotics Platform developed in subproject 10 “Neurorobotics” of the Human Brain Project (HBP).1 At the current state, the Neurorobotics Platform allows researchers to design and run basic experiments in neurorobotics using simulated robots and simulated environments linked to simplified versions of brain models. We illustrate the capabilities of the platform with three example experiments: a Braitenberg task implemented on a mobile robot, a sensory-motor learning task based on a robotic controller, and a visual tracking embedding a retina model on the iCub humanoid robot. 
These use-cases allow to assess the applicability of the Neurorobotics Platform for robotic tasks as well as in neuroscientific experiments.}, doi = {10.3389/fnbot.2017.00002}, keywords = {robotics, software architectures, robot programming, web technologies, human brain project, HBP, neurorobotics, neuromorphics, brain simulation, spiking neural networks, NRP, robot simulation}, url = {https://www.frontiersin.org/articles/10.3389/fnbot.2017.00002/full}, } @inproceedings{Perzylo2016a, author = {Perzylo, Alexander and Somani, Nikhil and Profanter, Stefan and Kessler, Ingmar and Rickert, Markus and Knoll, Alois}, title = {Intuitive Instruction of Industrial Robots: Semantic Process Descriptions for Small Lot Production}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, pages = {2293--2300}, year = {2016}, month = oct, address = {Daejeon, Republic of Korea}, abstract = {In this paper, we introduce a novel robot programming paradigm. It focuses on reducing the required expertise in robotics to a level that allows shop floor workers to use robots in their application domain without the need of extensive training. Our approach is user-centric and can interpret underspecified robot tasks, enabling communication on an abstract level. Such high-level task descriptions make the system amenable for users that are experts in a particular domain, but have limited knowledge about robotics and are thus not able to specify low-level details and instructions. Semantic models for all involved entities, i.e., processes, workpieces, and workcells, enable automatic reasoning about underspecified tasks and missing pieces of information. 
We showcase and evaluate this methodology on two industrial use cases from the domains of assembly and woodworking, comparing it to state-of-the-art solutions provided by robot manufacturers.}, doi = {10.1109/IROS.2016.7759358}, keywords = {robotics, smerobotics}, url = {https://youtu.be/bbInEMEF5zU}, } @inproceedings{Somani2016a, author = {Somani, Nikhil and Rickert, Markus and Gaschler, Andre and Cai, Caixia and Perzylo, Alexander and Knoll, Alois}, title = {Task Level Robot Programming using Prioritized Non-Linear Inequality Constraints}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, pages = {430--437}, year = {2016}, month = oct, address = {Daejeon, Republic of Korea}, abstract = {In this paper, we propose a framework for prioritized constraint-based specification of robot tasks. This framework is integrated with a cognitive robotic system based on semantic models of processes, objects, and workcells. The target is to enable intuitive (re-)programming of robot tasks, in a way that is suitable for non-expert users typically found in SMEs. Using CAD semantics, robot tasks are specified as geometric interrelational constraints. During execution, these are combined with constraints from the environment and the workcell, and solved in real-time. Our constraint model and solving approach supports a variety of constraint functions that can be non-linear and also include bounds in the form of inequalities, e.g., geometric interrelations, distance, collision avoidance and posture constraints. It is a hierarchical approach where priority levels can be specified for the constraints, and the nullspace of higher priority constraints is exploited to optimize the lower priority constraints. 
The presented approach has been applied to several typical industrial robotic use-cases to highlight its advantages compared to other state-of-the-art approaches.}, doi = {10.1109/IROS.2016.7759090}, keywords = {robotics}, url = {https://youtu.be/baet9IkTK04}, } @inproceedings{Thuerauf2016a, author = {Th{\"{u}}rauf, Sabine and Vogt, Florian and Hornung, Oliver and K{\"{o}}rner, Mario and Nasseri, M. Ali and Knoll, Alois}, title = {Experimental Evaluation of the Accuracy at the {C}-Arm Pose Estimation with {X}-Ray Images}, booktitle = {Proceedings of the International Conference of the {IEEE} Engineering in Medicine and Biology Society ({EMBC})}, pages = {3859--3862}, year = {2016}, month = aug, address = {Orlando, FL, USA}, abstract = {C-arm X-ray systems need a high spatial accuracy for applications like cone beam computed tomography and 2D/3D overlay. One way to achieve the needed precision is a model-based calibration of the C-arm system. For such a calibration a kinematic and dynamic model of the system is constructed whose parameters are computed by pose measurements of the C-arm. Instead of common measurement systems used for a model-based calibration for robots like laser trackers, we use X-ray images of a calibration phantom to measure the C-arm pose. By the direct use of the imaging system, we overcome registration errors between the measurement device and the C-arm system. The C-arm pose measurement by X-ray imaging, the new measurement technique, has to be evaluated to check if the measurement accuracy is sufficient for the modelbased calibration regarding the two mentioned applications. 
The scope of this work is a real world evaluation of the C-arm pose measurement accuracy with X-ray images of a calibration phantom using relative phantom movements and a laser tracker as ground truth.}, doi = {10.1109/EMBC.2016.7591570}, keywords = {robotics, roboterr{\"{o}}ntgen}, } @inproceedings{Thuerauf2016b, author = {Th{\"{u}}rauf, Sabine and Wolf, Markus and K{\"{o}}rner, Mario and Vogt, Florian and Hornung, Oliver and Nasseri, M. Ali and Knoll, Alois}, title = {A Realistic {X}-Ray Simulation for {C}-Arm Geometry Calibration}, booktitle = {Proceedings of the {IEEE} International Conference on Biomedical Robotics and Biomechatronics ({B}io{R}ob)}, pages = {383--388}, year = {2016}, month = jun, address = {Singapore, Singapore}, abstract = {Applications like cone beam computed tomographies (CBCTs) or 2D-3D overlays need a high geometrical accuracy of the C-arm system. In order to achieve this, geometry calibrations are performed to increase the accuracy given by the kinematics of the system. Commonly X-ray images of a phantom with known geometry are taken for the calibration. The images, together with a 3D model of the phantom, serve as input for an optimizer, which estimates the pose of the C-arm relative to the phantom. Afterwards, the estimates are used to increase the geometrical accuracy of the system. Inaccuracies due to real world effects appear, e.g. manufacturing or assembly inaccuracies of the phantom, the position of the X-ray tube, or the pose of the detector. To evaluate these factors separately a simulation is helpful, which needs to be as realistic as possible. To achieve this we defined three requirements, which have to be fulfilled: realistic noise, realistic absolute errors and similar error distributions within the working volume. 
By means of these criteria we investigate if our simulation mirrors a real world C-arm pose measurement for C-arm geometry calibration sufficiently.}, doi = {10.1109/BIOROB.2016.7523656}, keywords = {robotics, roboterr{\"{o}}ntgen}, } @inproceedings{Haage2016, author = {Haage, Mathias and Profanter, Stefan and Kessler, Ingmar and Perzylo, Alexander and Somani, Nikhil and S{\"{o}}rnmo, Olof and Karlsson, Martin and Gesteg{\aa}rd Robertz, Sven and Nilsson, Klas and Resch, Ludovic and Marti, Michael}, title = {On Cognitive Robot Woodworking in {SME}robotics}, booktitle = {Proceedings of the International Symposium on Robotics (ISR)}, year = {2016}, month = jun, address = {Munich, Germany}, abstract = {This paper details and discusses work performed at the woodworking SME Mivelaz Techniques Bois SA within the EU FP7 project SMErobotics. The aim is to improve non-expert handling of the cell by introduction of cognitive abilities in the robot system. Three areas are considered; intuitive programming, process adaptation and system integration. Proposed cognitive components are described together with experiments performed.}, keywords = {robotics, smerobotics}, } @article{Rickert2016a, author = {Rickert, Markus and Perzylo, Alexander}, title = {{I}ndustrieroboter f{\"{u}}r {KMU}: {F}lexible und intuitive {P}rozessbeschreibung}, journal = {{I}ndustrie {M}anagement}, volume = {32}, number = {2}, pages = {46--49}, year = {2016}, month = apr, abstract = {Die Produktion in kleinen und mittelst{\"{a}}ndischen Unternehmen (KMU) findet oft im Rahmen von Kleinserien oder sogar Einzelfertigung statt. Um diese KMU durch Roboter-basierte Automatisierung zu unterst{\"{u}}tzen, bedarf es eines Umdenkens in der Ausgestaltung der Bedienkonzepte. In diesen Anwendungsszenarien gewinnt die effiziente Programmierung und Adaption enorm an Stellenwert. 
Zudem muss das ben{\"{o}}tigte Vorwissen im Umgang mit Robotern soweit reduziert werden, dass Facharbeiter ohne gro{\ss}en Ausbildungsaufwand Robotersysteme selbst bedienen k{\"{o}}nnen. In diesem Beitrag stellen wir einen wissensbasierten Ansatz vor, der die genannten Herausforderungen behandelt.}, keywords = {robotics, smerobotics}, } @inproceedings{Somani2015c, author = {Somani, Nikhil and Perzylo, Alexander and Cai, Caixia and Rickert, Markus and Knoll, Alois}, title = {Object Detection Using Boundary Representations of Primitive Shapes}, booktitle = {Proceedings of the {IEEE} International Conference on Robotics and Biomimetics ({ROBIO})}, pages = {108--113}, year = {2015}, month = dec, address = {Zhuhai, China}, abstract = {In this paper, an approach for matching of primitive shapes detected from point clouds, to boundary representations of primitive shapes contained in CAD models of objects/workpieces is presented. The primary target application is object detection and pose estimation from noisy RGBD sensor data. This approach can also be used to determine incomplete object poses, including those of symmetrical objects. 
Detection and reasoning about these under-specified object poses is useful in several practical applications such as robotic manipulation, which are also presented in this paper.}, doi = {10.1109/ROBIO.2015.7414632}, keywords = {robotics, smerobotics}, } @inproceedings{Cai2015a, author = {Cai, Caixia and Somani, Nikhil and Rickert, Markus and Knoll, Alois}, title = {Prioritized Motion-Force Control of Multi-Constraints for Industrial Manipulators}, booktitle = {Proceedings of the IEEE International Conference on Robotics and Biomimetics (ROBIO)}, pages = {952--957}, year = {2015}, month = dec, address = {Zhuhai, China}, abstract = {To synthesize whole-body behaviors interactively, multiple tasks and constraints need to be simultaneously controlled, including those that guarantee that the constraints imposed by the robot's structure and the external environment are satisfied. In this paper, we present a prioritized, multiple-task control framework that is able to control forces in systems ranging from humanoids to industrial robots. Priorities between tasks are accomplished through null-space projection. Several relevant constraints (i.e., motion constraints, joint limits, force control) are tested to evaluate the control framework. 
Further, we evaluate the proposed approach in two typical industrial robotics applications: grasping of cylindrical objects and welding.}, doi = {10.1109/ROBIO.2015.7418894}, keywords = {robotics}, } @inproceedings{Vannucci2015a, author = {Vannucci, Lorenzo and Ambrosano, Alessandro and Cauli, Nino and Albanese, Ugo and Falotico, Egidio and Ulbrich, Stefan and Pfotzer, Lars and Hinkel, Georg and Denninger, Oliver and Peppicelli, Daniel and Guyot, Luc and von Arnim, Axel}, title = {A visual tracking model implemented on the iCub robot as a use case for a novel neurorobotic toolkit integrating brain and physics simulation}, booktitle = {Proceedings of the {IEEE}-{RAS} International Conference on Humanoid Robots (Humanoids)}, pages = {1179--1184}, year = {2015}, month = nov, address = {Seoul, South Korea}, abstract = {Developing neuro-inspired computing paradigms that mimic nervous system function is an emerging field of research that fosters our model understanding of the biological system and targets technical applications in artificial systems. The computational power of simulated brain circuits makes them a very promising tool for the development for brain-controlled robots. Early phases of robotic controllers development make extensive use of simulators as they are easy, fast and cheap tools. In order to develop robotics controllers that encompass brain models, a tool that include both neural simulation and physics simulation is missing. Such a tool would require the capability of orchestrating and synchronizing simulations as well as managing the exchange of data both between them. The Neurorobotics Platform (NRP) aims at filling this gap through an integrated software toolkit enabling an experimenter to design and execute a virtual experiment with a simulated robot using customized brain models. As a use case for the NRP, the iCub robot has been integrated into the platform and connected to a spiking neural network. 
In particular, experiments of visual tracking have been conducted in order to demonstrate the potentiality of such a platform.}, doi = {10.1109/HUMANOIDS.2015.7363512}, keywords = {robotics, human brain project, HBP, neurorobotics, neuromorphics, brain simulation, spiking neural networks, NRP, robot simulation}, language = {English}, url = {http://vislab.isr.ist.utl.pt/wp-content/uploads/2017/11/evannucci-humanoids2015.pdf}, } @inproceedings{Roitberg2015a, author = {Roitberg, Alina and Somani, Nikhil and Perzylo, Alexander and Rickert, Markus and Knoll, Alois}, title = {Multimodal Human Activity Recognition for Industrial Manufacturing Processes in Robotic Workcells}, booktitle = {Proceedings of the {ACM} International Conference on Multimodal Interaction ({ICMI})}, pages = {259--266}, year = {2015}, month = nov, address = {Seattle, WA, USA}, abstract = {We present an approach for monitoring and interpreting human activities based on a novel multimodal vision-based interface, aiming at improving the efficiency of human-robot interaction (HRI) in industrial environments. Multi-modality is an important concept in this design, where we combine inputs from several state-of-the-art sensors to provide a variety of information, e.g. skeleton and fingertip poses. Based on typical industrial workflows, we derived multiple levels of human activity labels, including large-scale activities (e.g. assembly) and simpler sub-activities (e.g. hand gestures), creating a duration- and complexity-based hierarchy. 
We train supervised generative classifiers for each activity level and combine the output of this stage with a trained Hierarchical Hidden Markov Model (HHMM), which models not only the temporal aspects between the activities on the same level, but also the hierarchical relationships between the levels.}, doi = {10.1145/2818346.2820738}, keywords = {robotics, smerobotics}, } @inproceedings{Perzylo2015c, author = {Perzylo, Alexander and Somani, Nikhil and Profanter, Stefan and Gaschler, Andre and Griffiths, Sascha and Rickert, Markus and Knoll, Alois}, title = {Ubiquitous Semantics: Representing and Exploiting Knowledge, Geometry, and Language for Cognitive Robot Systems}, booktitle = {Proceedings of the Workshop Towards Intelligent Social Robots - Current Advances in Cognitive Robotics, {IEEE}/{RAS} International Conference on Humanoid Robots ({HUMANOIDS})}, year = {2015}, month = nov, address = {Seoul, South Korea}, abstract = {In this paper, we present an integrated approach to knowledge representation for cognitive robots. We combine knowledge about robot tasks, interaction objects including their geometric shapes, the environment, and natural language in a common ontological description. This description is based on the Web Ontology Language (OWL) and allows to automatically link and interpret these different kinds of information. Semantic descriptions are shared between object detection and pose estimation, task-level manipulation skills, and human-friendly interfaces. Through lifting the level of communication between the human operator and the robot system to an abstract level, we achieve more human-suitable interaction and thus a higher level of acceptance by the user. Furthermore, it increases the efficiency of communication. 
The benefits of our approach are highlighted by examples from the domains of industrial assembly and service robotics.}, keywords = {robotics, smerobotics}, } @inproceedings{Thuerauf2015a, author = {Th{\"{u}}rauf, Sabine and Hornung, Oliver and K{\"{o}}rner, Mario and Vogt, Florian and Nasseri, M. Ali and Knoll, Alois}, title = {Evaluation of a 9{D}-Position Measurement Method of a {C}-Arm Based on {X}-Ray Projections}, booktitle = {Proceedings of the Workshop on Interventional Microscopy, International Conference on Medical Image Computing and Computer Assisted Intervention ({MICCAI})}, year = {2015}, month = oct, address = {Munich, Germany}, abstract = {For features like X-ray CTs (computed tomography) a high absolute pose accuracy of the C-arm system is needed. Therefore, an extensive calibration has to be performed to make the C-arm system sufficiently accurate. One new approach to calibrate the system is to perform an absolute robot calibration with end effector pose measurements as input. To measure the poses, X-ray images of a calibration phantom can be used. This work determines a lower bound for an X-raybased 9D-position measurement technique for a C-arm system (including the 3D-position of the tube, the 3D-position of the detector and the 3Dorientation of the detector) using a helical calibration phantom regarding its accuracy by a simulation.}, keywords = {robotics, roboterr{\"{o}}ntgen}, } @inproceedings{Thuerauf2015b, author = {Th{\"{u}}rauf, Sabine and Vogt, Florian and Hornung, Oliver and K{\"{o}}rner, Mario and Nasseri, M. 
Ali and Knoll, Alois}, title = {Tuning of {X}-Ray Parameters for Noise Reduction of an Image-Based Focus Position Measurement of a {C}-Arm {X}-Ray System}, booktitle = {Proceedings of the Workshop on Alternative Sensing for Robot Perception, {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems ({IROS})}, year = {2015}, month = oct, address = {Hamburg, Germany}, abstract = {In surgery or interventional radiology 2D/3D overlays of X-rays combined with previously recorded 3D volumes support the physician with additional visual information. Typical X-ray systems in these fields are C-arm systems. First of all, the 3D volume and the 2D images need to be registered. Afterwards, if the system moves, errors between the real world pose and the nominal pose of the system appear. One possible way to overcome this problem is an absolute calibration of the system. With this approach the projection geometry can be defined accurately enough for a whole working volume. Therefore, measurements of the end effector pose serve as input. A potential method to determine the pose of the C-arm, is a pose estimation by X-ray observations of a calibration phantom. 
This work evaluates how the focal spot size, the pixel size and the dose affect the noise at the pose estimation of the C-arm by X-ray observations.}, keywords = {robotics, roboterr{\"{o}}ntgen}, } @inproceedings{Perzylo2015b, author = {Perzylo, Alexander and Somani, Nikhil and Rickert, Markus and Knoll, Alois}, title = {An Ontology for {CAD} Data and Geometric Constraints as a Link Between Product Models and Semantic Robot Task Descriptions}, booktitle = {Proceedings of the {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems ({IROS})}, pages = {4197--4203}, year = {2015}, month = sep, address = {Hamburg, Germany}, abstract = {In this paper, we introduce an approach for leveraging CAD description to a semantic level, in order to link additional knowledge to CAD models and to exploit resulting synergy effects. This has been achieved by designing a description language, based on the Web Ontology Language (OWL), that is used to define boundary representations (BREP) of objects. This involves representing geometric entities in a semantic meaningful way, e.g., a circle is defined by a coordinate frame and a radius instead of a set of polygons. Furthermore, the scope of this semantic description language also covers geometric constraints between multiple objects. Constraints can be specified not only on the object level, but down to single edges or faces of an object. This semantic representation is used to improve a variety of applications, ranging from shape-based object recognition to constraint-based robot task descriptions. 
Results from a quantitative evaluation are presented to assess the practicability of this approach.}, doi = {10.1109/IROS.2015.7353971}, keywords = {robotics, smerobotics}, } @inproceedings{Profanter2015a, author = {Profanter, Stefan and Perzylo, Alexander and Somani, Nikhil and Rickert, Markus and Knoll, Alois}, title = {Analysis and Semantic Modeling of Modality Preferences in Industrial Human-Robot Interaction}, booktitle = {Proceedings of the {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems ({IROS})}, pages = {1812--1818}, year = {2015}, month = sep, address = {Hamburg, Germany}, abstract = {Intuitive programming of industrial robots is especially important for small and medium-sized enterprises. We evaluated four different input modalities (touch, gesture, speech, 3D tracking device) regarding their preference, usability, and intuitiveness for robot programming. A Wizard-of-Oz experiment was conducted with 30 participants and its results show that most users prefer touch and gesture input over 3D tracking device input, whereas speech input was the least preferred input modality. The results also indicate that there are gender specific differences for preferred input modalities. 
We show how the results of the user study can be formalized in a semantic description language in such a way that a cognitive robotic workcell can benefit from the additional knowledge of input and output modalities, task parameter types, and preferred combinations of the two.}, doi = {10.1109/IROS.2015.7353613}, keywords = {robotics, smerobotics}, } @inproceedings{Somani2015b, author = {Somani, Nikhil and Gaschler, Andre and Rickert, Markus and Perzylo, Alexander and Knoll, Alois}, title = {Constraint-Based Task Programming with {CAD} Semantics: From Intuitive Specification to Real-Time Control}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, pages = {2854--2859}, year = {2015}, month = sep, address = {Hamburg, Germany}, abstract = {In this paper, we propose a framework for intuitive task-based programming of robots using geometric inter-relational constraints. The intended applications of this framework are robot programming interfaces that use semantically rich task descriptions, allow intuitive (re-)programming, and are suitable for non-expert users typically found in SMEs. A key concept in this work is the use of CAD semantics to represent geometric entities in the robotic workcell. The robot tasks are then represented as a set of geometrical inter-relational constraints, which are solved in real-time to be executed on the robot. Since these constraints often specify the target pose only partially, the robot can be controlled to move in the constraints' null space in order to handle external disturbances or further optimize the robot's pose during runtime. Geometrical inter-relational constraints are easy to understand and can be intuitively specified using CAD software. 
A number of applications common in industrial robotic scenarios have been chosen to highlight the advantages of the presented approach vis-{\`a}-vis the state-of-the-art approaches.}, doi = {10.1109/IROS.2015.7353770}, keywords = {robotics}, url = {https://youtu.be/qRJ1JmNoFEw}, } @inproceedings{Lenz2015a, author = {Lenz, David and Rickert, Markus and Knoll, Alois}, title = {Heuristic Search in Belief Space for Motion Planning under Map and Actuator Uncertainties}, booktitle = {Proceedings of the {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems ({IROS})}, pages = {2659--2665}, year = {2015}, month = sep, address = {Hamburg, Germany}, abstract = {In order to fully exploit the capabilities of a robotic system, it is necessary to consider the limitations and errors of actuators and sensors already during the motion planning phase. In this paper, a framework for path planning is introduced, that uses heuristic search to build up a search graph in belief space, an extension to the deterministic state space considering the uncertainty associated with this space. As sources of uncertainty, actuator errors and map uncertainties are considered. We apply this framework to various scenarios for a non-holonomic vehicle and compare the resulting paths to heuristic state space planners and LQG-MP with the help of simulations. 
As a result, paths generated with this framework could either not be found with worst-case assumptions or have a higher probability of being successfully executed compared to planners with more relaxed constraints.}, doi = {10.1109/IROS.2015.7353740}, keywords = {autonomous driving, robotics}, } @inproceedings{Chen2015c, author = {Chen, Chao and Rickert, Markus and Knoll, Alois}, title = {Kinodynamic Motion Planning with Space-Time Exploration Guided Heuristic Search for Car-Like Robots in Dynamic Environments}, booktitle = {Proceedings of the {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems ({IROS})}, pages = {2666--2671}, year = {2015}, month = sep, address = {Hamburg, Germany}, abstract = {The Space Exploration Guided Heuristic Search (SEHS) method solves the motion planning problem, especially for car-like robots, in two steps: a circle-based space exploration in the workspace followed by a circle-guided heuristic search in the configuration space. This paper extends this approach for kinodynamic planning in dynamic environments by performing the exploration in both space and time domains. Thus, a time-dependent heuristic is constructed to guide the search algorithm applying a kinodynamic vehicle model. Furthermore, the search step-size and state resolution are adapted incrementally to guarantee resolution completeness with a trade-off for efficiency. 
The performance of Space-Time Exploration Guided Heuristic Search (STEHS) approach is verified in two scenarios and compared with several search-based and sampling-based methods.}, doi = {10.1109/IROS.2015.7353741}, keywords = {autonomous driving, robotics}, url = {https://www.youtube.com/watch?v=AmyweePd1HU}, } @inproceedings{Somani2015a, author = {Perzylo, Alexander and Somani, Nikhil and Profanter, Stefan and Rickert, Markus and Knoll, Alois}, title = {Multimodal Binding of Parameters for Task-Based Robot Programming Based on Semantic Descriptions of Modalities and Parameter Types}, booktitle = {Proceedings of the Workshop on Multimodal Semantics for Robotic Systems, {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems ({IROS})}, year = {2015}, month = sep, address = {Hamburg, Germany}, abstract = {In this paper, we describe our ongoing efforts to design a cognition-enabled industrial robotic workcell, which significantly increases the efficiency of teaching and adapting robot tasks. We have designed a formalism to match task parameter and input modality types, in order to infer suitable means for binding values to those parameters. All modalities are integrated through a graphical user interface, which a human operator can use to program industrial robots in an intuitive way by arbitrarily choosing modalities according to his or her preference.}, keywords = {robotics, smerobotics}, } @inproceedings{Chen2015a, author = {Chen, Chao and Rickert, Markus and Knoll, Alois}, title = {Path Planning with Orientation-Aware Space Exploration Guided Heuristic Search for Autonomous Parking and Maneuvering}, booktitle = {Proceedings of the {IEEE} Intelligent Vehicles Symposium ({IV})}, pages = {1148--1153}, year = {2015}, month = jul, address = {Seoul, South Korea}, abstract = {Due to the nonholonomic constraints of the vehicle kinematics, parking and maneuvering a car in a narrow clustered space are among the most challenging driving tasks. 
This paper introduces an extended version of Space Exploration Guided Heuristic Search (SEHS) method, called Orientation-Aware Space Exploration Guided Heuristic Search (OSEHS), to solve the path planning problems for parking and maneuvering. This method considers the orientation of a vehicle in the space exploration phase to achieve knowledge about driving directions. Such information is exploited later in the heuristic search phase to improve the planning efficiency in maneuvering scenarios. This approach is not bound to the specific domain knowledge about a parking or maneuvering task, but obtains the space dimension and orientation information through a generic exploration procedure. Therefore, it is convenient to integrate the maneuvering ability into a general SEHS motion planning framework. Experiments show that the OSEHS approach produces better results than common random-sampling methods and general heuristic search methods.}, doi = {10.1109/IVS.2015.7225838}, keywords = {autonomous driving, robotics}, } @inproceedings{ChenChao2015b, author = {Chen, Chao and Gaschler, Andre and Rickert, Markus and Knoll, Alois}, title = {Task Planning for Highly Automated Driving}, booktitle = {Proceedings of the {IEEE} Intelligent Vehicles Symposium ({IV})}, pages = {940--945}, year = {2015}, month = jul, address = {Seoul, South Korea}, abstract = {A hybrid planning approach is presented in this paper with the focus of integrating task planning and motion planning for highly automated driving. In the context of task planning, the vehicle and environment states are transformed from the continuous configuration space to a discrete state space. A planning problem is solved by a search algorithm for an optimal task sequence to reach the goal conditions in the symbolic space, regarding constraints such as space topology, place occupation, and traffic rules. 
Each task can be mapped to a specific driving maneuver and solved with a dedicated motion planning method in the continuous configuration space. The task planning approach not only bridges the gap between high-level navigation and low-level motion planning, but also provides a modular domain description that can be developed and verified individually. Our task planner for automated driving is evaluated in several scenarios with prior knowledge about the road-map and sensing range of the vehicle. Behavior that is otherwise complex to achieve is planned according to traffic rules and re-planned regarding the on-line perception.}, doi = {10.1109/IVS.2015.7225805}, keywords = {autonomous driving, robotics}, } @inproceedings{Perzylo2015a, author = {Perzylo, Alexander and Somani, Nikhil and Profanter, Stefan and Rickert, Markus and Knoll, Alois}, title = {Toward Efficient Robot Teach-In and Semantic Process Descriptions for Small Lot Sizes}, booktitle = {Proceedings of the Workshop on Combining AI Reasoning and Cognitive Science with Robotics, Robotics: Science and Systems ({RSS})}, year = {2015}, month = jul, address = {Rome, Italy}, abstract = {We present a novel robot programming methodology that is aimed at reducing the level of robotics expert knowledge needed to operate industrial robotic systems by explicitly modeling this knowledge and abstracting it from the user. Most of the current robot programming paradigms are either user-centric and fully-specify the robot's task to the lowest detail (used mostly in large industrial robotic systems) or fully autonomous solutions that generate the tasks from a problem description (used often in service and personal robotics). We present an approach that is user-centric and can interpret underspecified robot tasks. Such task descriptions make the system amenable for users that are experts in a particular domain, but have limited knowledge about robotics and are thus not able to specify low-level details and instructions. 
Semantic models for all involved entities enable automatic reasoning about underspecified tasks and missing pieces of information. We demonstrate this approach on an industrial assembly use-case and present a preliminary evaluation---both qualitatively and quantitatively---vis-{\`{a}}-vis state-of-the-art solutions available from industrial robot manufacturers.}, keywords = {robotics, smerobotics}, url = {http://youtu.be/B1Qu8Mt3WtQ}, } @inproceedings{Gaschler2015b, author = {Gaschler, Andre and Kessler, Ingmar and Petrick, Ronald P. A. and Knoll, Alois}, title = {Extending the Knowledge of Volumes Approach to Robot Task Planning with Efficient Geometric Predicates}, booktitle = {Proceedings of the {IEEE} International Conference on Robotics and Automation (ICRA)}, year = {2015}, month = jun, doi = {10.1109/ICRA.2015.7139619}, keywords = {robotics, james, smerobotics}, } @inproceedings{, author = {Lenz, David and Kessler, Tobias and Knoll, Alois}, title = {Stochastic Model Predictive Controller with Chance Constraints for Comfortable and Safe Driving Behavior of Autonomous Vehicles}, booktitle = {Proceedings of the IEEE Intelligent Vehicles Symposium}, year = {2015}, month = jun, location = {COEX, Seoul, Korea}, doi = {10.1109/IVS.2015.7225701}, keywords = {autonomous driving, robotics}, } @inproceedings{mopl, author = {Jentzsch, S{\"{o}}ren and Gaschler, Andre and Khatib, Oussama and Knoll, Alois}, title = {{MOPL}: A Multi-Modal Path Planner for Generic Manipulation Tasks}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, year = {2015}, address = {Hamburg, Germany}, doi = {10.1109/IROS.2015.7354263}, keywords = {robotics, james, smerobotics}, } @inproceedings{Perzylo2015d, author = {Perzylo, Alexander and Griffiths, Sascha and Lafrenz, Reinhard and Knoll, Alois}, title = {Generating Grammars for Natural Language Understanding from Knowledge about Actions and Objects}, booktitle = {Proceedings of the IEEE 
International Conference on Robotics and Biomimetics (ROBIO)}, pages = {2008--2013}, year = {2015}, address = {Zhuhai, China}, abstract = {Many applications in the fields of Service Robotics and Industrial Human-Robot Collaboration, require interaction with a human in a potentially unstructured environment. In many cases, a natural language interface can be helpful, but it requires powerful means of knowledge representation and processing, e.g., using ontologies and reasoning. In this paper we present a framework for the automatic generation of natural language grammars from ontological descriptions of robot tasks and interaction objects, and their use in a natural language interface. Robots can use it locally or even share this interface component through the RoboEarth framework in order to benefit from features such as referent grounding, ambiguity resolution, task identification, and task assignment.}, doi = {10.1109/ROBIO.2015.7419068}, keywords = {robotics, smerobotics}, url = {http://youtu.be/mgPQevfTWP8}, } @inproceedings{Gaschler2015rss, author = {Petrick, Ronald P. A. 
and Gaschler, Andre}, title = {Knowledge-Level Planning for Robot Task Planning and Human-Robot Interaction}, booktitle = {RSS Workshop on Combining AI Reasoning and Cognitive Science with Robotics}, year = {2015}, keywords = {robotics, james}, } @inproceedings{Gaschler2015e, author = {M{\"{u}}hlbacher-Karrer, Stephan and Gaschler, Andre and Zangl, Hubert}, title = {Responsive Fingers -- Capacitive Sensing During Object Manipulation}, booktitle = {Proceedings of the {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems ({IROS})}, year = {2015}, doi = {10.1109/IROS.2015.7354001}, keywords = {robotics}, } @techreport{Gaschler2015d, author = {Gaschler, Andre and Fischer, Quirin and Knoll, Alois}, title = {The Bounding Mesh Algorithm}, number = {TUM-I1522}, year = {2015}, institution = {Technische Universit{\"{a}}t M{\"{u}}nchen}, address = {Munich, Germany}, abstract = {We present an algorithm to generate a one-sided approximation of a given triangular mesh. We refer to such an approximate mesh as a bounding mesh, which includes the original mesh and has fewer vertices. Likewise, an inner bounding mesh is defined as an approximate mesh that is included by a given mesh. Our proposed bounding mesh algorithm performs iterative edge contractions and can generate both types of approximation. Contrary to regular, two-sided mesh approximation, which is a well studied subject in computer graphics, our algorithm is novel and one of a handful approaches to one-sided mesh approximation. While we are the first to apply bounding meshes to safe collision detection, path planning, and robot motion planning, applications range further to computer geometry and computer graphics. The bounding mesh algorithm helps pre-processing complex geometries and increases the efficiency of existing geometric algorithms, especially those that search in a bounding volume hierarchy. 
It can speed up search, intersection and inclusion detection, as well as silhouette, clipping, and other operations, acting as an intermediate level of approximation between coarser bounding boxes or bounding spheres and the exact mesh. Furthermore, the bounding mesh algorithm combines well with approximate convex decomposition to generate a bounding set of convexes with very few vertices, which is an efficient data structure for intersection, distance and normal computation, as well as other geometric operations.}, keywords = {robotics, james, smerobotics}, url = {https://mediatum.ub.tum.de/?id=1255722}, } @article{Rickert2014a, author = {Rickert, Markus and Sieverling, Arne and Brock, Oliver}, title = {Balancing Exploration and Exploitation in Sampling-Based Motion Planning}, journal = {{IEEE} Transactions on Robotics}, volume = {30}, number = {6}, pages = {1305--1317}, year = {2014}, month = dec, abstract = {We present the exploring/exploiting tree (EET) algorithm for motion planning. The EET planner deliberately trades probabilistic completeness for computational efficiency. This tradeoff enables the EET planner to outperform state-of-the-art sampling-based planners by up to three orders of magnitude. We show that these considerable speedups apply for a variety of challenging real-world motion planning problems. The performance improvements are achieved by leveraging work space information to continuously adjust the sampling behavior of the planner. When the available information captures the planning problem's inherent structure, the planner's sampler becomes increasingly exploitative. When the available information is less accurate, the planner automatically compensates by increasing local configuration space exploration. 
We show that active balancing of exploration and exploitation based on workspace information can be a key ingredient to enabling highly efficient motion planning in practical scenarios.}, doi = {10.1109/TRO.2014.2340191}, keywords = {robotics, motion planning, path planning, robotics library}, } @inproceedings{Roitberg2014, author = {Roitberg, Alina and Perzylo, Alexander and Somani, Nikhil and Giuliani, Manuel and Rickert, Markus and Knoll, Alois}, title = {Human Activity Recognition in the Context of Industrial Human-Robot Interaction}, booktitle = {Proceedings of the AsiaPacific Signal and Information Processing Association Annual Summit and Conference (APSIPA ASC)}, pages = {1--10}, year = {2014}, month = dec, address = {Siem Reap, Cambodia}, abstract = {Human activity recognition is crucial for intuitive cooperation between humans and robots. We present an approach for activity recognition for applications in the context of human-robot interaction in industrial settings. The approach is based on spatial and temporal features derived from skeletal data of human workers performing assembly tasks. These features were used to train a machine learning framework, which classifies discrete time frames with Random Forests and subsequently models temporal dependencies between the resulting states with a Hidden Markov Model. We considered the following three groups of activities: Movement, Gestures, and Object handling. A dataset has been collected which is comprised of 24 recordings of several human workers performing such activities in a human-robot interaction environment, as typically seen at small and medium-sized enterprises. 
The evaluation shows that the approach achieves a recognition accuracy of up to 88% for some activities and an average accuracy of 73%.}, doi = {10.1109/APSIPA.2014.7041588}, keywords = {robotics, james, smerobotics}, } @inproceedings{Somani2014a, author = {Somani, Nikhil and Cai, Caixia and Perzylo, Alexander and Rickert, Markus and Knoll, Alois}, title = {Object Recognition Using Constraints from Primitive Shape Matching}, booktitle = {Proceedings of the 10th International Symposium on Visual Computing (ISVC'14)}, publisher = {Springer}, pages = {783--792}, year = {2014}, month = dec, address = {Las Vegas, NV, USA}, abstract = {In this paper, an object recognition and pose estimation approach based on constraints from primitive shape matching is presented. Additionally, an approach for primitive shape detection from point clouds using an energy minimization formulation is presented. Each primitive shape in an object adds geometric constraints on the object's pose. An algorithm is proposed to find minimal sets of primitive shapes which are sufficient to determine the complete 3D position and orientation of a rigid object. The pose is estimated using a linear least squares solver over the combination of constraints enforced by the primitive shapes. 
Experiments illustrating the primitive shape decomposition of object models, detection of these minimal sets, feature vector calculation for sets of shapes and object pose estimation have been presented on simulated and real data.}, doi = {10.1007/978-3-319-14249-4_75}, keywords = {robotics, smerobotics}, } @inproceedings{Zaraki2014a, author = {Zaraki, Abolfazl and Giuliani, Manuel and Dehkordi, Maryam Banitalebi and Mazzei, Daniele and D'ursi, Annamaria and Rossi, Danilo De}, title = {An RGB-D Based Social Behavior Interpretation System for a Humanoid Social Robot}, booktitle = {Proceedings of 2nd RSI International Conference on Robotics and Mechatronics (ICRoM 2014)}, year = {2014}, month = oct, address = {Tehran, Iran}, note = {Best Paper Award, Best Presentation Award}, keywords = {robotics}, } @inproceedings{Cai2014a, author = {Cai, Caixia and Dean-Leon, Emmanuel and Somani, Nikhil and Knoll, Alois}, title = {6D Image-based Visual Servoing for Robot Manipulators with Uncalibrated Stereo Cameras}, booktitle = {Proceedings of the {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems (IROS)}, year = {2014}, month = sep, keywords = {robotics}, } @inproceedings{, author = {Chen, Guang and Clarke, Daniel and Giuliani, Manuel and Gaschler, Andre and Weikersdorfer, David and Knoll, Alois}, title = {Multi-modality gesture detection and recognition with un-supervision, randomization and discrimination}, booktitle = {In ChaLearn Looking at People Workshop, European Conference on Computer Vision (ECCV2014)}, year = {2014}, month = sep, keywords = {robotics}, } @inproceedings{Chen2014a, author = {Chen, Chao and Rickert, Markus and Knoll, Alois}, title = {A Traffic Knowledge Aided Vehicle Motion Planning Engine Based on Space Exploration Guided Heuristic Search}, booktitle = {Proceedings of the IEEE Intelligent Vehicles Symposium}, pages = {535--540}, year = {2014}, month = jun, address = {Dearborn, MI, USA}, location = {Dearborn, Michigan, USA}, abstract 
= {A real-time vehicle motion planning engine is presented in this paper, with the focus on exploiting the prior and online traffic knowledge, e.g., predefined roadmap, prior environment information, behaviour-based motion primitives, within the space exploration guided heuristic search (SEHS) framework. The SEHS algorithm plans a kinodynamic vehicle motion in two steps: a geometric investigation of the free space, followed by a grid-free heuristic search employing primitive motions. These two procedures are generic and possible to take advantage of traffic knowledge. In this paper, the space exploration is supported by a roadmap and the heuristic search benefits from the behaviour-based primitives. Based on this idea, a light weighted motion planning engine is built, with the purpose to handle the traffic knowledge and the planning time in real-time motion planning. The experiments demonstrate that this SEHS motion planning engine is flexible and scalable for practical traffic scenarios with better results than the baseline SEHS motion planner regarding the provided traffic knowledge.}, doi = {10.1109/IVS.2014.6856458}, keywords = {autonomous driving, robotics}, } @inproceedings{, author = {Chen, Guang and Giuliani, Manuel and Clarke, Daniel and Knoll, Alois}, title = {Action recognition using ensemble weighted multi-instance learning}, booktitle = {In IEEE International Conference on Robotics and Automation (ICRA)}, year = {2014}, month = jun, keywords = {robotics}, } @inproceedings{Gaschler2014b, author = {Gaschler, Andre and Springer, Maximilian and Rickert, Markus and Knoll, Alois}, title = {Intuitive Robot Tasks with Augmented Reality and Virtual Obstacles}, booktitle = {Proceedings of the {IEEE} International Conference on Robotics and Automation ({ICRA})}, pages = {6026--6031}, year = {2014}, month = jun, address = {Hong Kong, China}, abstract = {Today's industrial robots require expert knowledge and are not profitable for small and medium sized enterprises 
with their small lot sizes. It is our strong belief that more intuitive robot programming in an augmented reality robot work cell can dramatically simplify re-programming and leverage robotics technology in short production cycles. In this paper, we present a novel augmented reality system for defining virtual obstacles, specifying tool positions, and specifying robot tasks. We evaluate the system in a user study and, more specifically, investigate the input of robot end-effector orientations in general.}, doi = {10.1109/ICRA.2014.6907747}, keywords = {robotics, james, smerobotics}, } @inproceedings{Giuliani2014a, author = {Giuliani, Manuel and Marschall, Thomas and Isard, Amy}, title = {Using Ellipsis Detection and Word Similarity for Transformation of Spoken Language into Grammatically Valid Sentences}, booktitle = {Proceedings of the 15th Annual SIGdial Meeting on Discourse and Dialogue (SIGDIAL 2014)}, year = {2014}, month = jun, address = {Philadelphia, USA}, keywords = {robotics, james}, } @article{Zaraki2014, author = {Zaraki, Abolfazl and Mazzei, Daniele and Giuliani, Manuel and Rossi, Danilo De}, title = {Designing and Evaluating a Social Gaze-Control System for a Humanoid Robot}, journal = {IEEE Transactions on Human-Machine Systems}, volume = {2}, number = {44}, pages = {157--168}, year = {2014}, month = apr, issn = {2168-2291}, doi = {10.1109/thms.2014.2303083}, keywords = {Active vision, context-dependent social gaze behavior, human--robot interaction, robotics, scene analysis, social attention}, } @inproceedings{Loth2014a, author = {Loth, Sebastian and Giuliani, Manuel and de Ruiter, Jan P.}, title = {Ghost-in-the-Machine: Initial Results}, booktitle = {Proceedings of the 9th ACM/IEEE International Conference on Human-Robot Interaction (HRI 2014)}, year = {2014}, month = mar, address = {Bielefeld, Germany}, keywords = {robotics, james}, } @inproceedings{Gaschler2014d, author = {Petrick, Ronald P. A. 
and Gaschler, Andre}, title = {Extending Knowledge-Level Contingent Planning to Robot Task Planning}, booktitle = {International Conference on Automated Planning and Scheduling Workshop on Planning and Robotics (PlanRob 2014)}, year = {2014}, keywords = {robotics, james}, } @inproceedings{Keizer2014, author = {Keizer, Simon and Foster, Mary Ellen and Gaschler, Andre and Giuliani, Manuel and Isard, Amy and Lemon, Oliver}, title = {Handling uncertain input in multi-user human-robot interaction}, booktitle = {Proceedings of the 23rd IEEE International Symposium on Robot and Human Interactive Communication (RO-MAN)}, year = {2014}, keywords = {robotics, james}, } @inproceedings{Gaschler-etal:2014, author = {Gaschler, Andre and Nogina, Svetlana and Petrick, Ronald P. A. and Knoll, Alois}, title = {Planning Perception and Action for Cognitive Mobile Manipulators}, booktitle = {Proceedings of {SPIE} Volume 9025 -- Intelligent Robots and Computer Vision {XXXI}: Algorithms and Techniques}, year = {2014}, keywords = {robotics, james}, } @inproceedings{Gaschler2013c, author = {Gaschler, Andre and Petrick, Ronald P. A. and Giuliani, Manuel and Rickert, Markus and Knoll, Alois}, title = {{KVP}: A Knowledge of Volumes Approach to Robot Task Planning}, booktitle = {Proceedings of the {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems ({IROS})}, pages = {202--208}, year = {2013}, month = nov, address = {Tokyo, Japan}, abstract = {Robot task planning is an inherently challenging problem, as it covers both continuous-space geometric reasoning about robot motion and perception, as well as purely symbolic knowledge about actions and objects. This paper presents a novel "knowledge of volumes" framework for solving generic robot tasks in partially known environments. 
In particular, this approach (abbreviated, KVP) combines the power of symbolic, knowledge-level AI planning with the efficient computation of volumes, which serve as an intermediate representation for both robot action and perception. While we demonstrate the effectiveness of our framework in a bimanual robot bartender scenario, our approach is also more generally applicable to tasks in automation and mobile manipulation, involving arbitrary numbers of manipulators.}, doi = {10.1109/IROS.2013.6696354}, keywords = {robotics, james}, } @inproceedings{Cai2013b, author = {Cai, Caixia and Dean-Leon, Emmanuel and Somani, Nikhil and Knoll, Alois}, title = {3D Image-based Dynamic Visual Servoing with Uncalibrated Stereo Cameras}, booktitle = {The 44th Internation Symposium on Robotics (ISR)}, year = {2013}, month = oct, keywords = {robotics}, } @article{Foster2013a, author = {Foster, Mary Ellen and Giuliani, Manuel and Isard, Amy}, editor = {Mester, Rudolf and Felsberg, Michael}, title = {Task-Based Evaluation of Context-Sensitive Referring Expressions in Human-Robot Dialogue}, journal = {Language and Cognitive Processes}, year = {2013}, month = oct, doi = {10.1080/01690965.2013.855802}, keywords = {robotics, james}, } @inproceedings{Somani2013b, author = {Somani, Nikhil and Dean-Leon, Emmanuel and Cai, Caixia and Knoll, Alois}, title = {Scene Perception and Recognition for Human-Robot Co-Operation}, booktitle = {ICIAP'13 workshop: First International Workshop on Assistive Computer Vision and Robotics (ACVR 2013). 
The 17th International Conference on Image Analysis and Processing.}, year = {2013}, month = sep, keywords = {robotics, smerobotics}, } @inproceedings{Tenorth2013b, author = {Tenorth, Moritz and Perzylo, Alexander and Lafrenz, Reinhard and Beetz, Michael}, title = {{The RoboEarth language: Representing and Exchanging Knowledge about Actions, Objects and Environments (Extended Abstract)}}, booktitle = {IJCAI'13: Proceedings of the 23rd international joint conference on Artifical intelligence}, pages = {3091--3095}, year = {2013}, month = aug, address = {Beijing, China}, note = {Best papers and thesis in sister conferences track. (Invited Paper)}, abstract = {The community-based generation of content has been tremendously successful in the World Wide Web - people help each other by providing information that could be useful to others. We are trying to transfer this approach to robotics in order to help robots acquire the vast amounts of knowledge needed to competently perform everyday tasks. RoboEarth is intended to be a web community by robots for robots to autonomously share descriptions of tasks they have learned, object models they have created, and environments they have explored. In this paper, we report on the formal language we developed for encoding this information and present our approaches to solve the inference problems related to finding information, to determining if information is usable by a robot, and to grounding it on the robot platform.}, keywords = {robotics}, } @inproceedings{Somani2013c, author = {Somani, Nikhil and Dean-Leon, Emmanuel and Cai, Caixia and Knoll, Alois}, title = {Perception and Reasoning for Scene Understanding in Human-Robot Interaction Scenarios}, booktitle = {CAIP'13 workshop: Recognition and Action for Scene understanding (REACTS 2013). 
The 15th International Conference of Computer Analysis of Images and Patterns.}, year = {2013}, month = aug, keywords = {robotics, smerobotics}, } @inproceedings{Somani2013a, author = {Somani, Nikhil and Dean-Leon, Emmanuel and Cai, Caixia and Knoll, Alois}, title = {Scene Perception and Recognition in Industrial Environments for Human-Robot Interaction}, booktitle = {9th International Symposium on Visual Computing (ISVC'13)}, publisher = {Springer}, year = {2013}, month = jul, keywords = {robotics, smerobotics}, } @inproceedings{DiMarco2013, author = {Marco, Daniel Di and Janssen, Rob and Perzylo, Alexander and de Molengraft, Marinus J. G. Van and Levi, Paul}, title = {{A Deliberation Layer for Instantiating Robot Execution Plans from Abstract Task Descriptions}}, booktitle = {International Conference on Automated Planning and Scheduling (ICAPS) Workshop on Planning and Robotics (PlanRob)}, pages = {12--19}, year = {2013}, month = jun, address = {Rome, Italy}, abstract = {We present an application of Hierarchical Task Network (HTN) planning to create robot execution plans, that are adapted to the environment and the robot hardware from abstract task descriptions. Our main intention is to show that different robotic platforms can make use of the same high level symbolic task description. As an off-the-shelf planning component, the SHOP2 HTN planner is adopted. All the domain knowledge is encoded in the Web Ontology Language (OWL) and stored in a world wide accessible database, which allows multiple systems to reuse and improve upon this knowledge. For task execution, the execution plan is generated using the CRAM plan language (CPL). We demonstrate the functionality of the system in executing a pick-and-place task in a simulated environment with two different service robots, the TU/e Amigo robot prototype and the Fraunhofer IPA Care-O-Bot 3. 
The experiment shows that although the robots differ in hardware capabilities, the use of HTN planning adds information that is crucial for successful task execution and enables both systems to successfully execute the instructed task.}, keywords = {robotics}, } @inproceedings{Chen2013a, author = {Chen, Chao and Rickert, Markus and Knoll, Alois}, title = {Combining Space Exploration and Heuristic Search in Online Motion Planning for Nonholonomic Vehicles}, booktitle = {Proceedings of the IEEE Intelligent Vehicles Symposium}, pages = {1307--1312}, year = {2013}, month = jun, address = {Gold Coast, Australia}, abstract = {This paper presents an efficient motion planning method for nonholonomic vehicles, which combines space exploration and heuristic search to achieve online performance. The space exploration employs simple geometric shapes to investigate the collision-free space for the dimension and topology information. Then, the heuristic search is guided by this knowledge to generate vehicle motions under kinodynamic constraints. The overall performance of this framework greatly benefits from the cooperation of these two simple generic algorithms in suitable domains, which sequentially handles the free-space information and kinodynamic constraints. Experimental results show that this method is able to generate motions for nonholonomic vehicles in a time frame of less than 100 milliseconds for the given problem settings. The contribution of this work is the development of a Space Exploration Guided Heuristic Search with a circle-path based heuristics and adaptable search step size. 
The approach is grid-free and able to plan nonholonomic vehicle motions under kinodynamic constraints.}, doi = {10.1109/IVS.2013.6629647}, keywords = {autonomous driving, robotics, path planning, motion planning}, } @article{Tenorth2013a, author = {Tenorth, Moritz and Perzylo, Alexander and Lafrenz, Reinhard and Beetz, Michael}, title = {{Representation and Exchange of Knowledge about Actions, Objects, and Environments in the RoboEarth Framework}}, journal = {IEEE Transactions on Automation Science and Engineering (T-ASE)}, volume = {10}, number = {3}, pages = {643-651}, year = {2013}, note = {(Best Paper Award Finalist)}, abstract = {The community-based generation of content has been tremendously successful in the World Wide Web - people help each other by providing information that could be useful to others. We are trying to transfer this approach to robotics in order to help robots acquire the vast amounts of knowledge needed to competently perform everyday tasks. RoboEarth is intended to be a web community by robots for robots to autonomously share descriptions of tasks they have learned, object models they have created, and environments they have explored. In this paper, we report on the formal language we developed for encoding this information and present our approaches to solve the inference problems related to finding information, to determining if information is usable by a robot, and to grounding it on the robot platform. Note to practitioners: In this paper, we report on a formal language for knowledge representation that is used in the ROBOEARTH system, a web-based knowledge base intended to be like a "Wikipedia for robots." The objective is to enable robots to share information about how to perform actions, how to recognize and interact with objects, and where to find objects in an environment. 
The developed language allows to store such information in a format that supports logical inference, so that robots can for example autonomously decide if they have all prerequisites needed for performing a described action. In laboratory experiments, the system has been applied to the exchange of pick-and-place style activities between two mobile manipulation robots. We are currently extending the representation towards more fine-grained action specifications.}, doi = {10.1109/tase.2013.2244883}, keywords = {robotics}, } @inproceedings{giuliani-etal-icmi2013, author = {Giuliani, Manuel and Petrick, Ronald P. A. and Foster, Mary Ellen and Gaschler, Andre and Isard, Amy and Pateraki, Maria and Sigalas, Markos}, title = {Comparing Task-Based and Socially Intelligent Behaviour in a Robot Bartender}, booktitle = {Proceedings of the 15th ACM International Conference on Multimodal Interaction ({ICMI} 2013)}, year = {2013}, address = {Sydney, Australia}, doi = {10.1145/2522848.2522869}, keywords = {james, robotics}, } @inproceedings{foster-etal-icmi2013, author = {Foster, Mary Ellen and Gaschler, Andre and Giuliani, Manuel}, title = {How Can I Help You? Comparing Engagement Classification Strategies for a Robot Bartender}, booktitle = {Proceedings of the 15th ACM International Conference on Multimodal Interaction ({ICMI} 2013)}, year = {2013}, address = {Sydney, Australia}, doi = {10.1145/2522848.2522879}, keywords = {james, robotics}, } @inproceedings{Gaschler2013b, author = {Gaschler, Andre and Petrick, Ronald P. A. and Kr{\"{o}}ger, Torsten and Khatib, Oussama and Knoll, Alois}, title = {Robot Task and Motion Planning with Sets of Convex Polyhedra}, booktitle = {Robotics: Science and Systems (RSS) Workshop on Combined Robot Motion Planning and AI Planning for Practical Applications}, year = {2013}, keywords = {robotics, james}, } @inproceedings{Gaschler2013a, author = {Gaschler, Andre and Petrick, Ronald P. A. 
and Kr{\"{o}}ger, Torsten and Knoll, Alois and Khatib, Oussama}, title = {Robot Task Planning with Contingencies for Run-time Sensing}, booktitle = {Proceedings of the Workshop on Combining Task and Motion Planning, IEEE International Conference on Robotics and Automation (ICRA)}, year = {2013}, keywords = {robotics, james}, } @article{Hees2013, author = {van Hees, Vincent T. and Gorzelniak, Lukas and Dean-Leon, Emmanuel and Eder, Martin and Pias, Marcelo and Taherian, Salman and Ekelund, Ulf and Renstr{\"{o}}m, Frida and Franks, Paul W. and Horsch, Alexander and Brage, Soren}, title = {Separating Movement and Gravity Components in an Acceleration Signal and Implications for the Assessment of Human Daily Physical Activity}, publisher = {Public Library of Science}, journal = {PLoS ONE}, volume = {8}, number = {4}, pages = {1-10}, year = {2013}, timestamp = 2013.07.23, owner = {jeraj}, keywords = {robotics}, url = {http://www.plosone.org/article/info%3Adoi%2F10.1371%2Fjournal.pone.0061691}, } @inproceedings{Nasseri2013, author = {Nasseri, M. Ali and Eder, Martin and Dean-Leon, Emmanuel and Nair, Suraj and Zapp, Daniel and Maier, Mathias and Lohmann, C. P. 
and Knoll, Alois}, title = {The Introduction of a New Robot for Assistance in Ophthalmic Surgery}, booktitle = {35th Annual International Conference of the IEEE Engineering in Medicine and Biology and Biology Society (EMBC'13)}, publisher = {IEEE Press}, year = {2013}, timestamp = 2013.07.23, owner = {jeraj}, keywords = {robotics}, } @inproceedings{Gaschler2013e, author = {Keizer, Simon and Foster, Mary Ellen and Lemon, Oliver and Gaschler, Andre and Giuliani, Manuel}, title = {Training and evaluation of an {MDP} model for social multi-user human-robot interaction}, booktitle = {Proceedings of the 14th Annual SIGdial Meeting on Discourse and Dialogue}, year = {2013}, keywords = {robotics, james}, } @incollection{Jentzsch2013a, author = {Jentzsch, S{\"{o}}ren and Riedel, Sebastian and Denz, Sebastian and Brunner, Sebastian}, editor = {Chen, Xiaoping and Stone, Peter and Sucar, LuisEnrique and van der Zant, Tijn}, title = {TUMsBendingUnits from TU Munich: RoboCup 2012 Logistics League Champion}, booktitle = {RoboCup 2012: Robot Soccer World Cup XVI}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, volume = {7500}, pages = {48-58}, year = {2013}, isbn = {978-3-642-39249-8}, doi = {10.1007/978-3-642-39250-4_5}, keywords = {robotics}, language = {English}, } @inproceedings{Cai2013, author = {Cai, Caixia and Dean-Leon, Emmanuel and Mendoza, Dario and Somani, Nikhil and Knoll, Alois}, title = {Uncalibrated 3D Stereo Image-based Dynamic Visual Servoing for Robot Manipulators}, booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, year = {2013}, timestamp = 2013.07.23, owner = {jeraj}, note = {accepted}, keywords = {robotics}, } @article{Giuliani2013a, author = {Giuliani, Manuel and Knoll, Alois}, title = {Using Embodied Multimodal Fusion to Perform Supportive and Instructive Robot Roles in Human-Robot Interaction}, publisher = {Springer Netherlands}, journal = {International Journal of Social 
Robotics}, volume = {5}, number = {3}, pages = {345--356}, year = {2013}, issn = {1875-4791}, doi = {10.1007/s12369-013-0194-y}, keywords = {Embodied multimodal fusion, Human-robot interaction, Robot roles, robotics, james}, language = {English}, } @inproceedings{Gaschler2013d, author = {Schlegl, Thomas and Kr{\"{o}}ger, Torsten and Gaschler, Andre and Khatib, Oussama and Zangl, Hubert}, editor = {Kr{\"{o}}ger, Torsten and Wahl, Friedrich M.}, title = {Virtual Whiskers -- Highly Responsive Robot Collision Avoidance}, booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, year = {2013}, keywords = {robotics}, } @inproceedings{Wittmeier2012a, author = {Wittmeier, Steffen and Gaschler, Andre and Jantsch, M. and Dalamagkidis, K. and Knoll, Alois}, title = {Calibration of a physics-based model of an anthropomimetic robot using Evolution Strategies}, booktitle = {Intelligent Robots and Systems (IROS), 2012 IEEE/RSJ InternationalConference on}, pages = {445--450}, year = {2012}, month = oct, issn = {2153-0858}, doi = {10.1109/iros.2012.6385591}, keywords = {eccerobot, robotics}, } @inproceedings{Zhang2012b, author = {Zhang, Feihu and St{\"{a}}hle, Hauke and Gaschler, Andre and Buckl, Christian and Knoll, Alois}, title = {Single Camera Visual Odometry Based on Random Finite Set Statistics}, booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, year = {2012}, month = oct, note = {accepted}, keywords = {robotics}, } @inproceedings{Giuliani2012a, author = {Giuliani, Manuel and de Ruiter, Jan}, title = {Combining Classical and Embodied Multimodal Fusion for Human-Robot Interaction}, booktitle = {Proceedings of the Embodied and Situated Language Processing 2012 (ESLP 2012)}, year = {2012}, month = aug, address = {Newcastle Upon Tyne, England}, keywords = {robotics}, } @inproceedings{Nasseri2012, author = {Nasseri, M. 
Ali and Dean-Leon, Emmanuel and Nair, Suraj and Eder, Martin and Maier, Mathias and Lohmann, C. P. and Knoll, Alois}, title = {Clinical Motion Tracking and Motion Analysis during Ophthalmic Surgery Using Electromagnetic Tracking System}, booktitle = {Proceedings of the 5th International Conference on BioMedical Engineering and Informatics (BMEI 2012)}, publisher = {IEEE Press}, pages = {1006-1010}, year = {2012}, timestamp = 2013.07.23, owner = {jeraj}, keywords = {augmented reality, Electromagnetic Tracking, ophthalmology, robotic surgery, robotics}, } @inproceedings{Can2012, author = {Can, S. and Jensen, B. and Dean-Leon, Emmanuel and Staub, C. and Knoll, Alois and Fiolka, A. and Meining, A. and Feussner, H.}, title = {Kinematics, Control and Workspace Analysis of a Bowden Wire Actuated Manipulator for Minimally Invasive Single-Port Surgery}, booktitle = {IEEE International Conference on Robotics and Biomimetics (ROBIO)}, year = {2012}, timestamp = 2013.07.23, owner = {jeraj}, keywords = {robotics}, } @inproceedings{Gaschler2012a, author = {Gaschler, Andre and Huth, Kerstin and Giuliani, Manuel and Kessler, Ingmar and de Ruiter, Jan and Knoll, Alois}, title = {Modelling State of Interaction from Head Poses for Social {H}uman-{R}obot {I}nteraction}, booktitle = {Proceedings of the {G}aze in {H}uman-{R}obot {I}nteraction Workshop held at the 7th ACM/IEEE International Conference on Human-Robot Interaction (HRI 2012)}, year = {2012}, address = {Boston, MA}, keywords = {james, robotics}, } @inproceedings{Natekin2012, author = {Natekin, Alexey and Kalinkin, Mikhail}, title = {Optimization of the Fuzzy Neural Classifiers by Means of Modelbased Cluster Analysis}, booktitle = {In Proceedings of the IX System Identification and Control Problems Conference (SICPRO)}, year = {2012}, timestamp = 2013.07.23, owner = {jeraj}, address = {Moscow, Russia}, keywords = {robotics}, } @inproceedings{Nair2012, author = {Nair, Suraj and Dean-Leon, Emmanuel and Knoll, Alois}, title = 
{Real-Time 3D Multiple Human Tracking With Robustness Enchancement Through Machine Learning}, booktitle = {International Conference on Computer Vision Theory and Applications}, year = {2012}, timestamp = 2013.07.23, owner = {jeraj}, location = {Rome, Italy}, keywords = {robotics}, } @inproceedings{Gaschler2012b, author = {Gaschler, Andre and Jentzsch, S{\"{o}}ren and Giuliani, Manuel and Huth, Kerstin and de Ruiter, Jan and Knoll, Alois}, title = {Social Behavior Recognition Using Body Posture and Head Pose for Human-Robot Interaction}, booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)}, year = {2012}, doi = {10.1109/iros.2012.6385460}, keywords = {james, robotics}, } @inproceedings{Foster2012, author = {Foster, Mary Ellen and Gaschler, Andre and Giuliani, Manuel and Isard, Amy and Pateraki, Maria and Petrick, Ronald P. A.}, title = {Two People Walk Into a Bar: Dynamic Multi-Party Social Interaction with a Robot Agent}, booktitle = {Proceedings of the 14th ACM International Conference on Multimodal Interaction (ICMI 2012)}, year = {2012}, keywords = {james, robotics}, } @inproceedings{Dean-Leon2012, author = {Dean-Leon, Emmanuel and Nair, Suraj and Knoll, Alois}, title = {User Friendly Matlab-Toolbox for Symbolic Robot Dynamic Modeling used for Control Design}, booktitle = {IEEE International Conference on Robotics and Biomimetics ROBIO-2012}, year = {2012}, timestamp = 2013.07.23, owner = {jeraj}, keywords = {robotics}, } @article{Kupferberg2011a, author = {Kupferberg, Aleksandra and Glasauer, Stefan and Huber, Markus and Rickert, Markus and Knoll, Alois and Brandt, Thomas}, title = {Biological Movement Increases Acceptance of Humanoid Robots as Human Partners in Motor Interaction}, journal = {{AI} \& Society}, volume = {26}, number = {4}, pages = {339--345}, year = {2011}, month = nov, abstract = {The automatic tendency to anthropomorphize our interaction partners and make use of experience acquired in earlier interaction 
scenarios leads to the suggestion that social interaction with humanoid robots is more pleasant and intuitive than that with industrial robots. An objective method applied to evaluate the quality of human–robot interaction is based on the phenomenon of motor interference (MI). It claims that a face-to-face observation of a different (incongruent) movement of another individual leads to a higher variance in one’s own movement trajectory. In social interaction, MI is a consequence of the tendency to imitate the movement of other individuals and goes along with mutual rapport, sense of togetherness, and sympathy. Although MI occurs while observing a human agent, it disappears in case of an industrial robot moving with piecewise constant velocity. Using a robot with human-like appearance, a recent study revealed that its movements led to MI, only if they were based on human prerecording (biological velocity), but not on constant (artificial) velocity profile. However, it remained unclear, which aspects of the human prerecorded movement triggered MI: biological velocity profile or variability in movement trajectory. To investigate this issue, we applied a quasi-biological minimum-jerk velocity profile (excluding variability in the movement trajectory as an influencing factor of MI) to motion of a humanoid robot, which was observed by subjects performing congruent or incongruent arm movements. 
The increase in variability in subjects’ movements occurred both for the observation of a human agent and for the robot performing incongruent movements, suggesting that an artificial human-like movement velocity profile is sufficient to facilitate the perception of humanoid robots as interaction partners.}, doi = {10.1007/s00146-010-0314-2}, keywords = {robotics}, } @inproceedings{Wittmeier2011a, author = {Wittmeier, Steffen and J{\"{a}}ntsch, Michael and Dalamagkidis, Konstantinos and Rickert, Markus and Marques, Hugo Gravato and Knoll, Alois}, title = {Caliper: A Universal Robot Simulation Framework for Tendon-Driven Robots}, booktitle = {Proceedings of the {IEEE}/{RSJ} International Conference on Intelligent Robots and Systems}, pages = {1063--1068}, year = {2011}, month = sep, address = {San Francisco, CA, USA}, abstract = {The development of increasingly complex robots in recent years has been characterized by an extensive use of physics-based simulations for controller design and optimization. Today, a variety of open-source and commercial simulators exist for this purpose for mobile and industrial robots. However, existing simulation engines still lack support for the emerging class of tendon-driven robots. In this paper, an innovative simulation framework for the simulation of tendon-driven robots is presented. It consists of a generic physics simulator capable of utilizing CAD robot models and a set of additional tools for simulation control, data acquisition and system investigation. The framework software architecture has been designed using component-based development principles to facilitate the framework extension and customization. 
Furthermore, for inter-component communication, the operating-system and programming language independent Common Object Request Broker Architecture (CORBA) has been used which simplifies the integration of the framework into existing software environments.}, doi = {10.1109/IROS.2011.6094455}, keywords = {robotics, eccerobot}, } @inproceedings{Weissmann2011, author = {Wei{\ss}mann, Markus and Bedenk, Stefan and Buckl, Christian and Knoll, Alois}, title = {{Model Checking Industrial Robot Systems}}, booktitle = {Proceedings of the 18th International Workshop on Model Checking of Software (SPIN 2011)}, publisher = {Springer-Verlag}, series = {LNCS}, pages = {161--176}, year = {2011}, month = jul, owner = {weissmam}, doi = {10.1007/978-3-642-22306-8_11}, keywords = {abstract interpretation, distributed systems, industrial robots, model checking, robotics}, url = {Weissmann2011.pdf}, } @inproceedings{Lenz2011a, author = {Lenz, Claus and R{\"{o}}der, Thorsten and Rickert, Markus and Knoll, Alois}, editor = {Elmoataz, Abderrahim and Lezoray, Olivier and Nouboud, Fathallah and Mammass, Driss}, title = {Distance-Weighted {K}alman Fusion for Precise Docking Problems}, booktitle = {Proceedings of the International Conference on Mobile Robots and Competitions}, year = {2011}, month = apr, address = {Lisbon, Portugal}, abstract = {This paper proposes a way to solve a highly precise docking problem for a flexible delivery in production environments. The docking problem is seen as one of the fundamental problems to enable more flexible automation using mobile robots. A non-holonomic differential-driven robot with two conveyor belts is used to deliver boxes with goods to two docking slots on an assembly belt and unload them precisely. In order to localize the robot in front of the docking slots, a safety LIDAR and two "minimal invasive" reflecting markers are used that are completely light invariant, thus reaching industrial robustness. 
This measurement is fused with odometry using a Kalman filter and a distance weighted way to compute the reliability of the data streams.}, keywords = {robotics}, } @inproceedings{Gaschler2011b, author = {Gaschler, Andre}, editor = {Mester, Rudolf and Felsberg, Michael}, title = {Visual Motion Capturing for Kinematic Model Estimation of a Humanoid Robot}, booktitle = {Pattern Recognition, 33rd DAGM Symposium}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, volume = {6835}, pages = {438--443}, year = {2011}, address = {Frankfurt, Germany}, isbn = {978-3-642-23122-3}, doi = {10.1007/978-3-642-23123-0_45}, keywords = {robotics}, } @inproceedings{, author = {Wojtczyk, Martin and Barner, Simon and Geisinger, Michael and Knoll, Alois}, title = {Rapid Prototyping of an Adaptive Light-source for Mobile Manipulators with {EasyKit} and {EasyLab}}, booktitle = {Proceedings of the 8th International Conference on Solid State Lighting: Applications, {SPIE} Optics and Photonics, Illumination Engineering}, year = {2008}, month = aug, abstract = {While still not common in day-to-day business, mobile robot platforms form a growing market in robotics. Mobile platforms equipped with a manipulator for increased flexibility have been used successfully in biotech laboratories for sample management as shown on the well-known ESACT meetings. Navigation and object recognition is carried out by the utilization of a mounted machine vision camera. To cope with the different illumination conditions in a large laboratory, development of an adaptive light source was indispensable. We present our approach of rapid developing a computer controlled, adaptive LED light within one single business day, by utilizing the hardware toolbox EasyKit and our appropriate software counterpart EasyLab.}, doi = {10.1117/12.795019}, keywords = {Adaptive Light-source, Service Robotics, Rapid Hardware Prototyping, Zero Code Development, easykit, easylab, robotics, labautomation}, }