@inproceedings{wozniak2021aiblueprint,
  author    = {Wozniak, Ernest and Putzer, Henrik J. and C{\^{a}}rlan, Carmen},
  title     = {{AI}-Blueprint for Deep Neural Networks},
  booktitle = {Proceedings of the Workshop on Artificial Intelligence Safety ({SafeAI} '21)},
  publisher = {CEUR},
  series    = {{CEUR} Workshop Proceedings},
  volume    = {2808},
  year      = {2021},
  month     = feb,
  abstract  = {Development of trustworthy (e.g., safety and/or security critical) hardware/software-based systems needs to rely on well-defined process models. However, engineering trustworthy systems implemented with artificial intelligence (AI) is still poorly discussed. This is, to large extend, due to the standpoint in which AI is a technique applied within software engineering. This work follows a different viewpoint in which AI represents a 3rd kind technology (next to software and hardware), with close connections to software. Consequently, the contribution of this paper is the presentation of a process model, tailored to AI engineering. Its objective is to support the development of trustworthy systems, for which parts of their safety and/or security critical functionality are implemented with AI. As such, it considers methods and metrics at different AI development phases that shall be used to achieve higher confidence in the satisfaction of trustworthiness properties of a developed system.},
  keywords  = {Safety Case, Model-based Systems Engineering, MbSE},
  url       = {http://ceur-ws.org/Vol-2808/Paper_22.pdf},
}

@inproceedings{wozniak2020safetycase,
  author    = {Wozniak, Ernest and C{\^{a}}rlan, Carmen and Acar-Celik, Esra and Putzer, Henrik J.},
  title     = {A Safety Case Pattern for Systems with Machine Learning Components},
  booktitle = {Proceedings of the International Conference on Computer Safety, Reliability, and Security ({SAFECOMP})},
  publisher = {Springer},
  series    = {Lecture Notes in Computer Science},
  volume    = {12235},
  pages     = {370--382},
  year      = {2020},
  month     = sep,
  abstract  = {Several standards from the domain of safety critical systems, in order to support the argumentation of the safety assurance of a system under development, recommend the construction of a safety case. This activity is guided by the objectives to be met, recommended or required by the standards along the safety lifecycle. Ongoing attempts to use Machine Learning (ML) for safety critical functionality revealed certain deficits. For instance, the widely recognized standard for functional safety of automotive systems, ISO 26262, which can be used as a basis to construct a safety case, does not reason about ML. To this end, the goal of this work is to provide a pattern for arguing about the correct implementation of safety requirements in system components based on ML. The pattern is integrated within an overall encompassing approach for safety case generation for automotive systems and its applicability is showcased on a pedestrian avoidance system.},
  doi       = {10.1007/978-3-030-55583-2_28},
  keywords  = {Model-based Systems Engineering, MbSE},
}