@comment{Bibliography database. Keys follow lastnameYYYYkeyword; the two
  pre-existing keys (the DOI-URL key and 8719411) are kept unchanged so that
  existing \cite commands do not break. Page ranges use "--"; month uses the
  predefined macros; acronyms in titles are brace-protected.}

@misc{https://doi.org/10.48550/arxiv.2202.12443,
  author        = {Baracaldo, Nathalie and Anwar, Ali and Purcell, Mark and Rawat, Ambrish and Sinn, Mathieu and Altakrouri, Bashar and Balta, Dian and Sellami, Mahdi and Kuhn, Peter and Buchinger, Matthias},
  title         = {Towards an Accountable and Reproducible Federated Learning: A {FactSheets} Approach},
  publisher     = {arXiv},
  year          = {2022},
  month         = feb,
  eprint        = {2202.12443},
  archiveprefix = {arXiv},
  primaryclass  = {cs.AI},
  doi           = {10.48550/arXiv.2202.12443},
  url           = {https://arxiv.org/abs/2202.12443},
  note          = {License: arXiv.org perpetual, non-exclusive license},
  keywords      = {Artificial Intelligence (cs.AI), Machine Learning (cs.LG), FOS: Computer and information sciences},
  abstract      = {Federated Learning (FL) is a novel paradigm for the shared training of models based on decentralized and private data. With respect to ethical guidelines, FL is promising regarding privacy, but needs to excel vis-{\`a}-vis transparency and trustworthiness. In particular, FL has to address the accountability of the parties involved and their adherence to rules, law and principles. We introduce AF$^2$ Framework, where we instrument FL with accountability by fusing verifiable claims with tamper-evident facts, into reproducible arguments. We build on AI FactSheets for instilling transparency and trustworthiness into the AI lifecycle and expand it to incorporate dynamic and nested facts, as well as complex model compositions in FL. Based on our approach, an auditor can validate, reproduce and certify a FL process. This can be directly applied in practice to address the challenges of AI engineering and ethics.},
}

@inproceedings{bludau2022feature,
  author    = {Bludau, Peter and Pretschner, Alexander},
  title     = {Feature Sets in Just-in-Time Defect Prediction: An Empirical Evaluation},
  booktitle = {Proceedings of the 18th International Conference on Predictive Models and Data Analytics in Software Engineering},
  publisher = {Association for Computing Machinery},
  pages     = {22--31},
  year      = {2022},
  doi       = {10.1145/3558489.3559068},
  keywords  = {machine learning, JIT defect prediction, empirical evaluation},
  abstract  = {Just-in-time defect prediction assigns a defect risk to each new change to a software repository in order to prioritize review and testing efforts. Over the last decades different approaches were proposed in literature to craft more accurate prediction models. However, defect prediction is still not widely used in industry, due to predictions with varying performance. In this study, we evaluate existing features on six open-source projects and propose two new features sets, not yet discussed in literature. By combining all feature sets, we improve MCC by on average 21\%, leading to the best performing models when compared to state-of-the-art approaches. We also evaluate effort-awareness and find that on average 14\% more defects can be identified, inspecting 20\% of changed lines.},
}

@inproceedings{weber2020draw-ki,
  author        = {Weber, Thomas and Han, Zhiwei and Matthes, Stefan and Hu{\ss}mann, Heinrich and Liu, Yuanting},
  title         = {Draw with Me: Human-in-the-Loop for Image Restoration},
  booktitle     = {Proceedings of the 43rd German Conference on AI},
  publisher     = {Springer},
  pages         = {245--248},
  year          = {2020},
  url           = {https://dl.acm.org/doi/abs/10.1145/3377325.3377509},
  internal-note = {NOTE(review): url points to the ACM IUI version of this paper (same DOI as the weber2020draw-iui entry below) -- verify the correct link/DOI for the KI 2020 (Springer) version},
  keywords      = {Interactive Machine Learning, Image Restoration, Image Prior},
  abstract      = {The purpose of image restoration is to recover the original state of damaged images. To overcome the disadvantages of the traditional, manual image restoration process, like the high time consumption and required domain knowledge, automatic inpainting methods have been developed. These methods, however, can have limitations for complex images and may require a lot of input data. To mitigate those, we present "interactive Deep Image Prior", a combination of manual and automated, Deep-Image-Prior-based restoration in the form of an interactive process with the human in the loop. In this process a human can iteratively embed knowledge to provide guidance and control for the automated inpainting process. For this purpose, we extended Deep Image Prior with a user interface which we subsequently analyzed in a user study. Our key question is whether the interactivity increases the restoration quality subjectively and objectively. Secondarily, we were also interested in how such a collaborative system is perceived by users. Our evaluation shows that, even with very little human guidance, our interactive approach has a restoration performance on par or superior to other methods. Meanwhile, very positive results of our user study suggest that learning systems with the human-in-the-loop positively contribute to user satisfaction. We therefore conclude that an interactive, cooperative approach is a viable option for image restoration and potentially other ML tasks where human knowledge can be a correcting or guiding influence.},
}

@inproceedings{weber2020draw-iui,
  author    = {Weber, Thomas and Han, Zhiwei and Matthes, Stefan and Hu{\ss}mann, Heinrich and Liu, Yuanting},
  title     = {Draw with Me: Human-in-the-Loop for Image Restoration},
  booktitle = {Proceedings of the 25th International Conference on Intelligent User Interfaces},
  publisher = {ACM},
  pages     = {243--253},
  year      = {2020},
  doi       = {10.1145/3377325.3377509},
  url       = {https://dl.acm.org/doi/abs/10.1145/3377325.3377509},
  keywords  = {Interactive Machine Learning, Image Restoration, Image Prior},
  abstract  = {The purpose of image restoration is to recover the original state of damaged images. To overcome the disadvantages of the traditional, manual image restoration process, like the high time consumption and required domain knowledge, automatic inpainting methods have been developed. These methods, however, can have limitations for complex images and may require a lot of input data. To mitigate those, we present "interactive Deep Image Prior", a combination of manual and automated, Deep-Image-Prior-based restoration in the form of an interactive process with the human in the loop. In this process a human can iteratively embed knowledge to provide guidance and control for the automated inpainting process. For this purpose, we extended Deep Image Prior with a user interface which we subsequently analyzed in a user study. Our key question is whether the interactivity increases the restoration quality subjectively and objectively. Secondarily, we were also interested in how such a collaborative system is perceived by users. Our evaluation shows that, even with very little human guidance, our interactive approach has a restoration performance on par or superior to other methods. Meanwhile, very positive results of our user study suggest that learning systems with the human-in-the-loop positively contribute to user satisfaction. We therefore conclude that an interactive, cooperative approach is a viable option for image restoration and potentially other ML tasks where human knowledge can be a correcting or guiding influence.},
}

@inproceedings{schmidmaier2019realtime,
  author    = {Schmidmaier, Matthias and Han, Zhiwei and Weber, Thomas and Liu, Yuanting and Hu{\ss}mann, Heinrich},
  title     = {Real-Time Personalization in Adaptive {IDEs}},
  booktitle = {Proceedings of the 27th Conference on User Modeling, Adaptation and Personalization ({UMAP})},
  publisher = {ACM},
  pages     = {81--86},
  year      = {2019},
  owner     = {Matthi},
  url       = {https://www.medien.ifi.lmu.de/pubdb/publications/pub/schmidmaier2019umap-lbw/schmidmaier2019umap-lbw.pdf},
  keywords  = {Adaptive IDE, User Modeling, Personalized Recommendation Systems, Human-Centered Machine Learning},
  abstract  = {Integrated Development Environments (IDEs) are used for a variety of software development tasks. Their complexity makes them challenging to use though, especially for less experienced developers. In this paper, we outline our approach for a user-adaptive IDE that is able to track the interactions, recognize the user's intent and expertise, and provide relevant, personalized recommendations in real-time. To obtain a user model and provide recommendations, interaction data is processed in a two-stage process: first, we derive a bandit based global model of general task patterns from a dataset of labeled interactions. Second, when the user is working with the IDE, we apply a pre-trained classifier in real-time to get task labels from the user's interactions. With those and user feedback we fine-tune a local copy of the global model. As a result, we obtain a personalized user model which provides user-specific recommendations. We finally present various approaches for using these recommendations to adapt the IDE's interface. Modifications range from visual highlighting to task automation, including explanatory feedback.},
}

@inproceedings{8719411,
  author    = {Iqbal, Tahira and Elahidoost, Parisa and L{\'u}cio, Levi},
  title     = {A Bird's Eye View on Requirements Engineering and Machine Learning},
  booktitle = {Proceedings of the 25th Asia-Pacific Software Engineering Conference (APSEC)},
  pages     = {11--20},
  year      = {2018},
  month     = dec,
  doi       = {10.1109/APSEC.2018.00015},
  keywords  = {Requirements Engineering, Machine learning, State of the art, Overview, Model-based Systems Engineering, MbSE},
  abstract  = {Machine learning (ML) has demonstrated practical impact in a variety of application domains. Software engineering is a fertile domain where ML is helping in automating different tasks. In this paper, our focus is the intersection of software requirement engineering (RE) and ML. To obtain an overview of how ML is helping RE and the research trends in this area, we have surveyed a large number of research articles. We found that the impact of ML can be observed in requirement elicitation, analysis and specification, validation and management. Furthermore, in these categories, we discuss the specific problem solved by ML, the features and ML algorithms used as well as datasets, when available. We outline lessons learned and envision possible future directions for the domain.},
}