@inproceedings{zhang2023overreliance, author = {Zhang, Zelun Tony and Tong, Sven and Liu, Yuanting and Butz, Andreas}, title = {Is Overreliance on AI Provoked by Study Design?}, booktitle = {Human-Computer Interaction -- INTERACT 2023}, publisher = {Springer Nature Switzerland}, pages = {49--58}, year = {2023}, month = aug, address = {Cham}, abstract = {Recent studies found that humans tend to overrely on AI when making decisions with AI support. AI explanations were often insufficient as mitigation, and sometimes even increased overreliance. However, typical AI-assisted decision-making studies consist of long series of decision tasks, potentially causing complacent behavior, and not properly reflecting many real-life scenarios. We therefore raise the question whether these findings might be favored by the design of these studies. In a first step to answer this question, we compared different study designs in an experiment and found indications that observations of overreliance might indeed be favored by common study designs. Further research is needed to clarify to what extent overreliance can be attributed to study designs rather than more fundamental human-AI interaction issues.}, isbn = {978-3-031-42286-7}, doi = {10.1007/978-3-031-42286-7_3}, keywords = {human-AI interaction, AI-assisted decision-making, explainable AI, overreliance}, url = {https://doi.org/10.1007/978-3-031-42286-7_3}, }

@inproceedings{zhang2023appropriation, author = {Zhang, Zelun Tony and Liu, Yuanting and Butz, Andreas}, title = {Designing AI for Appropriation Will Calibrate Trust}, booktitle = {CHI TRAIT '23: Workshop on Trust and Reliance in AI-Assisted Tasks at CHI 2023}, year = {2023}, month = apr, location = {Hamburg, Germany}, abstract = {Calibrating users' trust on AI to an appropriate level is widely considered one of the key mechanisms to manage brittle AI performance. However, trust calibration is hard to achieve, with numerous interacting factors that can tip trust into one direction or the other. In this position paper, we argue that instead of focusing on trust calibration to achieve resilient human-AI interactions, it might be helpful to design AI systems for appropriation first, i.e. allowing users to use an AI system according to their intention, beyond what was explicitly considered by the designer. We observe that rather than suggesting end results without human involvement, appropriable AI systems tend to offer users incremental support. Such systems do not eliminate the need for trust calibration, but we argue that they may calibrate users' trust as a side effect and thereby achieve an appropriate level of trust by design.}, type = {Workshop}, keywords = {appropriation, artificial intelligence, iterative problem solving, incremental support, trust calibration}, url = {https://www.researchgate.net/publication/369185707_Designing_AI_for_Appropriation_Will_Calibrate_Trust}, }

@inproceedings{zhang2023resilience, author = {Zhang, Zelun Tony and Storath, Cara and Liu, Yuanting and Butz, Andreas}, title = {Resilience Through Appropriation: Pilots' View on Complex Decision Support}, booktitle = {28th International Conference on Intelligent User Interfaces (IUI '23)}, publisher = {ACM}, year = {2023}, month = mar, location = {Sydney, NSW, Australia}, abstract = {Intelligent decision support tools (DSTs) hold the promise to improve the quality of human decision-making in challenging situations like diversions in aviation. To achieve these improvements, a common goal in DST design is to calibrate decision makers' trust in the system.
However, this perspective is mostly informed by controlled studies and might not fully reflect the real-world complexity of diversions. In order to understand how DSTs can be beneficial in the view of those who have the best understanding of the complexity of diversions, we interviewed professional pilots. To facilitate discussions, we built two low-fidelity prototypes, each representing a different role a DST could assume: (a) actively suggesting and ranking airports based on pilot-specified criteria, and (b) unobtrusively hinting at data points the pilot should be aware of. We find that while pilots would not blindly trust a DST, they at the same time reject deliberate trust calibration in the moment of the decision. We revisit appropriation as a lens to understand this seeming contradiction as well as a range of means to enable appropriation. Aside from the commonly considered need for transparency, these include directability and continuous support throughout the entire decision process. Based on our design exploration, we encourage expanding the view on DST design beyond trust calibration at the point of the actual decision.}, doi = {10.1145/3581641.3584056}, keywords = {human-AI interaction, decision support tools, intelligent decision support, AI-assisted decision-making, naturalistic decision-making, imperfect AI, appropriation, aviation}, }

@inproceedings{https://doi.org/10.1145/3557915.3560995, author = {Resce, Pierpaolo and Vorwerk, Lukas and Han, Zhiwei and Cornacchia, Giuliano and Isfahani Alamdari, Omid and Nanni, Mirco and Pappalardo, Luca and Weimer, Daniel and Liu, Yuanting}, title = {Connected vehicle simulation framework for parking occupancy prediction (demo paper)}, booktitle = {SIGSPATIAL '22: Proceedings of the 30th International Conference on Advances in Geographic Information Systems}, publisher = {ACM}, year = {2022}, month = nov, abstract = {This paper demonstrates a simulation framework that collects data about connected vehicles' locations and surroundings in a realistic traffic scenario. Our focus lies on the capability to detect parking spots and their occupancy status. We use this data to train machine learning models that predict parking occupancy levels of specific areas in the city center of San Francisco. By comparing their performance to a given ground truth, our results show that it is possible to use simulated connected vehicle data as a base for prototyping meaningful AI-based applications.}, howpublished = {Proceedings}, doi = {10.1145/3557915.3560995}, url = {https://dl.acm.org/doi/10.1145/3557915.3560995}, }

@inproceedings{storath2022building, author = {Storath, Cara and Zhang, Zelun Tony and Liu, Yuanting and Hu{\ss}mann, Heinrich}, title = {Building Trust by Supporting Situation Awareness: Exploring Pilots' Design Requirements for Decision Support Tools}, booktitle = {CHI TRAIT '22: Workshop on Trust and Reliance in Human-AI Teams at CHI 2022}, year = {2022}, month = apr, location = {New Orleans, LA}, abstract = {Supporting pilots with a decision support tool (DST) during high-workload scenarios is a promising and potentially very helpful application for AI in aviation. Nevertheless, design requirements and opportunities for trustworthy DSTs within the aviation domain have not been explored much in the scientific literature. To address this gap, we explore the decision-making process of pilots with respect to user requirements for the use case of diversions.
We do so via two prototypes, each representing a role the AI could have in a DST: A) Unobtrusively hinting at data points the pilot should be aware of. B) Actively suggesting and ranking diversion options based on criteria the pilot has previously defined. Our work-in-progress feedback study reveals four preliminary main findings: 1) Pilots demand guaranteed trustworthiness of such a system and refuse trust calibration in the moment of emergency. 2) We may need to look beyond trust calibration for isolated decision points and rather design for the process leading to the decision. 3) An unobtrusive, augmenting AI seems to be preferred over an AI proposing and ranking diversion options at decision time. 4) Shifting the design goal toward supporting situation awareness rather than the decision itself may be a promising approach to increase trust and reliance.}, type = {Workshop}, keywords = {human-AI interaction, decision support tools, decision support systems, human-AI teaming, aviation}, url = {https://www.researchgate.net/publication/360166947_Building_Trust_by_Supporting_Situation_Awareness_Exploring_Pilots'_Design_Requirements_for_Decision_Support_Tools}, }

@inproceedings{zhang2021pilot, author = {Zhang, Zelun Tony and Liu, Yuanting and Hu{\ss}mann, Heinrich}, title = {Pilot Attitudes Toward AI in the Cockpit: Implications for Design}, booktitle = {2021 IEEE 2nd International Conference on Human-Machine Systems (ICHMS)}, publisher = {IEEE}, year = {2021}, month = sep, location = {Magdeburg, Germany}, abstract = {As the aviation industry is actively working on adopting AI for air traffic, stakeholders agree on the need for a human-centered approach. However, automation design is often driven by user-centered intentions, while the development is actually technology-centered. This can be attributed to a discrepancy between the system designers' perspective and complexities in real-world use. The same can be currently observed with AI applications where most design efforts focus on the interface between humans and AI, while the overall system design is built on preconceived assumptions. To understand potential usability issues of AI-driven cockpit assistant systems from the users' perspective, we conducted interviews with four experienced pilots. While our participants did discuss interface issues, they were much more concerned about how autonomous systems could be a burden if the operational complexity exceeds their capabilities. Besides commonly addressed human-AI interface issues, our results thus point to the need for more consideration of operational complexities on a system-design level.}, doi = {10.1109/ICHMS53169.2021.9582448}, keywords = {interviews, thematic analysis, intelligent cockpit assistant systems, human-AI interaction, imperfect AI}, url = {https://doi.org/10.1109/ICHMS53169.2021.9582448}, }

@inproceedings{zhang2021forward, author = {Zhang, Zelun Tony and Liu, Yuanting and Hu{\ss}mann, Heinrich}, title = {Forward Reasoning Decision Support: Toward a More Complete View of the Human-AI Interaction Design Space}, booktitle = {CHItaly '21: 14th Biannual Conference of the Italian SIGCHI Chapter}, pages = {18:1--18:5}, year = {2021}, month = jul, abstract = {Decision support systems based on AI are usually designed to generate complete outputs entirely automatically and to explain those to users. However, explanations, no matter how well designed, might not adequately address the output uncertainty of such systems in many applications.
This is especially the case when the human-out-of-the-loop problem persists, which is a fundamental human limitation. There is no reason to limit decision support systems to such backward reasoning designs, though. We argue how more interactive forward reasoning designs, where users are actively involved in the task, can be effective in managing output uncertainty. We therefore call for a more complete view of the design space for decision support systems that includes both backward and forward reasoning designs. We argue that such a more complete view is necessary to overcome the barriers that hinder AI deployment especially in high-stakes applications.}, doi = {10.1145/3464385.3464696}, }

@inproceedings{klingner2020firefighter, author = {Klingner, S{\"{o}}ren and Han, Zhiwei and Liu, Yuanting and Fang, Fang and Altakrouri, Bashar and Michel, Bruno and Weiss, Jonas R. M. and Sridhar, Arvind and Chau, Sophie Mai}, title = {Firefighter Virtual Reality Simulation for Personalized Stress Detection}, booktitle = {The 43rd German Conference on AI}, publisher = {Springer}, pages = {343--347}, year = {2020}, month = sep, abstract = {Classifying stress in firefighters poses challenges, such as accurate personalized labeling, unobtrusive recording, and training of adequate models. Acquisition of labeled data and verification in cage mazes or during hot trainings is time-consuming. Virtual Reality (VR) and Internet of Things (IoT) wearables provide new opportunities to create better stressors for firefighter missions through an immersive simulation. In this demo, we present a VR-based setup that enables simulating firefighter missions to trigger and more easily record specific stress levels. The goal is to create labeled datasets for personalized multilevel stress detection models that include multiple biosignals, such as heart rate variability from electrocardiographic RR intervals. The multi-level stress setups can be configured, consisting of different levels of mental stressors. The demo shows how we established the recording of a baseline and virtual missions with varying challenge levels to create a personalized stress calibration.}, }

@inproceedings{weber2020drawdemo, author = {Weber, Thomas and Han, Zhiwei and Matthes, Stefan and Hu{\ss}mann, Heinrich and Liu, Yuanting}, title = {Draw with Me: Human-in-the-Loop for Image Restoration}, booktitle = {The 43rd German Conference on AI}, publisher = {Springer}, pages = {245--248}, year = {2020}, abstract = {The purpose of image restoration is to recover the original state of damaged images. To overcome the disadvantages of the traditional, manual image restoration process, like the high time consumption and required domain knowledge, automatic inpainting methods have been developed. These methods, however, can have limitations for complex images and may require a lot of input data. To mitigate those, we present "interactive Deep Image Prior", a combination of manual and automated, Deep-Image-Prior-based restoration in the form of an interactive process with the human in the loop. In this process a human can iteratively embed knowledge to provide guidance and control for the automated inpainting process. For this purpose, we extended Deep Image Prior with a user interface which we subsequently analyzed in a user study. Our key question is whether the interactivity increases the restoration quality subjectively and objectively. Secondarily, we were also interested in how such a collaborative system is perceived by users.
Our evaluation shows that, even with very little human guidance, our interactive approach has a restoration performance on par with or superior to other methods. Meanwhile, very positive results of our user study suggest that learning systems with the human-in-the-loop positively contribute to user satisfaction. We therefore conclude that an interactive, cooperative approach is a viable option for image restoration and potentially other ML tasks where human knowledge can be a correcting or guiding influence.}, keywords = {Interactive Machine Learning, Image Restoration, Image Prior}, url = {https://dl.acm.org/doi/abs/10.1145/3377325.3377509}, }

@inproceedings{weber2020draw, author = {Weber, Thomas and Han, Zhiwei and Matthes, Stefan and Hu{\ss}mann, Heinrich and Liu, Yuanting}, title = {Draw with me: human-in-the-loop for image restoration}, booktitle = {Proceedings of the 25th International Conference on Intelligent User Interfaces}, publisher = {ACM}, pages = {243--253}, year = {2020}, abstract = {The purpose of image restoration is to recover the original state of damaged images. To overcome the disadvantages of the traditional, manual image restoration process, like the high time consumption and required domain knowledge, automatic inpainting methods have been developed. These methods, however, can have limitations for complex images and may require a lot of input data. To mitigate those, we present "interactive Deep Image Prior", a combination of manual and automated, Deep-Image-Prior-based restoration in the form of an interactive process with the human in the loop. In this process a human can iteratively embed knowledge to provide guidance and control for the automated inpainting process. For this purpose, we extended Deep Image Prior with a user interface which we subsequently analyzed in a user study. Our key question is whether the interactivity increases the restoration quality subjectively and objectively. Secondarily, we were also interested in how such a collaborative system is perceived by users. Our evaluation shows that, even with very little human guidance, our interactive approach has a restoration performance on par with or superior to other methods. Meanwhile, very positive results of our user study suggest that learning systems with the human-in-the-loop positively contribute to user satisfaction. We therefore conclude that an interactive, cooperative approach is a viable option for image restoration and potentially other ML tasks where human knowledge can be a correcting or guiding influence.}, keywords = {Interactive Machine Learning, Image Restoration, Image Prior}, url = {https://dl.acm.org/doi/abs/10.1145/3377325.3377509}, }

@incollection{han2020metapath, author = {Han, Zhiwei and Anwaar, Muhammad Umer and Arumugaswamy, Shyam and Weber, Thomas and Qiu, Tianming and Shen, Hao and Liu, Yuanting and Kleinsteuber, Martin}, title = {Metapath- and Entity-aware Graph Neural Network for Recommendation}, booktitle = {arXiv}, year = {2020}, abstract = {In graph neural networks (GNNs), message passing iteratively aggregates nodes' information from their direct neighbors while neglecting the sequential nature of multi-hop node connections. Such sequential node connections, e.g., metapaths, capture critical insights for downstream tasks. Concretely, in recommender systems (RSs), disregarding these insights leads to inadequate distillation of collaborative signals.
In this paper, we employ collaborative subgraphs (CSGs) and metapaths to form metapath-aware subgraphs, which explicitly capture sequential semantics in graph structures. We propose PEAGNN, a metapath- and entity-aware graph neural network, which trains multilayer GNNs to perform metapath-aware information aggregation on such subgraphs. This aggregated information from different metapaths is then fused using an attention mechanism. Finally, PEAGNN gives us the node and subgraph representations, which can be used to train an MLP for predicting scores for target user-item pairs. To leverage the local structure of CSGs, we present entity-awareness that acts as a contrastive regularizer on node embeddings. Moreover, PEAGNN can be combined with prominent layers such as GAT, GCN and GraphSage. Our empirical evaluation shows that our proposed technique outperforms competitive baselines on several datasets for recommendation tasks. Further analysis demonstrates that PEAGNN also learns meaningful metapath combinations from a given set of metapaths.}, howpublished = {preprint}, url = {https://arxiv.org/abs/2010.11793}, }

@article{221069791, author = {Matthes, Stefan and Han, Zhiwei and Qiu, Tianming and Michel, Bruno and Klingner, S{\"{o}}ren and Shen, Hao and Liu, Yuanting and Altakrouri, Bashar}, title = {Personalized Stress Detection with Self-supervised Learned Features}, publisher = {arXiv}, year = {2020}, abstract = {Automated stress detection using physiological sensors is challenging due to inaccurate labeling and individual bias in the sensor data. Previous methods consider stress detection as a supervised classification task, where bad labeling leads to a large performance drop. Furthermore, the poor generalizability to unseen subjects reveals the importance of personalizing stress detection for both inter- and intra-individual sensor data variability. Towards this end, we present a label-free feature extractor and an efficient personalization method with the "human in the loop" approach. First, we capture the intra-individual variability and encode it in self-supervised learned features, which are usually well separable and independent of noisy stress labels. Next, personalization is achieved by assigning labels to critical reference points via very few interactions between subject and wearable device. The promising results of the conducted experiments show the effectiveness and efficiency of our proposed method.}, howpublished = {preprint}, keywords = {stress detection; self-supervised learning}, url = {https://www.semanticscholar.org/paper/Personalized-Stress-Detection-with-Self-supervised-Matthes-Han/95f19123d9fc0351e2311e93390604c81141f74b}, }

@incollection{CoRR-abs-1910.11059, author = {Han, Zhiwei and Weber, Thomas and Matthes, Stefan and Liu, Yuanting and Shen, Hao}, title = {Interactive Image Restoration}, booktitle = {Human-centric Machine Learning Workshop at NeurIPS 2019}, series = {Human-centric Machine Learning Workshop}, number = {1910.11059}, year = {2019}, abstract = {Machine learning and many of its applications are considered hard to approach due to their complexity and lack of transparency. One mission of human-centric machine learning is to improve algorithm transparency and user satisfaction while ensuring acceptable task accuracy. In this work, we present an interactive image restoration framework, which exploits both image prior and human painting knowledge in an iterative manner such that they can boost each other.
Additionally, in this system users can repeatedly get feedback on their interactions from the restoration progress. This informs the users about their impact on the restoration results, which leads to a better sense of control and can in turn foster greater trust and approachability. The positive results of both objective and subjective evaluation indicate that our interactive approach positively contributes to the approachability of restoration algorithms in terms of algorithm performance and user experience.}, howpublished = {Preprint}, url = {https://arxiv.org/abs/1910.11059}, }

@conference{schmidmaier2019realtime, author = {Schmidmaier, Matthias and Han, Zhiwei and Weber, Thomas and Liu, Yuanting and Hu{\ss}mann, Heinrich}, title = {Real-Time Personalization in Adaptive IDEs}, booktitle = {The 27th Conference on User Modeling, Adaptation and Personalization (UMAP)}, publisher = {ACM}, pages = {81--86}, year = {2019}, abstract = {Integrated Development Environments (IDEs) are used for a variety of software development tasks. Their complexity makes them challenging to use, though, especially for less experienced developers. In this paper, we outline our approach for a user-adaptive IDE that is able to track the interactions, recognize the user's intent and expertise, and provide relevant, personalized recommendations in real time. To obtain a user model and provide recommendations, interaction data is processed in a two-stage process: first, we derive a bandit-based global model of general task patterns from a dataset of labeled interactions. Second, when the user is working with the IDE, we apply a pre-trained classifier in real time to get task labels from the user's interactions. With those and user feedback, we fine-tune a local copy of the global model. As a result, we obtain a personalized user model which provides user-specific recommendations. We finally present various approaches for using these recommendations to adapt the IDE's interface. Modifications range from visual highlighting to task automation, including explanatory feedback.}, keywords = {Adaptive IDE; User Modeling; Personalized Recommendation Systems; Human-Centered Machine Learning}, url = {https://www.medien.ifi.lmu.de/pubdb/publications/pub/schmidmaier2019umap-lbw/schmidmaier2019umap-lbw.pdf}, }

@inproceedings{3265957, author = {Wiegand, Gesa and Mai, Christian and Liu, Yuanting and Hu{\ss}mann, Heinrich}, title = {Early Take-Over Preparation in Stereoscopic 3D}, booktitle = {Adjunct Proceedings of the 10th International Conference on Automotive User Interfaces and Interactive Vehicular Applications}, publisher = {ACM}, series = {AutomotiveUI '18}, pages = {142--146}, year = {2018}, location = {Toronto, ON, Canada}, isbn = {978-1-4503-5947-4}, doi = {10.1145/3239092.3265957}, url = {http://doi.acm.org/10.1145/3239092.3265957}, }

@inproceedings{wiegand2018highway, author = {Wiegand, Gesa and Liu, Yuanting}, title = {Highway Sensor System as Enabler for Autonomous Driving}, booktitle = {CHI 2018 Workshop - Interacting with Autonomous Vehicles: Learning from other Domains}, year = {2018}, }