@inproceedings{zhang2023overreliance,
  author    = {Zhang, Zelun Tony and Tong, Sven and Liu, Yuanting and Butz, Andreas},
  title     = {Is Overreliance on AI Provoked by Study Design?},
  booktitle = {Human-Computer Interaction -- INTERACT 2023},
  publisher = {Springer Nature Switzerland},
  pages     = {49--58},
  year      = {2023},
  month     = aug,
  address   = {Cham},
  abstract  = {Recent studies found that humans tend to overrely on AI when making decisions with AI support. AI explanations were often insufficient as mitigation, and sometimes even increased overreliance. However, typical AI-assisted decision-making studies consist of long series of decision tasks, potentially causing complacent behavior, and not properly reflecting many real-life scenarios. We therefore raise the question whether these findings might be favored by the design of these studies. In a first step to answer this question, we compared different study designs in an experiment and found indications that observations of overreliance might indeed be favored by common study designs. Further research is needed to clarify to what extent overreliance can be attributed to study designs rather than more fundamental human-AI interaction issues.},
  isbn      = {978-3-031-42286-7},
  doi       = {10.1007/978-3-031-42286-7_3},
  keywords  = {human-AI interaction, AI-assisted decision-making, explainable AI, overreliance},
  url       = {https://doi.org/10.1007/978-3-031-42286-7_3},
}

@inproceedings{zhang2023appropriation,
  author    = {Zhang, Zelun Tony and Liu, Yuanting and Butz, Andreas},
  title     = {Designing AI for Appropriation Will Calibrate Trust},
  booktitle = {CHI TRAIT '23: Workshop on Trust and Reliance in AI-Assisted Tasks at CHI 2023},
  year      = {2023},
  month     = apr,
  location  = {Hamburg, Germany},
  abstract  = {Calibrating users' trust on AI to an appropriate level is widely considered one of the key mechanisms to manage brittle AI performance. However, trust calibration is hard to achieve, with numerous interacting factors that can tip trust into one direction or the other. In this position paper, we argue that instead of focusing on trust calibration to achieve resilient human-AI interactions, it might be helpful to design AI systems for appropriation first, i.e. allowing users to use an AI system according to their intention, beyond what was explicitly considered by the designer. We observe that rather than suggesting end results without human involvement, appropriable AI systems tend to offer users incremental support. Such systems do not eliminate the need for trust calibration, but we argue that they may calibrate users' trust as a side effect and thereby achieve an appropriate level of trust by design.},
  type      = {Workshop},
  keywords  = {appropriation, artificial intelligence, iterative problem solving, incremental support, trust calibration},
  url       = {https://www.researchgate.net/publication/369185707_Designing_AI_for_Appropriation_Will_Calibrate_Trust},
}
@inproceedings{zhang2023resilience,
  author    = {Zhang, Zelun Tony and Storath, Cara and Liu, Yuanting and Butz, Andreas},
  title     = {Resilience Through Appropriation: Pilots' View on Complex Decision Support},
  booktitle = {28th International Conference on Intelligent User Interfaces (IUI '23)},
  publisher = {ACM},
  year      = {2023},
  month     = mar,
  location  = {Sydney, NSW, Australia},
  abstract  = {Intelligent decision support tools (DSTs) hold the promise to improve the quality of human decision-making in challenging situations like diversions in aviation. To achieve these improvements, a common goal in DST design is to calibrate decision makers' trust in the system. However, this perspective is mostly informed by controlled studies and might not fully reflect the real-world complexity of diversions. In order to understand how DSTs can be beneficial in the view of those who have the best understanding of the complexity of diversions, we interviewed professional pilots. To facilitate discussions, we built two low-fidelity prototypes, each representing a different role a DST could assume: (a) actively suggesting and ranking airports based on pilot-specified criteria, and (b) unobtrusively hinting at data points the pilot should be aware of. We find that while pilots would not blindly trust a DST, they at the same time reject deliberate trust calibration in the moment of the decision. We revisit appropriation as a lens to understand this seeming contradiction as well as a range of means to enable appropriation. Aside from the commonly considered need for transparency, these include directability and continuous support throughout the entire decision process. Based on our design exploration, we encourage to expand the view on DST design beyond trust calibration at the point of the actual decision.},
  doi       = {10.1145/3581641.3584056},
  keywords  = {human-AI interaction, decision support tools, intelligent decision support, AI-assisted decision-making, naturalistic decision-making, imperfect AI, appropriation, aviation},
}

@inproceedings{storath2022trust,
  author    = {Storath, Cara and Zhang, Zelun Tony and Liu, Yuanting and Hu{\ss}mann, Heinrich},
  title     = {Building Trust by Supporting Situation Awareness: Exploring Pilots' Design Requirements for Decision Support Tools},
  booktitle = {CHI TRAIT '22: Workshop on Trust and Reliance in Human-AI Teams at CHI 2022},
  year      = {2022},
  month     = apr,
  location  = {New Orleans, LA},
  abstract  = {Supporting pilots with a decision support tool (DST) during high-workload scenarios is a promising and potentially very helpful application for AI in aviation. Nevertheless, design requirements and opportunities for trustworthy DSTs within the aviation domain have not been explored much in the scientific literature. To address this gap, we explore the decision-making process of pilots with respect to user requirements for the use case of diversions. We do so via two prototypes, each representing a role the AI could have in a DST: A) Unobtrusively hinting at data points the pilot should be aware of. B) Actively suggesting and ranking diversion options based on criteria the pilot has previously defined. Our work-in-progress feedback study reveals four preliminary main findings: 1) Pilots demand guaranteed trustworthiness of such a system and refuse trust calibration in the moment of emergency. 2) We may need to look beyond trust calibration for isolated decision points and rather design for the process leading to the decision. 3) An unobtrusive, augmenting AI seems to be preferred over an AI proposing and ranking diversion options at decision time. 4) Shifting the design goal toward supporting situation awareness rather than the decision itself may be a promising approach to increase trust and reliance.},
  type      = {Workshop},
  keywords  = {human-AI interaction, decision support tools, decision support systems, human-AI teaming, aviation},
  url       = {https://www.researchgate.net/publication/360166947_Building_Trust_by_Supporting_Situation_Awareness_Exploring_Pilots'_Design_Requirements_for_Decision_Support_Tools},
}
@inproceedings{zhang2021attitudes,
  author    = {Zhang, Zelun Tony and Liu, Yuanting and Hu{\ss}mann, Heinrich},
  title     = {Pilot Attitudes Toward AI in the Cockpit: Implications for Design},
  booktitle = {2021 IEEE 2nd International Conference on Human-Machine Systems (ICHMS)},
  publisher = {IEEE},
  year      = {2021},
  month     = sep,
  location  = {Magdeburg, Germany},
  abstract  = {As the aviation industry is actively working on adopting AI for air traffic, stakeholders agree on the need for a human-centered approach. However, automation design is often driven by user-centered intentions, while the development is actually technology-centered. This can be attributed to a discrepancy between the system designers' perspective and complexities in real-world use. The same can be currently observed with AI applications where most design efforts focus on the interface between humans and AI, while the overall system design is built on preconceived assumptions. To understand potential usability issues of AI-driven cockpit assistant systems from the users' perspective, we conducted interviews with four experienced pilots. While our participants did discuss interface issues, they were much more concerned about how autonomous systems could be a burden if the operational complexity exceeds their capabilities. Besides commonly addressed human-AI interface issues, our results thus point to the need for more consideration of operational complexities on a system-design level.},
  doi       = {10.1109/ICHMS53169.2021.9582448},
  keywords  = {interviews, thematic analysis, intelligent cockpit assistant systems, human-AI interaction, imperfect AI},
  url       = {https://doi.org/10.1109/ICHMS53169.2021.9582448},
}

@inproceedings{zhang2021forward,
  author    = {Zhang, Zelun Tony and Liu, Yuanting and Hu{\ss}mann, Heinrich},
  title     = {Forward Reasoning Decision Support: Toward a More Complete View of the Human-AI Interaction Design Space},
  booktitle = {CHItaly '21: 14th Biannual Conference of the Italian SIGCHI Chapter},
  pages     = {18:1--18:5},
  year      = {2021},
  month     = jul,
  abstract  = {Decision support systems based on AI are usually designed to generate complete outputs entirely automatically and to explain those to users. However, explanations, no matter how well designed, might not adequately address the output uncertainty of such systems in many applications. This is especially the case when the human-out-of-the-loop problem persists, which is a fundamental human limitation. There is no reason to limit decision support systems to such backward reasoning designs, though. We argue how more interactive forward reasoning designs where users are actively involved in the task can be effective in managing output uncertainty. We therefore call for a more complete view of the design space for decision support systems that includes both backward and forward reasoning designs. We argue that such a more complete view is necessary to overcome the barriers that hinder AI deployment especially in high-stakes applications.},
  doi       = {10.1145/3464385.3464696},
}
@inproceedings{zhang2021uncertainty,
  author    = {Zhang, Zelun Tony and Hu{\ss}mann, Heinrich},
  title     = {How to Manage Output Uncertainty: Targeting the Actual End User Problem in Interactions with AI},
  booktitle = {Joint Proceedings of the ACM IUI 2021 Workshops},
  year      = {2021},
  month     = jul,
  abstract  = {Given the opaqueness and complexity of modern AI algorithms, there is currently a strong focus on developing transparent and explainable AI, especially in high-stakes domains. We claim that opaqueness and complexity are not the core issues for end users when interacting with AI. Instead, we propose that the output uncertainty inherent to AI systems is the actual problem, with opaqueness and complexity as contributing factors. Transparency and explainability should therefore not be the end goals, as such a focus tends to place the human into a passive supervisory role in what is in reality an algorithm-centered system design. To enable effective management of output uncertainty, we believe it is necessary to focus on truly human-centered AI designs that keep the human in an active role of control. We discuss the conceptual implications of such a shift in focus and give examples from literature to illustrate the more holistic, interactive designs that we envision.},
  url       = {http://ceur-ws.org/Vol-2903/IUI21WS-TExSS-17.pdf},
}