2022

Taymaz Homayouni; Akram Gholami; Arash Toudeshki; Leili Afsah-Hejri; Reza Ehsani
Estimation of proper shaking parameters for pistachio trees based on their trunk size Journal Article
In: Biosystems Engineering, vol. 216, pp. 121–131, 2022, ISSN: 1537-5110.
@article{homayouni_estimation_2022,
title = {Estimation of proper shaking parameters for pistachio trees based on their trunk size},
author = {Homayouni, Taymaz and Gholami, Akram and Toudeshki, Arash and Afsah-Hejri, Leili and Ehsani, Reza},
url = {https://www.sciencedirect.com/science/article/pii/S1537511022000411},
doi = {10.1016/j.biosystemseng.2022.02.008},
issn = {1537-5110},
year = {2022},
date = {2022-04-01},
urldate = {2022-07-07},
journal = {Biosystems Engineering},
volume = {216},
pages = {121--131},
abstract = {Trunk shaking is the most common mechanical harvesting system for pistachios. Harvesting machine operators often decide subjectively how to set shaking parameters such as frequency and duration, and doing so requires experience. The main objectives of this study were to evaluate the effect of tree morphology and shaking parameters, such as trunk size and shaking pattern, on the energy distribution through the branches and to optimise the shaking intensity of individual pistachio trees based on a tree-specific feedback loop. Wireless 3D accelerometer sensors were built and used to measure vibration transmission through the tree canopy at different locations and to monitor the energy transmission between the machine shaker head and the tree trunk. Thirty trees were selected for this study and divided into three groups based on trunk circumference. To study the effect of shaking pattern on vibration transmission through the tree, four shaking patterns were selected and tested. The measured shaking duration was, on average, 30% longer than the shaking pattern duration. The effect of all four shaking patterns was analysed using the continuous wavelet transform. The responses of the trees were analysed and the optimum shaking intensity for each tree was determined. A model was developed to estimate the optimum shaking intensity for pistachio trees based on their trunk size. The model showed that 37%, 57%, and 65% are the optimum shaking intensities for small, medium, and large trees, respectively.},
keywords = {Labor},
pubstate = {published},
tppubtype = {article}
}

Roli Khanna; Jonathan Dodge; Andrew Anderson; Rupika Dikkala; Jed Irvine; Zeyad Shureih; Kin-Ho Lam; Caleb R. Matthews; Zhengxian Lin; Minsuk Kahng; Alan Fern; Margaret Burnett
Finding AI’s Faults with AAR/AI: An Empirical Study Journal Article
In: ACM Transactions on Interactive Intelligent Systems, vol. 12, no. 1, pp. 1:1–1:33, 2022, ISSN: 2160-6455.
@article{khanna_finding_2022,
title = {Finding AI’s Faults with AAR/AI: An Empirical Study},
author = {Roli Khanna and Jonathan Dodge and Andrew Anderson and Rupika Dikkala and Jed Irvine and Zeyad Shureih and Kin-Ho Lam and Caleb R. Matthews and Zhengxian Lin and Minsuk Kahng and Alan Fern and Margaret Burnett},
url = {https://doi.org/10.1145/3487065},
doi = {10.1145/3487065},
issn = {2160-6455},
year = {2022},
date = {2022-03-01},
urldate = {2022-03-01},
journal = {ACM Transactions on Interactive Intelligent Systems},
volume = {12},
number = {1},
pages = {1:1--1:33},
abstract = {Would you allow an AI agent to make decisions on your behalf? If the answer is “not always,” the next question becomes “in what circumstances”? Answering this question requires human users to be able to assess an AI agent---and not just with overall pass/fail assessments or statistics. Here users need to be able to localize an agent’s bugs so that they can determine when they are willing to rely on the agent and when they are not. After-Action Review for AI (AAR/AI), a new AI assessment process for integration with Explainable AI systems, aims to support human users in this endeavor, and in this article we empirically investigate AAR/AI’s effectiveness with domain-knowledgeable users. Our results show that AAR/AI participants not only located significantly more bugs than non-AAR/AI participants did (i.e., showed greater recall) but also located them more precisely (i.e., with greater precision). In fact, AAR/AI participants outperformed non-AAR/AI participants on every bug and were, on average, almost six times as likely as non-AAR/AI participants to find any particular bug. Finally, evidence suggests that incorporating labeling into the AAR/AI process may encourage domain-knowledgeable users to abstract above individual instances of bugs; we hypothesize that doing so may have contributed further to AAR/AI participants’ effectiveness.},
keywords = {AI, Human-Computer Interaction},
pubstate = {published},
tppubtype = {article}
}

Jonathan Dodge; Andrew A. Anderson; Matthew Olson; Rupika Dikkala; Margaret Burnett
How Do People Rank Multiple Mutant Agents? Proceedings Article
In: 27th International Conference on Intelligent User Interfaces, pp. 191–211, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9144-3.
@inproceedings{dodge_how_2022,
title = {How Do People Rank Multiple Mutant Agents?},
author = {Jonathan Dodge and Andrew A. Anderson and Matthew Olson and Rupika Dikkala and Margaret Burnett},
url = {https://doi.org/10.1145/3490099.3511115},
doi = {10.1145/3490099.3511115},
isbn = {978-1-4503-9144-3},
year = {2022},
date = {2022-03-01},
urldate = {2022-03-01},
booktitle = {27th International Conference on Intelligent User Interfaces},
pages = {191--211},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {IUI '22},
abstract = {Faced with several AI-powered sequential decision-making systems, how might someone choose on which to rely? For example, imagine car buyer Blair shopping for a self-driving car, or developer Dillon trying to choose an appropriate ML model to use in their application. Their first choice might be infeasible (i.e., too expensive in money or execution time), so they may need to select their second or third choice. To address this question, this paper presents: 1) Explanation Resolution, a quantifiable direct measurement concept; 2) a new XAI empirical task to measure explanations: “the Ranking Task”; and 3) a new strategy for inducing controllable agent variations---Mutant Agent Generation. In support of those main contributions, it also presents 4) novel explanations for sequential decision-making agents; 5) an adaptation to the AAR/AI assessment process; and 6) a qualitative study around these devices with 10 participants to investigate how they performed the Ranking Task on our mutant agents, using our explanations, and structured by AAR/AI. From an XAI researcher perspective, just as mutation testing can be applied to any code, mutant agent generation can be applied to essentially any neural network for which one wants to evaluate an assessment process or explanation type. From an XAI user’s perspective, the participants ranked the agents well overall, but showed the importance of high explanation resolution for close differences between agents. The participants also revealed the importance of supporting a wide diversity of explanation diets and agent “test selection” strategies.},
keywords = {AI, Human-Computer Interaction},
pubstate = {published},
tppubtype = {inproceedings}
}

Amreeta Chatterjee; Lara Letaw; Rosalinda Garcia; Doshna Umma Reddy; Rudrajit Choudhuri; Sabyatha Sathish Kumar; Patricia Morreale; Anita Sarma; Margaret Burnett
Inclusivity Bugs in Online Courseware: A Field Study Proceedings Article
In: Proceedings of the 2022 ACM Conference on International Computing Education Research - Volume 1, pp. 356–372, Association for Computing Machinery, New York, NY, USA, 2022, ISBN: 978-1-4503-9194-8.
@inproceedings{chatterjee_inclusivity_2022,
title = {Inclusivity Bugs in Online Courseware: A Field Study},
author = {Amreeta Chatterjee and Lara Letaw and Rosalinda Garcia and Doshna Umma Reddy and Rudrajit Choudhuri and Sabyatha Sathish Kumar and Patricia Morreale and Anita Sarma and Margaret Burnett},
url = {https://doi.org/10.1145/3501385.3543973},
doi = {10.1145/3501385.3543973},
isbn = {978-1-4503-9194-8},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
booktitle = {Proceedings of the 2022 ACM Conference on International Computing Education Research - Volume 1},
pages = {356--372},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {ICER '22},
abstract = {Motivation: Although asynchronous online CS courses have enabled more diverse populations to access CS higher education, research shows that online CS-ed is far from inclusive, with women and other underrepresented groups continuing to face inclusion gaps. Worse, diversity/inclusion research in CS-ed has largely overlooked the online courseware---the web pages and course materials that populate the online learning platforms---that constitute asynchronous online CS-ed’s only mechanism of course delivery. Objective: To investigate this aspect of CS-ed’s inclusivity, we conducted a three-phase field study with online CS faculty, with three research questions: (1) whether, how, and where online CS-ed’s courseware has inclusivity bugs; (2) whether an automated tool can detect them; and (3) how online CS faculty would make use of such a tool. Method: In the study’s first phase, we facilitated online CS faculty members’ use of GenderMag (an inclusive design method) on two online CS courses to find their own courseware’s inclusivity bugs. In the second phase, we used a variant of the GenderMag Automated Inclusivity Detector (AID) tool to automatically locate a “vertical slice” of such courseware inclusivity bugs, and evaluated the tool’s accuracy. In the third phase, we investigated how online CS faculty used the tool to find inclusivity bugs in their own courseware. Results: The results revealed 29 inclusivity bugs spanning 6 categories in the online courseware of 9 online CS courses; showed that the tool achieved an accuracy of 75% at finding such bugs; and revealed new insights into how a tool could help online CS faculty uncover assumptions about their own courseware to make it more inclusive. Implications: As the first study to investigate the presence and types of cognitive- and gender-inclusivity bugs in online CS courseware and whether an automated tool can find them, our results reveal new possibilities for how to make online CS education a more inclusive virtual environment for gender-diverse students.},
keywords = {Education, Human-Computer Interaction},
pubstate = {published},
tppubtype = {inproceedings}
}

2021

Jonathan Dodge; Roli Khanna; Jed Irvine; Kin-ho Lam; Theresa Mai; Zhengxian Lin; Nicholas Kiddle; Evan Newman; Andrew Anderson; Sai Raja; Caleb Matthews; Christopher Perdriau; Margaret Burnett; Alan Fern
After-Action Review for AI (AAR/AI) Journal Article
In: ACM Transactions on Interactive Intelligent Systems, vol. 11, no. 3-4, pp. 29:1–29:35, 2021, ISSN: 2160-6455.
@article{dodge_after-action_2021,
title = {After-Action Review for AI (AAR/AI)},
author = {Jonathan Dodge and Roli Khanna and Jed Irvine and Kin-ho Lam and Theresa Mai and Zhengxian Lin and Nicholas Kiddle and Evan Newman and Andrew Anderson and Sai Raja and Caleb Matthews and Christopher Perdriau and Margaret Burnett and Alan Fern},
url = {https://doi.org/10.1145/3453173},
doi = {10.1145/3453173},
issn = {2160-6455},
year = {2021},
date = {2021-01-01},
urldate = {2021-01-01},
journal = {ACM Transactions on Interactive Intelligent Systems},
volume = {11},
number = {3-4},
pages = {29:1--29:35},
abstract = {Explainable AI is growing in importance as AI pervades modern society, but few have studied how explainable AI can directly support people trying to assess an AI agent. Without a rigorous process, people may approach assessment in ad hoc ways---leading to the possibility of wide variations in assessment of the same agent due only to variations in their processes. AAR, or After-Action Review, is a method some military organizations use to assess human agents, and it has been validated in many domains. Drawing upon this strategy, we derived an After-Action Review for AI (AAR/AI), to organize ways people assess reinforcement learning agents in a sequential decision-making environment. We then investigated what AAR/AI brought to human assessors in two qualitative studies. The first investigated AAR/AI to gather formative information, and the second built upon the results, and also varied the type of explanation (model-free vs. model-based) used in the AAR/AI process. Among the results were the following: (1) participants reported that AAR/AI helped to organize their thoughts and think logically about the agent, (2) AAR/AI encouraged participants to reason about the agent from a wide range of perspectives, and (3) participants were able to leverage AAR/AI with the model-based explanations to falsify the agent’s predictions.},
keywords = {AI, Human-Computer Interaction},
pubstate = {published},
tppubtype = {article}
}