Philip Guo Endowed Professor of HCI
Human-Computer Interaction Institute
Language Technologies Institute
School of Computer Science
Carnegie Mellon University
407 S. Craig St., 220
Apple Human-Centered Machine Intelligence
jbigham@cs.cmu.edu | hci.social/@jbigham
C.V. | Google Scholar | Short Bio
human-computer interaction, human-AI interaction, accessibility, dialog systems, NLP, speech, crowdsourcing
Research Highlights
Recent Publications
2024
System-class Accessibility
@article{system-class-accessibility,
author={Chris Fleizach and Jeffrey P. Bigham},
title={System-class Accessibility},
journal={ACM Queue},
volume={22},
number={5},
pages={28--39},
keywords={accessibility},
publisher={ACM},
address={New York, NY, USA},
year={2024},
}UIClip: A Data-driven Model for Assessing User Interface Design
@inproceedings{uiclip-interface-design,
author={Jason Wu and Yi-Hao Peng and Xin Yue Li and Amanda Swearngin and Jeffrey P. Bigham and Jeffrey Nichols},
title={UIClip: A Data-driven Model for Assessing User Interface Design},
booktitle={Proceedings of the ACM Symposium on User Interface Software and Technology (UIST 2024)},
series={UIST '24},
year={2024},
location={Pittsburgh, PA},
publisher={ACM},
address={New York, NY, USA},
keywords={ui understanding, user interfaces, guis, machine learning, design},
}DreamStruct: Understanding Slides and User Interfaces via Synthetic Data Generation
@inproceedings{dreamstruct,
title={DreamStruct: Understanding Slides and User Interfaces via Synthetic Data Generation},
author={Yi-Hao Peng and Faria Huq and Yue Jiang and Jason Wu and Amanda Xin Yue Li and Jeffrey Bigham and Amy Pavel},
booktitle={Proceedings of the 18th European Conference on Computer Vision (ECCV 2024)},
series={ECCV '24},
year={2024},
location={Milano, Italy},
keywords={computer vision, synthetic data, presentation, slides, user interfaces, mobile uis},
}Towards Automated Accessibility Report Generation for Mobile Apps
@article{automated-accessibility-reports,
title={Towards Automated Accessibility Report Generation for Mobile Apps},
author={Amanda Swearngin and Jason Wu and Xiaoyi Zhang and Esteban Gomez and Jen Coughenour and Rachel Stukenborg and Bhavya Garg and Greg Hughes and Adriana Hilliard and Jeffrey P. Bigham and Jeffrey Nichols},
journal={ACM Transactions on Computer-Human Interaction},
volume={31},
number={4},
pages={1--44},
year={2024},
publisher={ACM},
keywords={accessibility, evaluation, reports, mobile uis},
}Community-Supported Shared Infrastructure in Support of Speech Accessibility
@article{community-speech-accessibility,
title={Community-Supported Shared Infrastructure in Support of Speech Accessibility},
author={Mark Hasegawa-Johnson and Xiuwen Zheng and Heejin Kim and Clarion Mendes and Meg Dickinson and Erik Hege and Chris Zwilling and Marie Moore Channell and Laura Mattie and Heather Hodges and Lorraine Ramig and Mary Bellard and Mike Shebanek and Leda Sarı and Kaustubh Kalgaonkar and David Frerichs and Jeffrey P. Bigham and Leah Findlater and Colin Lea and Sarah Herrlinger and Peter Korn and Shadi Abou-Zahra and Rus Heywood and Katrin Tomanek and Bob MacDonald},
year={2024},
journal={Journal of Speech, Language, and Hearing Research},
publisher={American Speech-Language-Hearing Association},
pages={1--14},
keywords={speech, accessibility, speech recognition},
}Apple Intelligence Foundation Language Models
@misc{apple-intelligence,
title={Apple Intelligence Foundation Language Models},
author={Tom Gunter and Zirui Wang and Chong Wang and Ruoming Pang and Andy Narayanan and Aonan Zhang and Bowen Zhang and Chen Chen and Chung-Cheng Chiu and David Qiu and Deepak Gopinath and Dian Ang Yap and Dong Yin and Feng Nan and Floris Weers and Guoli Yin and Haoshuo Huang and Jianyu Wang and Jiarui Lu and John Peebles and Ke Ye and Mark Lee and Nan Du and Qibin Chen and Quentin Keunebroek and Sam Wiseman and Syd Evans and Tao Lei and Vivek Rathod and Xiang Kong and Xianzhi Du and Yanghao Li and Yongqiang Wang and Yuan Gao and Zaid Ahmed and Zhaoyang Xu and Zhiyun Lu and Al Rashid and Albin Madappally Jose and Alec Doane and Alfredo Bencomo and Allison Vanderby and Andrew Hansen and Ankur Jain and Anupama Mann Anupama and Areeba Kamal and Bugu Wu and Carolina Brum and Charlie Maalouf and Chinguun Erdenebileg and Chris Dulhanty and Dominik Moritz and Doug Kang and Eduardo Jimenez and Evan Ladd and Fangping Shi and Felix Bai and Frank Chu and Fred Hohman and Hadas Kotek and Hannah Gillis Coleman and Jane Li and Jeffrey P. Bigham and Jeffery Cao and Jeff Lai and Jessica Cheung and Jiulong Shan and Joe Zhou and John Li and Jun Qin and Karanjeet Singh and Karla Vega and Kelvin Zou and Laura Heckman and Lauren Gardiner and Margit Bowler and Maria Cordell and Meng Cao and Nicole Hay and Nilesh Shahdadpuri and Otto Godwin and Pranay Dighe and Pushyami Rachapudi and Ramsey Tantawi and Roman Frigg and Sam Davarnia and Sanskruti Shah and Saptarshi Guha and Sasha Sirovica and Shen Ma and Shuang Ma and Simon Wang and Sulgi Kim and Suma Jayaram and Vaishaal Shankar and Varsha Paidi and Vivek Kumar and Xin Wang and Xin Zheng and Walker Cheng and Yael Shrager and Yang Ye and Yasu Tanaka and Yihao Guo and Yunsong Meng and Zhao Tang Luo and Zhi Ouyang and Alp Aygar and Alvin Wan and Andrew Walkingshaw and Antonie Lin and Arsalan Farooq and Brent Ramerth and Chris Bartels and Chris Chaney and David Riazati and Eric Liang Yang and Erin Feldman and Gabriel Hochstrasser and Guillaume Seguin and Irina Belousova and Joris Pelemans and Karen Yang and Keivan Alizadeh Vahid and Liangliang Cao and Mahyar Najibi and Marco Zuliani and Max Horton and Minsik Cho and Nikhil Bhendawade and Patrick Dong and Piotr Maj and Pulkit Agrawal and Qi Shan and Qichen Fu and Regan Poston and Sam Xu and Shuangning Liu and Sushma Rao and Tashweena Heeramun and Thomas Merth and Uday Rayala and Victor Cui and Vivek Rangarajan Sridhar and Wencong Zhang and Wenqi Zhang and Wentao Wu and Xingyu Zhou and Xinwen Liu and Yang Zhao},
year={2024},
note={arXiv preprint},
keywords={generative AI, apple, responsible, safety},
}Towards Bidirectional Human-AI Alignment: A Systematic Review for Clarifications, Framework, and Future Directions
@misc{bidirection-human-feedback,
title={Towards Bidirectional Human-AI Alignment: A Systematic Review for Clarifications, Framework, and Future Directions},
author={Hua Shen and Tiffany Knearem and Reshmi Ghosh and Kenan Alkiek and Kundan Krishna and Yachuan Liu and Ziqiao Ma and Savvas Petridis and Yi-Hao Peng and Li Qiwei and Sushrita Rakshit and Chenglei Si and Yutong Xie and Jeffrey P. Bigham and Frank Bentley and Joyce Chai and Zachary Lipton and Qiaozhu Mei and Rada Mihalcea and Michael Terry and Diyi Yang and Meredith Ringel Morris and Paul Resnick and David Jurgens},
year={2024},
note={arXiv preprint},
keywords={generative AI, alignment, human feedback, crowdsourcing},
}"This really lets us see the entire world:" Designing a conversational telepresence robot for homebound older adults
@inproceedings{conversational-telepresence,
author={Yaxin Hu and Laura Stegner and Yasmine Kotturi and Caroline Zhang and Yi-Hao Peng and Faria Huq and Yuhang Zhao and Jeffrey P. Bigham and Bilge Mutlu},
title={``This really lets us see the entire world:'' Designing a conversational telepresence robot for homebound older adults},
year={2024},
isbn={9798400705830},
publisher={Association for Computing Machinery},
address={New York, NY, USA},
url={https://doi.org/10.1145/3643834.3660710},
doi={10.1145/3643834.3660710},
abstract={In this paper, we explore the design and use of conversational telepresence robots to help homebound older adults interact with the external world. An initial needfinding study (N=8) using video vignettes revealed older adults’ experiential needs for robot-mediated remote experiences such as exploration, reminiscence and social participation. We then designed a prototype system to support these goals and conducted a technology probe study (N=11) to garner a deeper understanding of user preferences for remote experiences. The study revealed user interactive patterns in each desired experience, highlighting the need of robot guidance, social engagements with the robot and the remote bystanders. Our work identifies a novel design space where conversational telepresence robots can be used to foster meaningful interactions in the remote physical environment. We offer design insights into the robot’s proactive role in providing guidance and using dialogue to create personalized, contextualized and meaningful experiences.},
booktitle={Proceedings of the 2024 ACM Designing Interactive Systems Conference},
pages={2450--2467},
numpages={18},
keywords={robots, human robot interaction, older adults, telepresence, remote, user study, conversational, dialog},
location={IT University of Copenhagen, Denmark},
series={DIS '24},
}‘Your Duties Are To Sweep A Floor Remotely’: Low Information Quality in Job Advertisements is a Barrier to Low-Income Job-Seekers’ Successful Use of Digital Platforms
@inproceedings{low-quality-job-ads,
author={Sara Kingsley and Michael Six Silberman and Clara Wang and Robert Lambeth and Jiayin Zhi and Motahhare Eslami and Beibei Li and Jeffrey P. Bigham},
title={‘Your Duties Are To Sweep A Floor Remotely’: Low Information Quality in Job Advertisements is a Barrier to Low-Income Job-Seekers’ Successful Use of Digital Platforms},
year={2024},
isbn={9798400710179},
publisher={Association for Computing Machinery},
address={New York, NY, USA},
url={https://doi.org/10.1145/3663384.3663403},
doi={10.1145/3663384.3663403},
abstract={Digital platforms have become central in job search. Job-seekers’ experiences with these platforms, however, is a relatively new research area. This paper presents findings from 27 interviews with US low-income job-seekers. Job-seekers encountered many job ads with low information quality on the platforms they used in their searches. These included ads where important information, such as job pay, duration, hours, location, or requirements were missing, unclear, contradictory, or misleading; ads for unethical or illegal work; and ads that did not correspond to paying work but were designed to lure job-seekers into performing free labor or into scams. While job-seekers developed heuristics to navigate low quality ads, these did not always work, and may have caused job-seekers to miss relevant job opportunities. This paper helps answer an open question in HCI research about barriers to low-income job-seekers’ successful use of digital platforms: one barrier is low information quality job ads.},
booktitle={Proceedings of the 3rd Annual Meeting of the Symposium on Human-Computer Interaction for Work},
articleno={18},
numpages={20},
keywords={work, crowdsourcing, labor, ai},
location={Newcastle upon Tyne, United Kingdom},
series={CHIWORK '24},
}UICoder: Finetuning Large Language Models to Generate User Interface Code through Automated Feedback
@inproceedings{uicoder,
title={UICoder: Finetuning Large Language Models to Generate User Interface Code through Automated Feedback},
author={Jason Wu and Eldon Schoop and Alan Leung and Titus Barik and Jeffrey P. Bigham and Jeffrey Nichols},
booktitle={Proceedings of the 2024 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (Volume 1: Long Papers)},
month=jun,
year={2024},
address={Mexico City, Mexico},
publisher={Association for Computational Linguistics},
url={https://aclanthology.org/2024.naacl-long.417},
doi={10.18653/v1/2024.naacl-long.417},
pages={7511--7525},
abstract={Many large language models (LLMs) struggle to consistently generate UI code that compiles and produces visually relevant designs. Existing approaches to improve generation rely either on expensive human feedback or distilling a proprietary model. In this paper, we explore the use of automated feedback (compilers and multi-modal models) to guide LLMs to generate high-quality UI code. Our method starts with an existing LLM and iteratively produces improved models by self-generating a large synthetic dataset using an original model, applying automated tools to aggressively filter, score, and de-duplicate the data into a refined higher quality dataset, and producing a new LLM by finetuning the original on the refined dataset. We applied our approach to several open-source LLMs and compared the resulting performance to baseline models with both automated metrics and human preferences. Our results show the resulting models outperform all other downloadable baselines and approach the performance of larger proprietary models.},
keywords={generative AI, ui understanding, generative ui},
}Deconstructing the Veneer of Simplicity: Co-Designing Introductory Generative AI Workshops with Local Entrepreneurs
@inproceedings{deconstructing-genai,
title={Deconstructing the Veneer of Simplicity: Co-Designing Introductory Generative AI Workshops with Local Entrepreneurs},
author={Yasmine Kotturi and Angel Anderson and Glenn Ford and Michael Skirpan and Jeffrey P. Bigham},
year={2024},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
location={Honolulu, Hawaii},
keywords={generative AI, community-based research, community forge, help desk},
}COMPA: Using Conversation Context to Achieve Common Ground in AAC
@inproceedings{compa,
title={COMPA: Using Conversation Context to Achieve Common Ground in AAC},
author={Stephanie Valencia and Jessica Huynh and Emma Y. Jiang and Yufei Wu and Teresa Wan and Zixuan Zheng and Henny Admoni and Jeffrey P. Bigham and Amy Pavel},
year={2024},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
location={Honolulu, Hawaii},
keywords={aac, accessibility, conversation, system},
}Talaria: Interactively Optimizing Machine Learning Models for Efficient Inference
@inproceedings{talaria,
title={Talaria: Interactively Optimizing Machine Learning Models for Efficient Inference},
author={Fred Hohman and Chaoqun Wang and Jinmook Lee and Jochen Görtler and Dominik Moritz and Jeffrey P. Bigham and Zhile Ren and Cecile Foret and Qi Shan and Xiaoyi Zhang},
year={2024},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
location={Honolulu, Hawaii},
keywords={machine learning, compression, efficient, visualization, infovis},
award={nomination},
}2023
USB: A Unified Summarization Benchmark Across Tasks and Domains
@inproceedings{usb-summarization-benchmark,
author={Kundan Krishna and Prakhar Gupta and Sanjana Ramprasad and Byron C. Wallace and Jeffrey P. Bigham and Zachary C. Lipton},
title={USB: A Unified Summarization Benchmark Across Tasks and Domains},
booktitle={Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP 2023)},
series={EMNLP-Findings '23},
year={2023},
location={Virtual},
publisher={ACM},
address={Singapore},
keywords={llms, summarization, benchmark, nlp},
}Never-ending Learning of User Interfaces
@inproceedings{never-ending-learning-of-uis,
author={Jason Wu and Rebecca Krosnick and Eldon Schoop and Amanda Swearngin and Jeffrey P. Bigham and Jeffrey Nichols},
title={Never-ending Learning of User Interfaces},
booktitle={Proceedings of the ACM Symposium on User Interface Software and Technology (UIST 2023)},
series={UIST '23},
year={2023},
location={San Francisco, CA},
publisher={ACM},
address={New York, NY, USA},
keywords={ui understanding, user interfaces, guis, machine learning},
}Latent Phrase Matching for Dysarthric Speech
@inproceedings{lpm-dysarthric,
author={Colin Lea and Dianna Yee and Jaya Narain and Zifang Huang and Lauren Tooley and Jeffrey P. Bigham and Leah Findlater},
title={Latent Phrase Matching for Dysarthric Speech},
year={2023},
keywords={speech, dysarthric, latent phrase matching, accessibility},
booktitle={Proceedings of INTERSPEECH 2023},
location={Dublin, Ireland},
}Downstream Datasets Make Surprisingly Good Pretraining Corpora
@inproceedings{pretraining-down,
author={Kundan Krishna and Saurabh Garg and Jeffrey P. Bigham and Zachary C. Lipton},
title={Downstream Datasets Make Surprisingly Good Pretraining Corpora},
year={2023},
keywords={pretraining, downstream, bias, safety, ml, nlp, language, summarization, summary},
booktitle={Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (ACL 2023)},
location={Toronto, Canada},
}WebUI: A Dataset for Enhancing Visual UI Understanding with Web Semantics
@inproceedings{webui,
title={WebUI: A Dataset for Enhancing Visual UI Understanding with Web Semantics},
author={Jason Wu and Siyan Wang and Siman Shen and Yi-Hao Peng and Jeffrey Nichols and Jeffrey P. Bigham},
year={2023},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
location={Hamburg, Germany},
keywords={ui understanding, accessibility, web},
award={nomination},
}From User Perceptions to Technical Improvement: Enabling People Who Stutter to Better Use Speech Recognition
@inproceedings{stutter-speech,
title={From User Perceptions to Technical Improvement: Enabling People Who Stutter to Better Use Speech Recognition},
author={Colin Lea and Zifang Huang and Jaya Narain and Lauren Tooley and Dianna Yee and Tien Dung Tran and Panayiotis Georgiou and Jeffrey P. Bigham and Leah Findlater},
year={2023},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
location={Hamburg, Germany},
keywords={speech recognition, stutter, speech, accessibility},
}Screen Correspondence: Mapping Interchangeable Elements Between UIs
@misc{screen-correspondence,
author={Jason Wu and Amanda Swearngin and Xiaoyi Zhang and Jeffrey Nichols and Jeffrey P. Bigham},
title={Screen Correspondence: Mapping Interchangeable Elements Between UIs},
note={arXiv preprint},
year={2023},
keywords={ui understanding, machine learning, guis},
}2022
Diffscriber: Describing Visual Design Changes to Support Mixed-Ability Collaborative Presentation Authoring
@inproceedings{diffscriber,
author={Yi-Hao Peng and Jason Wu and Jeffrey P. Bigham and Amy Pavel},
title={Diffscriber: Describing Visual Design Changes to Support Mixed-Ability Collaborative Presentation Authoring},
booktitle={Proceedings of the ACM Symposium on User Interface Software and Technology (UIST 2022)},
series={UIST '22},
year={2022},
location={Virtual},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, slideshow, presentations, blind, non-visual},
}InstructDial: Improving Zero and Few-shot Generalization in Dialogue through Instruction Tuning
@inproceedings{instruction-tuning,
author={Prakhar Gupta and Cathy Jiao and Yi-Ting Yeh and Shikib Mehri and Maxine Eskenazi and Jeffrey P. Bigham},
title={InstructDial: Improving Zero and Few-shot Generalization in Dialogue through Instruction Tuning},
booktitle={Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP 2022)},
series={EMNLP-Findings '22},
year={2022},
location={Virtual},
publisher={ACM},
address={Abu Dhabi, UAE},
keywords={dialog, chorus, instruction tuning, nlp, language},
}Reflow: Automatically Improving Touch Interactions in Mobile Applications through Pixel-based Refinements
@misc{reflow,
author={Jason Wu and Titus Barik and Xiaoyi Zhang and Colin Lea and Jeffrey Nichols and Jeffrey P. Bigham},
title={Reflow: Automatically Improving Touch Interactions in Mobile Applications through Pixel-based Refinements},
year={2022},
keywords={accessibility, ability-based design, abd, pixels},
}DialCrowd 2.0: A Quality-Focused Dialog System Crowdsourcing Toolkit
@misc{dialcrowd2,
author={Jessica Huynh and Ting-Rui Chiang and Jeffrey P. Bigham and Maxine Eskenazi},
title={DialCrowd 2.0: A Quality-Focused Dialog System Crowdsourcing Toolkit},
year={2022},
keywords={crowdsourcing, dialog, data, collection, annotation, conversation, crowd, workers, quality, chorus},
}Computation Where the (inter)Action Is
@article{soundwatch-technical-perspective,
author={Jeffrey P. Bigham},
journal={Communications of the ACM},
publisher={ACM},
title={Computation Where the (inter)Action Is},
year={2022},
month=jun,
volume={65},
url={https://dl.acm.org/doi/10.1145/3531446},
number={6},
doi={10.1145/3531446},
keywords={accessibility, hci, systems, machine learning},
}Target-Guided Dialogue Response Generation Using Commonsense and Data Augmentation
@inproceedings{target-guided,
title={Target-Guided Dialogue Response Generation Using Commonsense and Data Augmentation},
author={Prakhar Gupta and Harsh Jhamtani and Jeffrey P. Bigham},
year={2022},
booktitle={Proceedings of NAACL 2022 - Findings},
location={Seattle, WA},
keywords={dialogue, response, chorus, data},
}Tech Help Desk: Support for Local Entrepreneurs Addressing the Long Tail of Computing Challenges
@inproceedings{tech-help-desk,
title={Tech Help Desk: Support for Local Entrepreneurs Addressing the Long Tail of Computing Challenges},
author={Yasmine Kotturi and Herman T. Johnson Jr. and Michael Skirpan and Sarah E. Fox and Jeffrey P. Bigham and Amy Pavel},
year={2022},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
location={New Orleans, Louisiana},
keywords={work, techo},
}Anticipate and Adjust: Cultivating Access in Human-Centered Methods
@inproceedings{anticipate-a11y,
title={Anticipate and Adjust: Cultivating Access in Human-Centered Methods},
author={Kelly Mack and Emma J. McDonnell and Venkatesh Potluri and Maggie Xu and Jailyn Zabala and Jeffrey P. Bigham and Jennifer Mankoff and Cynthia Bennett},
year={2022},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
location={New Orleans, Louisiana},
keywords={accessibility, methods, research},
award={nomination},
}Nonverbal Sound Detection for Disordered Speech
@inproceedings{nonverbal-sound-detection,
title={Nonverbal Sound Detection for Disordered Speech},
author={Colin Lea and Zifang Huang and Dhruv Jain and Lauren Tooley and Zienab Liaghat and Shrinath Thelapurath and Leah Findlater and Jeffrey P. Bigham},
year={2022},
booktitle={Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2022)},
location={Virtual, McVirtualand},
keywords={accessibility, stuttering, sound, speech, detection},
}2021
Slidecho: Flexible Non-Visual Exploration of Presentation Videos
@inproceedings{slidecho,
author={Yi-Hao Peng and Jeffrey P. Bigham and Amy Pavel},
title={Slidecho: Flexible Non-Visual Exploration of Presentation Videos},
booktitle={Proceedings of the 23rd International ACM SIGACCESS Conference on Computers \& Accessibility},
series={ASSETS '21},
year={2021},
location={Virtual Event},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, blind, non-visual, slides, presentations},
}Aided Nonverbal Communication through Physical Expressive Objects
@inproceedings{physical-expressive-objects,
author={Stephanie Valencia and Mark Steidl and Michael L. Rivera and Cynthia L. Bennett and Jeffrey P. Bigham and Henny Admoni},
title={Aided Nonverbal Communication through Physical Expressive Objects},
booktitle={Proceedings of the 23rd International ACM SIGACCESS Conference on Computers \& Accessibility},
series={ASSETS '21},
year={2021},
location={Virtual Event},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, aac, fabrication, physical, conversation, co-design},
award={best paper},
}TutorialLens: Authoring Interactive Augmented Reality Tutorials Through Narration and Demonstration
@inproceedings{tutorial-lens,
author={Junhan Kong and Dena Sabha and Jeffrey P. Bigham and Amy Pavel and Anhong Guo},
title={TutorialLens: Authoring Interactive Augmented Reality Tutorials Through Narration and Demonstration},
booktitle={Proceedings of the Symposium on Spatial User Interaction (SUI 2021)},
series={SUI '21},
year={2021},
location={Virtual},
publisher={ACM},
address={New York, NY, USA},
keywords={augmented reality, ar, mr, xr, mixed-reality, learning, help},
}Does Pretraining for Summarization Require Knowledge Transfer?
@inproceedings{pretraining-babble,
author={Kundan Krishna and Jeffrey P. Bigham and Zachary C. Lipton},
title={Does Pretraining for Summarization Require Knowledge Transfer?},
booktitle={Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP 2021)},
series={EMNLP-Findings '21},
year={2021},
location={Virtual},
publisher={ACM},
address={New York, NY, USA},
keywords={pretraining, summarization, nlp, language, generation, bias},
}Screen Parsing: Towards Reverse Engineering of UI Models from Screenshots
@inproceedings{screen-parsing,
author={Jason Wu and Xiaoyi Zhang and Jeffrey Nichols and Jeffrey P. Bigham},
title={Screen Parsing: Towards Reverse Engineering of UI Models from Screenshots},
booktitle={Proceedings of the ACM Symposium on User Interface Software and Technology (UIST 2021)},
series={UIST '21},
year={2021},
location={Virtual},
publisher={ACM},
address={New York, NY, USA},
keywords={ui understanding, user interfaces, pixels},
}Analysis and Tuning of a Voice Assistant System for Dysfluent Speech
@inproceedings{dysfluent-speech,
title={Analysis and Tuning of a Voice Assistant System for Dysfluent Speech},
author={Vikramjit Mitra and Zifang Huang and Colin Lea and Lauren Tooley and Sarah Wu and Darren Botten and Ashwini Palekar and Shrinath Thelapurath and Panayiotis Georgiou and Sachin Kajarekar and Jeffrey Bigham},
year={2021},
booktitle={Proceedings of INTERSPEECH 2021},
location={Brno, Czech Republic},
keywords={speech recognition, dysfluent, asr, stuttering, atypical},
}Accessibility and The Crowded Sidewalk: Micromobility’s Impact on Public Space
@inproceedings{crowded-sidewalk,
title={Accessibility and The Crowded Sidewalk: Micromobility’s Impact on Public Space},
author={Cynthia L. Bennett and Emily E. Ackerman and Bonnie Fan and Jeffrey P. Bigham and Patrick Carrington and Sarah E. Fox},
year={2021},
booktitle={Proceedings of the ACM Conference on Designing Interactive Systems (DIS 2021)},
location={Nowhere and Everywhere},
keywords={micromobility, accessibility, activism, governance, public space},
award={nomination},
}Generating SOAP Notes from Doctor-Patient Conversations Using Modular Summarization Techniques
@inproceedings{generating-soap-notes,
title={Generating SOAP Notes from Doctor-Patient Conversations Using Modular Summarization Techniques},
author={Kundan Krishna and Sopan Khosla and Jeffrey P. Bigham and Zachary C. Lipton},
year={2021},
booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics (ACL 2021)},
location={Virtual, McVirtualand},
keywords={summarization, nlp},
}Synthesizing Adversarial Negative Responses for Robust Response Ranking and Evaluation
@inproceedings{adversarial-negative-examples-synth,
title={Synthesizing Adversarial Negative Responses for Robust Response Ranking and Evaluation},
author={Prakhar Gupta and Yulia Tsvetkov and Jeffrey P. Bigham},
year={2021},
booktitle={Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics - Findings (ACL-Findings 2021)},
location={Virtual, McVirtualand},
keywords={dialog, control, responses, negative, chorus},
}When Can Accessibility Help?: An Exploration of Accessibility Feature Recommendation on Mobile Devices
@inproceedings{recommending-accessibility,
title={When Can Accessibility Help?: An Exploration of Accessibility Feature Recommendation on Mobile Devices},
author={Jason Wu and Gabriel Reyes and Samuel C. White and Xiaoyi Zhang and Jeffrey P. Bigham},
year={2021},
booktitle={Proceedings of the 18th International Web for All Conference (W4A 2021)},
location={Virtual, McVirtualand},
keywords={accessibility, recommendation, mobile},
award={best paper},
}Controlling Dialogue Generation with Semantic Exemplars
@inproceedings{controlling-dialogue,
title={Controlling Dialogue Generation with Semantic Exemplars},
author={Prakhar Gupta and Jeffrey P. Bigham and Yulia Tsvetkov and Amy Pavel},
year={2021},
booktitle={Proceedings of NAACL-HLT 2021},
location={Virtual, McVirtualand},
keywords={dialogue, control},
}SEP-28K: A Dataset for Stuttering Event Detection from Podcasts with People Who Stutter
@inproceedings{sep28k,
  author    = {Colin Lea and Vikramjit Mitra and Aparna Joshi and Sachin Kajarekar and Jeffrey P. Bigham},
  title     = {SEP-28K: A Dataset for Stuttering Event Detection from Podcasts with People Who Stutter},
  booktitle = {Proceedings of the IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP 2021)},
  year      = {2021},
  location  = {Virtual, McVirtualand},
  keywords  = {accessibility, stuttering, speech, detection, dataset},
}Screen Recognition: Creating Accessibility Metadata for Mobile Applications from Pixels
@inproceedings{screen-recognition,
  author    = {Xiaoyi Zhang and Lilian de Greef and Amanda Swearngin and Samuel White and Kyle Murray and Lisa Yu and Qi Shan and Jeffrey Nichols and Jason Wu and Chris Fleizach and Aaron Everitt and Jeffrey P. Bigham},
  title     = {Screen Recognition: Creating Accessibility Metadata for Mobile Applications from Pixels},
  booktitle = {Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
  year      = {2021},
  location  = {Virtual, McVirtualand},
  keywords  = {accessibility, computer vision, cv, blind, voiceover, screen reader},
  award     = {best paper},
}``It’s Complicated'': Negotiating Accessibility and (Mis)Representation in Image Descriptions of Race, Gender, and Disability
@inproceedings{description-representations,
title={``It’s Complicated'': Negotiating Accessibility and (Mis)Representation in Image Descriptions of Race, Gender, and Disability},
author={Cynthia L. Bennett and Cole Gleason and Morgan Klaus Scheuerman and Jeffrey P. Bigham and Anhong Guo and Alexandra To},
year={2021},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
location={Virtual, McVirtualand},
keywords={accessibility, computer vision, cv, blind, voiceover, screen reader},
award={nomination},
}Say It All: Feedback for Improving Non-Visual Presentation Accessibility
@inproceedings{say-it-all,
title={Say It All: Feedback for Improving Non-Visual Presentation Accessibility},
author={Yi-Hao Peng and JiWoong Jang and Jeffrey P. Bigham and Amy Pavel},
year={2021},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
location={Virtual, McVirtualand},
keywords={accessibility, computer vision, presentation, feedback, cv, blind, voiceover, screen reader},
}Co-designing Socially Assistive Sidekicks for Motion-based AAC
@inproceedings{codesign-aac-sidekick,
title={Co-designing Socially Assistive Sidekicks for Motion-based AAC},
author={Stephanie Valencia and Michal Luria and Amy Pavel and Jeffrey P. Bigham and Henny Admoni},
year={2021},
booktitle={Proceedings of the ACM/IEEE International Conference on Human-Robot Interaction},
location={Virtual, McVirtualand},
keywords={accessibility, aac, agency, robotics, sidekick, codesign, design},
award={nomination},
}2020
Predicting risk of dyslexia with an online gamified test
@article{dytective-plosone,
author={Rello, Luz and Baeza-Yates, Ricardo and Ali, Abdullah and Bigham, Jeffrey P. and Serra, Miquel},
journal={PLOS ONE},
publisher={Public Library of Science},
title={Predicting risk of dyslexia with an online gamified test},
year={2020},
month=dec,
volume={15},
url={https://doi.org/10.1371/journal.pone.0241687},
pages={1--15},
abstract={Dyslexia is a specific learning disorder related to school failure. Detection is both crucial and challenging, especially in languages with transparent orthographies, such as Spanish. To make detecting dyslexia easier, we designed an online gamified test and a predictive machine learning model. In a study with more than 3,600 participants, our model correctly detected over 80\% of the participants with dyslexia. To check the robustness of the method we tested our method using a new data set with over 1,300 participants with age customized tests in a different environment -a tablet instead of a desktop computer- reaching a recall of over 78\% for the class with dyslexia for children 12 years old or older. Our work shows that dyslexia can be screened using a machine learning approach. An online screening tool in Spanish based on our methods has already been used by more than 200,000 people.},
number={12},
doi={10.1371/journal.pone.0241687},
keywords={accessibility, dyslexia, game, prediction},
}Rescribe: Authoring and Automatically Editing Audio Descriptions
@inproceedings{rescribe,
author={Amy Pavel and Gabriel Reyes and Jeffrey P. Bigham},
title={Rescribe: Authoring and Automatically Editing Audio Descriptions},
booktitle={Proceedings of the ACM Symposium on User Interface Software and Technology (UIST 2020)},
series={UIST '20},
year={2020},
location={Minneapolis, MN},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, blind, non-visual, audio description, human-ai interaction},
}Making Mobile Augmented Reality Applications Accessible
@inproceedings {arvr-accessibility,
author={Jaylin Herskovitz, Jason Wu, Samuel White, Amy Pavel, Gabriel Reyes, Anhong Guo, and Jeffrey P. Bigham},
title={Making Mobile Augmented Reality Applications Accessible},
booktitle={Proceedings of the 22nd International ACM SIGACCESS Conference on Computers \& Accessibility},
series={ASSETS '20},
year={2020},
location={Athens, Greece},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, blind, non-visual, augmented reality, ar},
}Disability and the COVID-19 Pandemic: Using Twitter to Understand Accessibility during Rapid Societal Transition
@inproceedings {covid19-accessibility,
author={Cole Gleason, Stephanie Valencia-Valencia, Lynn Kirabo, Jason Wu, Anhong Guo, Elizabeth J. Carter, Jeffrey P. Bigham, Cynthia L. Bennett, and Amy Pavel},
title={Disability and the COVID-19 Pandemic: Using Twitter to Understand Accessibility during Rapid Societal Transition},
booktitle={Proceedings of the 22nd International ACM SIGACCESS Conference on Computers \& Accessibility},
series={ASSETS '20},
year={2020},
location={Athens, Greece},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, blind, disability, covid19},
}Making GIFs Accessible
@inproceedings {making-gifs-accessible,
author={Cole Gleason and Amy Pavel and Himalini Gururaj and Kris M. Kitani and Jeffrey P. Bigham},
title={Making GIFs Accessible},
booktitle={Proceedings of the 22nd International ACM SIGACCESS Conference on Computers \& Accessibility},
series={ASSETS '20},
year={2020},
location={Athens, Greece},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, blind, non-visual, gifs},
}Conversational Agency in Augmentative and Alternative Communication
@inproceedings {aac-agency,
title={Conversational Agency in Augmentative and Alternative Communication},
author={Stephanie Valencia and Amy Pavel and Jared Santa Maria and Seunga (Gloria) Yu and Jeffrey P. Bigham and Henny Admoni},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
year={2020},
location={Honolulu, HI},
keywords={aac, augmentative, alternative, communication, agency},
award={nomination},
}Twitter A11y: Making Images on Social Media Accessible
@inproceedings {twitter-a11y,
title={Twitter A11y: Making Images on Social Media Accessible},
author={Cole Gleason and Amy Pavel and Emma McCamey and Christina Low and Patrick Carrington and Kris Kitani and Jeffrey P. Bigham},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
year={2020},
location={Honolulu, HI},
keywords={images, accessibility, vizwiz, a11y, twitter},
award={nomination},
}Automated Class Discovery and One-Shot Interactions for Acoustic Activity Recognition
@inproceedings {listen-learner,
title={Automated Class Discovery and One-Shot Interactions for Acoustic Activity Recognition},
author={Jason Wu and Chris Harrison and Jeffrey P. Bigham and Gierad Laput},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
year={2020},
location={Honolulu, HI},
keywords={audio, acoustic, scribe, one-shot},
award={nomination},
}Becoming the Super Turker: Increasing Wages via a Strategy from High Earning Workers
@inproceedings {super-turker,
author={Saiph Savage and Chun-Wei Chiang and Susumu Saito and Carlos Toxtli and Jeffrey P. Bigham},
title={Becoming the Super Turker: Increasing Wages via a Strategy from High Earning Workers},
booktitle={Proceedings of the World Wide Web Conference (WebConf 2020)},
year={2020},
location={Taiwan},
keywords={mechanical turk, crowd work, crowdsourcing},
}2019
Predicting the Working Time of Microtasks Based on Workers' Perception of Prediction Errors
@article {predicting-work-time,
author={Susumu Saito and Chun-Wei Chiang and Saiph Savage and Teppei Nakano and Tetsunori Kobayashi and Jeffrey P. Bigham},
title={Predicting the Working Time of Microtasks Based on Workers' Perception of Prediction Errors},
journal={Human Computation},
year={2019},
pages={192--219},
keywords={crowdsourcing, work, crowd, mechanical turk, mturk},
}InstructableCrowd: Creating IF-THEN Rules for Smartphones via Conversations with the Crowd
@article {if-then-crowd,
author={Ting-Hao K. Huang and Amos Azaria and Oscar J. Romero and Jeffrey P. Bigham},
title={InstructableCrowd: Creating IF-THEN Rules for Smartphones via Conversations with the Crowd},
journal={Human Computation},
year={2019},
pages={101--131},
keywords={chorus, smartphone, rules, crowd, crowdsourcing},
}Making Memes Accessible
@inproceedings {accessible-memes,
author={Cole Gleason and Amy Pavel and Xingyu Liu and Patrick Carrington and Lydia B. Chilton and Jeffrey P. Bigham},
title={Making Memes Accessible},
booktitle={Proceedings of the 21st International ACM SIGACCESS Conference on Computers \& Accessibility},
series={ASSETS '19},
year={2019},
location={Pittsburgh, PA},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, blind, non-visual, memes},
}X-Ray: Screenshot Accessibility via Embedded Metadata
@inproceedings {x-ray,
author={Sujeath Pareddy and Anhong Guo and Jeffrey P. Bigham},
title={X-Ray: Screenshot Accessibility via Embedded Metadata},
booktitle={Proceedings of the 21st International ACM SIGACCESS Conference on Computers \& Accessibility},
series={ASSETS '19},
year={2019},
location={Pittsburgh, PA},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, blind, non-visual, vizwiz},
}Investigating Evaluation of Open-Domain Dialogue Systems With Human Generated Multiple References
@inproceedings {multiple-references,
author={Prakhar Gupta and Shikib Mehri and Tiancheng Zhao and Amy Pavel and Maxine Eskenazi and Jeffrey P. Bigham},
title={Investigating Evaluation of Open-Domain Dialogue Systems With Human Generated Multiple References},
booktitle={Proceedings of SIGDIAL 2019},
year={2019},
keywords={dialog, chorus, evaluation, machine learning, conversation},
}StateLens: A Reverse Engineering Solution for Making Existing Dynamic Touchscreens Accessible
@inproceedings {statelens,
author={Anhong Guo and Junhan Kong and Michael Rivera and Frank F. Xu and Jeffrey P. Bigham},
title={StateLens: A Reverse Engineering Solution for Making Existing Dynamic Touchscreens Accessible},
booktitle={Proceedings of the ACM Symposium on User Interface Software and Technology (UIST 2019)},
year={2019},
keywords={accessibility, vizwiz, vizlens, smartwatch, blind, visual assistance},
}ScratchThat: Supporting Command-Agnostic Speech Repair in Voice-Driven Assistants
@article {scratchthat,
author={Jason Wu and Karan Ahuja and Richard Li and Victor Chen and Jeffrey P. Bigham},
title={ScratchThat: Supporting Command-Agnostic Speech Repair in Voice-Driven Assistants},
journal={UbiComp Journal: ACM Journal on Interactive, Mobile, Wearable and Ubiquitous Technologies},
year={2019},
location={London},
keywords={conversation, speech, dialog, chorus},
}VizWiz-Priv: A Dataset for Recognizing the Presence and Purpose of Private Visual Information in Images Taken by Blind People
@inproceedings {vizwiz-priv,
author={Danna Gurari and Qing Li and Chi Lin and Yinan Zhao and Anhong Guo and Abigale Stangl and Jeffrey P. Bigham},
title={VizWiz-Priv: A Dataset for Recognizing the Presence and Purpose of Private Visual Information in Images Taken by Blind People},
booktitle={CVPR 2019},
year={2019},
location={Los Angeles, CA},
keywords={vizwiz, vqa, computer vision, dataset, accessibility, privacy},
}"It's almost like they're trying to hide it": How User-Provided Image Descriptions Have Failed to Make Twitter Accessible
@inproceedings {twitter-alt-text,
author={Cole Gleason and Patrick Carrington and Cameron Cassidy and Meredith Ringel Morris and Kris M. Kitani and Jeffrey P. Bigham},
title={``It's almost like they're trying to hide it'': How User-Provided Image Descriptions Have Failed to Make Twitter Accessible},
booktitle={Proceedings of the World Wide Web Conference (WebConf 2019)},
year={2019},
location={San Francisco},
keywords={accessibility, social media, twitter, alt text, image description},
}TurkScanner: Predicting the Hourly Wage of Microtasks
@inproceedings {turk-scanner,
title={TurkScanner: Predicting the Hourly Wage of Microtasks},
author={Susumu Saito and Chun-Wei Chiang and Saiph Savage and Teppei Nakano and Tetsunori Kobayashi and Jeffrey P. Bigham},
booktitle={Proceedings of the World Wide Web Conference (WebConf 2019)},
year={2019},
location={San Francisco},
keywords={crowdsourcing, human computation, wages, work, pay rate, human augmentation},
}Worker Demographics and Earnings on Amazon Mechanical Turk: An Exploratory Analysis
@inproceedings {worker-demographics,
title={Worker Demographics and Earnings on Amazon Mechanical Turk: An Exploratory Analysis},
author={Kotaro Hara and Abigail Adams and Kristy Milland and Saiph Savage and Benjamin V. Hanrahan and Jeffrey P. Bigham and Chris Callison-Burch},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems - Extended Abstracts},
year={2019},
location={Glasgow, Scotland},
keywords={crowdsourcing, crowdworkers, crowd, demographics, workers, future of work},
}App Usage Predicts Cognitive Ability in Older Adults
@inproceedings {app-usage-older-adults,
title={App Usage Predicts Cognitive Ability in Older Adults},
author={Mitchell L. Gordon and Leon Gatys and Carlos Guestrin and Jeffrey P. Bigham and Andrew Trister and Kayur Patel},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
year={2019},
location={Glasgow, Scotland},
keywords={cognitive ability, apps, apple, older adults},
}2018
Crowd-AI Camera Sensing in the Real World
@article {zensors2,
author={Guo, Anhong and Jain, Anuraag and Ghose, Shomiron and Laput, Gierad and Harrison, Chris and Bigham, Jeffrey P.},
title={Crowd-AI Camera Sensing in the Real World},
journal={Proc. ACM Interact. Mob. Wearable Ubiquitous Technol.},
issue_date={September 2018},
volume={2},
number={3},
month=sep,
year={2018},
issn={2474-9567},
pages={111:1--111:20},
articleno={111},
numpages={20},
url={https://doi.org/10.1145/3264921},
doi={10.1145/3264921},
acmid={3264921},
publisher={ACM},
address={New York, NY, USA},
keywords={Internet of things, Smart environments, camera, computer vision, crowdsourcing, deployment, human computation, machine learning, sensing},
video={https://youtu.be/3T538wzrQOM},
}Investigating Cursor-based Interactions to Support Non-Visual Exploration in the Real World
@inproceedings {nonvisual-cursors,
author={Anhong Guo and Saige McVea and Xu Wang and Patrick Clary and Ken Goldman and Yang Li and Yu Zhong and Jeffrey P. Bigham},
title={Investigating Cursor-based Interactions to Support Non-Visual Exploration in the Real World},
booktitle={Proceedings of the 18th International ACM SIGACCESS Conference on Computers \& Accessibility},
series={ASSETS '18},
year={2018},
location={Galway, Ireland},
numpages={7},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, blind, non-visual, vizwiz},
}Exploring the Data Tracking and Sharing Preferences of Wheelchair Athletes
@inproceedings {spokesense,
author={Patrick Carrington and Gierad Laput and Jeffrey P. Bigham},
title={Exploring the Data Tracking and Sharing Preferences of Wheelchair Athletes},
booktitle={Proceedings of the 18th International ACM SIGACCESS Conference on Computers \& Accessibility},
series={ASSETS '18},
year={2018},
location={Galway, Ireland},
numpages={7},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, wheelchair, basketball},
award={nomination},
}Learning from the Front: People with Disabilities as Early Adopters of AI
@inproceedings {ai-and-hci-people-with-disabilities-as-early-adopters,
author={Jeffrey P. Bigham and Patrick Carrington},
title={Learning from the Front: People with Disabilities as Early Adopters of AI},
booktitle={Submitted to HCIC 2018},
year={2018},
keywords={accessibility, AI, HCI},
}Crowdsourcing the Installation and Maintenance of Indoor Localization Infrastructure to Support Blind Navigation
@article {luzdeploy,
author={Cole Gleason and Dragan Ahmetovic and Saiph Savage and Carlos Toxtli and Carl Posthuma and Chieko Asakawa and Kris M. Kitani and Jeffrey P. Bigham},
title={Crowdsourcing the Installation and Maintenance of Indoor Localization Infrastructure to Support Blind Navigation},
journal={UbiComp Journal: ACM Journal on Interactive, Mobile, Wearable and Ubiquitous Technologies},
year={2018},
location={Singapore},
keywords={collective action, beacons, indoor navigation, accessibility},
}Striving to Earn More: A Survey of Work Strategies and Tool Use Among Crowd Workers
@inproceedings {worker-strategies,
author={Toni Kaplan and Susumu Saito and Kotaro Hara and Jeffrey P. Bigham},
title={Striving to Earn More: A Survey of Work Strategies and Tool Use Among Crowd Workers},
booktitle={Proceedings of the AAAI Conference on Human Computation and Crowdsourcing (HCOMP 2018)},
year={2018},
location={Zurich},
keywords={crowdwork, crowdsourcing, strategies, human augmentation},
}VizWiz Grand Challenge: Answering Visual Questions from Blind People
@inproceedings {vizwiz-dataset,
author={Danna Gurari and Qing Li and Abigale J. Stangl and Anhong Guo and Chi Lin and Kristen Grauman and Jiebo Luo and Jeffrey P. Bigham},
title={VizWiz Grand Challenge: Answering Visual Questions from Blind People},
booktitle={Accepted to CVPR 2018},
year={2018},
location={Salt Lake City, Utah},
keywords={vizwiz, vqa, computer vision, dataset, accessibility},
url={https://arxiv.org/abs/1802.08218},
}Vocal Programming for People with Upper-Body Motor Impairments
@inproceedings {vocal-programming,
author={Lucas Rosenblatt and Patrick Carrington and Kotaro Hara and Jeffrey P. Bigham},
title={Vocal Programming for People with Upper-Body Motor Impairments},
booktitle={Proceedings of the International Conference on Web for All (W4A 2018)},
year={2018},
location={Lyon, France},
keywords={speech recognition, speech, programming, accessibility},
}Towards Language Independent Detection of Dyslexia with a Web-based Game
@inproceedings {language-independent,
author={Maria Rauschenberger and Luz Rello and Ricardo Baeza-Yates and Jeffrey P. Bigham},
title={Towards Language Independent Detection of Dyslexia with a Web-based Game},
booktitle={Proceedings of the International Conference on Web for All (W4A 2018)},
year={2018},
location={Lyon, France},
keywords={dyslexia, detection, children, dytective, music, accessibility},
}A Data-Driven Analysis of Workers' Earnings on Amazon Mechanical Turk
@inproceedings {crowd-earnings,
author={Kotaro Hara and Abigail Adams and Kristy Milland and Saiph Savage and Chris Callison-Burch and Jeffrey P. Bigham},
title={A Data-Driven Analysis of Workers' Earnings on Amazon Mechanical Turk},
booktitle={SIGCHI Conference on Human Factors in Computing Systems},
series={CHI '18},
year={2018},
location={Montreal, Canada},
numpages={14},
publisher={ACM},
address={New York, NY, USA},
keywords={earnings, work, crowdsourcing, microwork, data-driven},
url={https://arxiv.org/abs/1712.05796},
award={nomination},
}Evorus: A Crowd-powered Conversational Assistant Built to Automate Itself Over Time
@inproceedings {evorus,
author={Ting-Hao (Kenneth) Huang and Joseph Chee Chang and Jeffrey P. Bigham},
title={Evorus: A Crowd-powered Conversational Assistant Built to Automate Itself Over Time},
booktitle={SIGCHI Conference on Human Factors in Computing Systems},
series={CHI '18},
year={2018},
location={Montreal, Canada},
numpages={14},
publisher={ACM},
address={New York, NY, USA},
keywords={chorus, dialog systems, conversation, crowdsourcing},
award={nomination},
}2017
Scribe: Deep Integration of Human and Machine Intelligence to Caption Speech in Real Time
@article {scribe-cacm2017,
author={Walter S. Lasecki and Christopher D. Miller and Iftekhar Naim and Raja Kushalnagar and Adam Sadilek and Daniel Gildea and Jeffrey P. Bigham},
title={Scribe: Deep Integration of Human and Machine Intelligence to Caption Speech in Real Time},
journal={Communications of the ACM},
volume={60},
number={11},
year={2017},
month=nov,
keywords={crowdsourcing, scribe, speech, human computation, accessibility},
}WearMail: On-the-Go Access to Information in Your Email with a Privacy-Preserving Human Computation Workflow
@inproceedings {wearmail,
author={Saiganesh Swaminathan and Raymond Fok and Fanglin Chen and Ting-Hao (Kenneth) Huang and Irene Lin and Rohan Jadvani and Walter S. Lasecki and Jeffrey P. Bigham},
title={WearMail: On-the-Go Access to Information in Your Email with a Privacy-Preserving Human Computation Workflow},
booktitle={Proceedings of the ACM Symposium on User Interface Software and Technology (UIST 2017)},
year={2017},
keywords={crowdsourcing, wearables, smartwatch, privacy, human computation},
}A 10-Month-Long Deployment Study of On-Demand Recruiting for Low-Latency Crowdsourcing
@inproceedings {ondemand,
author={Ting-Hao (Kenneth) Huang and Jeffrey P. Bigham},
title={A 10-Month-Long Deployment Study of On-Demand Recruiting for Low-Latency Crowdsourcing},
booktitle={Proceedings of the AAAI Conference on Human Computation and Crowdsourcing},
series={HCOMP '17},
year={2017},
location={Quebec City, Canada},
numpages={10},
publisher={AAAI},
keywords={chorus, crowdsourcing, dialogue, web, retainer},
}CrowdMask: Using Crowds to Preserve Privacy in Crowd-Powered Systems via Progressive Filtering
@inproceedings {crowdmask,
author={Harmanpreet Kaur and Mitchell Gordon and Yiwei Yang and Jeffrey P. Bigham and Jaime Teevan and Ece Kamar and Walter S. Lasecki},
title={CrowdMask: Using Crowds to Preserve Privacy in Crowd-Powered Systems via Progressive Filtering},
booktitle={Proceedings of the AAAI Conference on Human Computation and Crowdsourcing},
series={HCOMP '17},
year={2017},
location={Quebec City, Canada},
numpages={10},
publisher={AAAI},
keywords={human computation, crowdsourcing, privacy},
}Introducing People with ASD to Crowd Work
@inproceedings {asd-crowdwork,
author={Kotaro Hara and Jeffrey P. Bigham},
title={Introducing People with ASD to Crowd Work},
booktitle={Proceedings of the 17th International ACM SIGACCESS Conference on Computers \& Accessibility},
series={ASSETS '17},
year={2017},
location={Baltimore, Maryland},
numpages={8},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, crowdsourcing, crowd work, work, crowdwork, autism},
}The Effects of "Not Knowing What You Don't Know" on Web Accessibility for Blind Web Users
@inproceedings {nkwydk,
author={Jeffrey P. Bigham and Irene Lin and Saiph Savage},
title={The Effects of ``Not Knowing What You Don't Know'' on Web Accessibility for Blind Web Users},
booktitle={Proceedings of the 17th International ACM SIGACCESS Conference on Computers \& Accessibility},
series={ASSETS '17},
year={2017},
location={Baltimore, Maryland},
numpages={8},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, blind, screen reader, usability},
}Good Background Colors for Readers: A Study of People with and without Dyslexia
@inproceedings {colors,
author={Luz Rello and Jeffrey P. Bigham},
title={Good Background Colors for Readers: A Study of People with and without Dyslexia},
booktitle={Proceedings of the 17th International ACM SIGACCESS Conference on Computers \& Accessibility},
series={ASSETS '17},
year={2017},
location={Baltimore, Maryland},
numpages={9},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, dyslexia, reading, colors},
}On How Deaf People Might Use Speech to Control Devices
@inproceedings {deafiot-poster,
author={Jeffrey P. Bigham and Raja Kushalnagar and Ting-Hao Kenneth Huang and Juan Pablo Flores and Saiph Savage},
title={On How Deaf People Might Use Speech to Control Devices},
booktitle={Proceedings of the 17th International ACM SIGACCESS Conference on Computers \& Accessibility -- Posters Track},
series={ASSETS '17},
year={2017},
location={Baltimore, Maryland},
numpages={2},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, deaf, hard of hearing, dhh, crowdsourcing, human computation, speech, scribe, speech recognition},
}Real-time On-Demand Crowd-powered Entity Extraction
@inproceedings {realentity,
title={Real-time On-Demand Crowd-powered Entity Extraction},
author={Ting-Hao (Kenneth) Huang and Yun-Nung Chen and Jeffrey P. Bigham},
booktitle={Proceedings of the Collective Intelligence Conference},
series={CI '17},
year={2017},
location={New York, New York},
keywords={crowdsourcing, entity extraction, nlp},
}Audience Participation Games: Blurring the Line Between Player and Spectator
@inproceedings {apg,
author={Joseph Seering and Saiph Savage and Michael Eagle and Joshua Churchin and Rachel Moeller and Jeffrey P. Bigham and Jessica Hammer},
title={Audience Participation Games: Blurring the Line Between Player and Spectator},
booktitle={Proceedings of the ACM Conference on Designing Interactive Systems},
series={DIS '17},
year={2017},
location={Edinburgh, Scotland},
numpages={10},
publisher={ACM},
address={New York, NY, USA},
keywords={audience, games, crowdsourcing, apg},
}Subcontracting Microwork
@inproceedings {subcontracting-crowdwork,
author={Meredith Ringel Morris and Jeffrey P. Bigham and Robin Brewer and Jonathan Bragg and Anand Kulkarni and Jessie Li and Saiph Savage},
title={Subcontracting Microwork},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
series={CHI '17},
year={2017},
location={Denver, CO},
numpages={10},
publisher={ACM},
address={New York, NY, USA},
keywords={crowdsourcing, microwork, subcontracting, human computation, task selection, task design, task decomposition},
}Facade: Auto-generating Tactile Interfaces to Appliances
@inproceedings {facade,
author={Anhong Guo and Jeeun Kim and Xiang (Anthony) Chen and Tom Yeh and Scott E. Hudson and Jennifer Mankoff and Jeffrey P. Bigham},
title={Facade: Auto-generating Tactile Interfaces to Appliances},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
series={CHI '17},
year={2017},
location={Denver, CO},
numpages={10},
publisher={ACM},
address={New York, NY, USA},
keywords={non-visual interfaces, visually impaired, blind, accessibility, crowdsourcing, fabrication, 3d printing, computer vision, vizwiz},
}People with Visual Impairment Training Personal Object Recognizers: Feasibility and Challenges
@inproceedings {personal-object-recognizers,
author={Hernisa Kacorri and Kris M. Kitani and Jeffrey P. Bigham and Chieko Asakawa},
title={People with Visual Impairment Training Personal Object Recognizers: Feasibility and Challenges},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
series={CHI '17},
year={2017},
location={Denver, CO},
numpages={10},
publisher={ACM},
address={New York, NY, USA},
keywords={blind, accessibility, photographs, photography, object recognition, computer vision, vizwiz},
award={nomination},
}Leveraging Complementary Contributions of Different Workers for Efficient Crowdsourcing of Video Captions
@inproceedings {complementary-contributions,
author={Yun Huang and Yifeng Huang and Na Xue and Jeffrey P. Bigham},
title={Leveraging Complementary Contributions of Different Workers for Efficient Crowdsourcing of Video Captions},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
series={CHI '17},
year={2017},
location={Denver, CO},
numpages={10},
publisher={ACM},
address={New York, NY, USA},
keywords={video caption, crowdsourcing, complementary contributions, transcription, scribe, accessibility},
}The Crowd Work Accessibility Problem
@inproceedings {crowdwork-accessibility,
author={Saiganesh Swaminathan and Kotaro Hara and Jeffrey P. Bigham},
title={The Crowd Work Accessibility Problem},
booktitle={Proceedings of the Web for All Conference},
series={W4A '17},
year={2017},
location={Perth, Australia},
numpages={10},
publisher={ACM},
address={New York, NY, USA},
keywords={crowdsourcing, accessibility},
}Scopist: Building a Skill Ladder into Crowd Work
@inproceedings {scopist,
author={Jeffrey P. Bigham and Kristin Williams and Nila Banerjee and John Zimmerman},
title={Scopist: Building a Skill Ladder into Crowd Work},
booktitle={Proceedings of the Web for All Conference},
series={W4A '17},
year={2017},
location={Perth, Australia},
numpages={10},
publisher={ACM},
address={New York, NY, USA},
keywords={crowdsourcing, accessibility, future, scribe},
}2016
"Is there anything else I can help you with?": Challenges in Deploying an On-Demand Crowd-Powered Conversational Agent
@inproceedings {chorus-deploy,
author={Huang, T.~H. and Lasecki, Walter S. and Azaria, A. and Bigham, J.P.},
title={``Is there anything else I can help you with?'': Challenges in Deploying an On-Demand Crowd-Powered Conversational Agent},
booktitle={Proceedings of the AAAI Conference on Human Computation and Crowdsourcing},
series={HCOMP '16},
year={2016},
location={Austin, TX},
numpages={10},
publisher={AAAI},
keywords={chorus, crowdsourcing, dialogue, web, API},
}VizLens: A Robust and Interactive Screen Reader for Interfaces in the Real World
@inproceedings {vizlens,
author={Guo, A. and Chen, A. and Qi, H. and White, S. and Ghosh, S. and Asakawa, C. and Bigham, J.P.},
title={VizLens: A Robust and Interactive Screen Reader for Interfaces in the Real World},
booktitle={Proceedings of the ACM Symposium on User Interface Software and Technology (UIST 2016)},
year={2016},
keywords={non-visual interfaces, visually impaired users, accessibility, crowdsourcing, computer vision, mobile devices, vizwiz},
}Manipulating Word Lattices to Incorporate Human Corrections
@inproceedings {manipulating-word-lattices,
author={Gaur, Y. and Metze, F. and Bigham, J.P.},
title={Manipulating Word Lattices to Incorporate Human Corrections},
booktitle={Proceedings of INTERSPEECH},
year={2016},
keywords={asr, speech recognition, scribe, transcription, captioning},
}Questimator: Generating Knowledge Assessments for Arbitrary Topics
@inproceedings {questimator,
author={Guo, Q. and Kulkarni, C. and Kittur, A. and Bigham, J.P. and Brunskill, E.},
title={Questimator: Generating Knowledge Assessments for Arbitrary Topics},
booktitle={Proceedings of the International Joint Conference on Artificial Intelligence (IJCAI 2016)},
address={New York, USA},
year={2016},
keywords={information extraction, assessment},
}Dytective: Diagnosing risk of dyslexia with a game
@inproceedings {dytective,
title={Dytective: Diagnosing risk of dyslexia with a game},
author={Rello, Luz and Ballesteros, Miguel and Ali, Abdullah and Serra, Miquel and Alarc{\'o}n, D. and Bigham, Jeffrey P.},
booktitle={Proceedings of Pervasive Health},
volume={16},
year={2016},
keywords={dyslexia, dytective, accessibility},
url={http://www.luzrello.com/Publications_files/PerHealth2016-Dytective.pdf},
}InstructableCrowd: Creating IF-THEN Rules via Conversations with the Crowd
@inproceedings {instructable-crowd,
author={Huang, Ting-Hao Kenneth and Azaria, Amos and Bigham, Jeffrey P.},
title={InstructableCrowd: Creating IF-THEN Rules via Conversations with the Crowd},
booktitle={Proceedings of the 2016 CHI Conference Extended Abstracts on Human Factors in Computing Systems},
series={CHI EA '16},
year={2016},
isbn={978-1-4503-4082-3},
location={Santa Clara, California, USA},
pages={1555--1562},
numpages={8},
url={https://doi.org/10.1145/2851581.2892502},
doi={10.1145/2851581.2892502},
acmid={2892502},
publisher={ACM},
address={New York, NY, USA},
keywords={crowd-powered system, crowdsourcing, end-user programming, mobile, chorus},
}An Uninteresting Tour Through Why Our Research Papers Aren't Accessible
@inproceedings {pdf-accessibility,
title={An Uninteresting Tour Through Why Our Research Papers Aren't Accessible},
author={Bigham, J.P. and Brady, E. and Gleason, C. and Guo, A. and Shamma, D.A.},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems, alt.chi},
year={2016},
location={San Jose, CA},
numpages={11},
keywords={accessibility, pdf, documents},
}An Online Chess Game Designed for People with Dyslexia
@inproceedings {dyslexia-chess,
title={An Online Chess Game Designed for People with Dyslexia},
author={Rello, L. and Subirats, S. and Bigham, J.P.},
booktitle={Proceedings of the International Web for All Conference (W4A 2016)},
year={2016},
location={Montreal, Canada},
numpages={10},
keywords={accessibility, dyslexia},
}The Effects of Automatic Speech Recognition Quality on Human Transcription Latency
@inproceedings {asr-threshold,
author={Gaur, Y. and Lasecki, W.S. and Metze, F. and Bigham, J.P.},
title={The Effects of Automatic Speech Recognition Quality on Human Transcription Latency},
booktitle={Proceedings of the International Web for All Conference (W4A 2016)},
year={2016},
location={Montreal, Canada},
numpages={10},
keywords={accessibility, speech recognition, asr, captioning, transcription, crowdsourcing},
award={best paper},
}WearWrite: Crowd-Assisted Writing from Smartwatches
@inproceedings {wearwrite,
author={Nebeling, M. and To, A. and Guo, A. and de Freitas, A. and Teevan, J. and Dow, S. and Bigham, J.P.},
title={WearWrite: Crowd-Assisted Writing from Smartwatches},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
series={CHI '16},
year={2016},
location={San Jose, CA},
numpages={10},
publisher={ACM},
address={New York, NY, USA},
keywords={smartwatches, wearables, crowdsourcing, writing},
}"With most of it being pictures now, I rarely use it": Understanding Twitter's Evolving Accessibility to Blind Users
@inproceedings {twitteraccessibility,
author={Morris, M.R. and Perkins, A. and Yao, C. and Bahram, S. and Bigham, J.P. and Kane, S.K.},
title={``With most of it being pictures now, I rarely use it'': Understanding Twitter's Evolving Accessibility to Blind Users},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
series={CHI '16},
year={2016},
location={San Jose, CA},
numpages={10},
publisher={ACM},
address={New York, NY, USA},
keywords={social media, twitter, blindness, accessibility},
}2015
Target Acquisition and the Crowd Actor
@article {targetacquisition,
author={Lasecki, W.S. and Bigham, J.P.},
title={Target Acquisition and the Crowd Actor},
journal={Human Computation},
volume={1},
issue={2},
pages={101--131},
doi={10.15346/hc.v1i1.2},
year={2015},
}Crowdsourcing Accessibility: Human-Powered Access Technologies
@article {crowdsourcingaccessibility,
author={Brady, E. and Bigham, J.P.},
title={Crowdsourcing Accessibility: Human-Powered Access Technologies},
journal={Foundations and Trends in Human--Computer Interaction},
volume={8},
number={4},
pages={273--372},
doi={10.1561/1100000050},
url={http://dx.doi.org/10.1561/1100000050},
year={2015},
}Guardian: A Crowd-Powered Spoken Dialog System for Web APIs
@inproceedings {guardian,
author={Huang, T.~H. and Lasecki, Walter S. and Bigham, J.P.},
title={Guardian: A Crowd-Powered Spoken Dialog System for Web APIs},
booktitle={Proceedings of the AAAI Conference on Human Computation and Crowdsourcing},
series={HCOMP '15},
year={2015},
location={San Diego, CA},
numpages={8},
publisher={AAAI},
keywords={chorus, crowdsourcing, dialogue, web, API},
url={http://www.cs.cmu.edu/~jbigham/pubs/pdfs/2015/guardian.pdf},
}A Spellchecker for Dyslexia
@inproceedings {realcheck,
author={Rello, Luz and Ballesteros, Miguel and Bigham, Jeffrey P.},
title={A Spellchecker for Dyslexia},
booktitle={Proceedings of the 17th International ACM SIGACCESS Conference on Computers \& Accessibility},
series={ASSETS '15},
year={2015},
location={Lisbon, Portugal},
numpages={8},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, dyslexia, spell checker},
url={http://www.cs.cmu.edu/~jbigham/pubs/pdfs/2015/realcheck.pdf},
}Using Keyword Spotting to Help Humans Correct Captioning Faster
@inproceedings {keywordspotting,
author={Gaur, Y. and Metze, F. and Miao, Y. and Bigham, J.P.},
title={Using Keyword Spotting to Help Humans Correct Captioning Faster},
booktitle={Proceedings of INTERSPEECH 2015},
series={INTERSPEECH '15},
year={2015},
location={Dresden, Germany},
numpages={5},
publisher={INTERSPEECH},
keywords={keyword spotting, automatic speech recognition, asr, scribe, captioning, transcription},
url={http://www.cs.cmu.edu/~jbigham/pubs/pdfs/2015/keywordspotting.pdf},
}WearWrite: Orchestrating the Crowd to Complete Complex Tasks from Wearables (We Wrote This Paper on a Watch)
@article {wearwrite,
author={Nebeling, M. and Guo, A. and Murray, K. and Tostengard, A. and Giannopoulos, A. and Mihajlov, M. and Dow, S. and Teevan, J. and Bigham, J.~P.},
title={WearWrite: Orchestrating the Crowd to Complete Complex Tasks from Wearables (We Wrote This Paper on a Watch)},
journal={arXiv},
volume={1508.02982},
year={2015},
month=jul,
url={http://arxiv.org/pdf/1508.02982v1},
ee={http://arxiv.org/abs/1508.02982},
keywords={wearwrite, crowdsourcing, human computation, writing, wearables},
}A Plug-in to Aid Online Reading in Spanish
@inproceedings {dyslexiaplugin,
author={Rello, Luz and Carlini, Roberto and Baeza-Yates, Ricardo and Bigham, Jeffrey P.},
title={A Plug-in to Aid Online Reading in Spanish},
booktitle={Proceedings of the 12th Web for All Conference},
series={W4A '15},
year={2015},
isbn={978-1-4503-3342-9},
location={Florence, Italy},
pages={7:1--7:4},
articleno={7},
numpages={4},
url={http://doi.acm.org/10.1145/2745555.2746661},
doi={10.1145/2745555.2746661},
acmid={2746661},
publisher={ACM},
address={New York, NY, USA},
keywords={Chrome, definitions, lexical simplification, plug-in, readability, synonyms, text simplification, accessibility, dyslexia},
}Creating Accessible PDFs for Conference Proceedings
@inproceedings {accessibleconferences,
author={Brady, Erin and Zhong, Yu and Bigham, Jeffrey P.},
title={Creating Accessible PDFs for Conference Proceedings},
booktitle={Proceedings of the 12th Web for All Conference},
series={W4A '15},
year={2015},
isbn={978-1-4503-3342-9},
location={Florence, Italy},
pages={34:1--34:4},
articleno={34},
numpages={4},
url={http://doi.acm.org/10.1145/2745555.2746665},
doi={10.1145/2745555.2746665},
acmid={2746665},
publisher={ACM},
address={New York, NY, USA},
}Measuring Text Simplification with the Crowd
@inproceedings {measuringsimplicity,
author={Lasecki, Walter S. and Rello, Luz and Bigham, Jeffrey P.},
title={Measuring Text Simplification with the Crowd},
booktitle={Proceedings of the 12th Web for All Conference},
series={W4A '15},
year={2015},
isbn={978-1-4503-3342-9},
location={Florence, Italy},
pages={4:1--4:9},
articleno={4},
numpages={9},
url={http://www.cs.cmu.edu/~jbigham/pubs/pdfs/2015/measuringsimplicity.pdf},
doi={10.1145/2745555.2746658},
acmid={2746658},
publisher={ACM},
address={New York, NY, USA},
keywords={NLP, accessibility, crowdsourcing, text simplification},
}CAN: Composable Accessibility Infrastructure via Data-driven Crowdsourcing
@inproceedings {can,
author={Huang, Yun and Dobreski, Brian and Deo, Bijay Bhaskar and Xin, Jiahang and Barbosa, Nat{\~a} Miccael and Wang, Yang and Bigham, Jeffrey P.},
title={CAN: Composable Accessibility Infrastructure via Data-driven Crowdsourcing},
booktitle={Proceedings of the 12th Web for All Conference},
series={W4A '15},
year={2015},
isbn={978-1-4503-3342-9},
location={Florence, Italy},
pages={2:1--2:10},
articleno={2},
numpages={10},
url={http://doi.acm.org/10.1145/2745555.2746651},
doi={10.1145/2745555.2746651},
acmid={2746651},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, crowdsourcing, web, end user},
}Enhancing Android Accessibility for Users with Hand Tremor by Reducing Fine Pointing and Steady Tapping
@inproceedings {steadytapping,
author={Zhong, Yu and Weber, Astrid and Burkhardt, Casey and Weaver, Phil and Bigham, Jeffrey P.},
title={Enhancing Android Accessibility for Users with Hand Tremor by Reducing Fine Pointing and Steady Tapping},
booktitle={Proceedings of the 12th Web for All Conference},
series={W4A '15},
year={2015},
isbn={978-1-4503-3342-9},
location={Florence, Italy},
pages={29:1--29:10},
articleno={29},
numpages={10},
url={http://doi.acm.org/10.1145/2745555.2747277},
doi={10.1145/2745555.2747277},
acmid={2747277},
publisher={ACM},
address={New York, NY, USA},
keywords={Android, accessibility, disambiguation, fine pointing, magnification, motor space, steady tapping, visual space},
award={nomination},
}Gauging Receptiveness to Social Microvolunteering
Zensors: Adaptive, Rapidly Deployable, Human-Intelligent Sensor Feeds
@inproceedings {zensors,
author={Laput, G. and Lasecki, W.~S. and Wiese, J. and Xiao, R. and Bigham, J.~P. and Harrison, C.},
title={Zensors: Adaptive, Rapidly Deployable, Human-Intelligent Sensor Feeds},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
series={CHI '15},
year={2015},
location={Seoul, Republic of Korea},
numpages={10},
publisher={ACM},
address={New York, NY, USA},
keywords={smart environments, sensing, human computation, computer vision, machine learning, end-user programming},
url={http://www.cs.cmu.edu/~jbigham/pubs/pdfs/2015/zensors.pdf},
movie={https://www.youtube.com/watch?v=VVP9emuFsQI},
}Apparition: Crowdsourced User Interfaces That Come To Life As You Sketch Them
@inproceedings {apparition,
author={Lasecki, W.~S. and Kim, J. and Rafter, N. and Sen, O. and Bigham, J.~P. and Bernstein, M.~S.},
title={Apparition: Crowdsourced User Interfaces That Come To Life As You Sketch Them},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
series={CHI '15},
year={2015},
location={Seoul, Republic of Korea},
numpages={10},
publisher={ACM},
address={New York, NY, USA},
keywords={Rapid prototyping, crowdsourcing, human computation},
url={http://www.cs.cmu.edu/~jbigham/pubs/pdfs/2015/apparition.pdf},
award={nomination},
}RegionSpeak: Quick Comprehensive Spatial Descriptions of Complex Images for Blind Users
@inproceedings {regionspeak,
author={Zhong, Y. and Lasecki, W.~S. and Brady, E. and Bigham, Jeffrey P.},
title={RegionSpeak: Quick Comprehensive Spatial Descriptions of Complex Images for Blind Users},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems - Works-in-Progress},
series={CHI '15},
year={2015},
location={Seoul, Republic of Korea},
pages={2353--2362},
numpages={10},
publisher={ACM},
address={New York, NY, USA},
keywords={visual questions, crowdsourcing, stitching, accessibility, computer vision, vizwiz},
url={http://www.cs.cmu.edu/~jbigham/pubs/pdfs/2015/regionspeak.pdf},
}The Effects of Sequence and Delay on Crowd Work
@inproceedings {sequenceanddelay,
author={Lasecki, W.~S. and Rzeszotarski, J.~M. and Marcus, A. and Bigham, J.~P.},
title={The Effects of Sequence and Delay on Crowd Work},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
series={CHI '15},
year={2015},
location={Seoul, Republic of Korea},
numpages={10},
publisher={ACM},
address={New York, NY, USA},
keywords={Crowdsourcing, human computation, workflows, continuity, interruptions, efficiency},
url={http://www.cs.cmu.edu/~jbigham/pubs/pdfs/2015/sequenceanddelay.pdf},
}Exploring Privacy and Accuracy Trade-Offs in Crowdsourced Behavioral Video Coding
@inproceedings {privacy_accuracy,
author={Lasecki, W.~S. and Gordon, M. and Leung, W. and Lim, E. and Bigham, Jeffrey P. and Dow, S.~P.},
title={Exploring Privacy and Accuracy Trade-Offs in Crowdsourced Behavioral Video Coding},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems - Works-in-Progress},
series={CHI '15},
year={2015},
location={Seoul, Republic of Korea},
numpages={6},
publisher={ACM},
address={New York, NY, USA},
keywords={data analysis, subjective coding, crowdsourcing, video},
url={http://www.cs.cmu.edu/~jbigham/pubs/pdfs/2015/privacy_accuracy.pdf},
}ApplianceReader: A Wearable, Crowdsourced, Vision-based System to Make Appliances Accessible
@inproceedings {appliancereader,
author={Guo, Anhong and Chen, Xiang 'Anthony' and Bigham, Jeffrey P.},
title={ApplianceReader: A Wearable, Crowdsourced, Vision-based System to Make Appliances Accessible},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems - Works-in-Progress},
series={CHI '15},
year={2015},
location={Seoul, Republic of Korea},
numpages={6},
publisher={ACM},
address={New York, NY, USA},
keywords={Non-visual interfaces, visually impaired, blind, accessibility, crowdsourcing, computer vision, wearable computers, vizwiz},
url={http://www.cs.cmu.edu/~jbigham/pubs/pdfs/2015/appliancereader.pdf},
}Accessible Crowdwork? Understanding the Value in and Challenge of Microtask Employment for People with Disabilities
@inproceedings {accessiblecrowdwork,
abstract={We present the first formal study of crowdworkers who have disabilities via in-depth open-ended interviews of 17 people (disabled crowdworkers and job coaches for people with disabilities) and a survey of 631 adults with disabilities. Our findings establish that people with a variety of disabilities currently participate in the crowd labor marketplace, despite challenges such as crowdsourcing workflow designs that inadvertently prohibit participation by, and may negatively affect the worker reputations of, people with disabilities. Despite such challenges, we find that crowdwork potentially offers different opportunities for people with disabilities relative to the normative office environment, such as job flexibility and lack of a need to rely on public transit. We close by identifying several ways in which crowd labor platform operators and/or individual task requestors could improve the accessibility of this increasingly important form of employment.},
author={Kathryn Zyskowski and Meredith Ringel Morris and Jeffrey P. Bigham and Mary L. Gray and Shaun Kane},
month=mar,
publisher={ACM -- Association for Computing Machinery},
title={Accessible Crowdwork? Understanding the Value in and Challenge of Microtask Employment for People with Disabilities},
url={http://research.microsoft.com/apps/pubs/default.aspx?id=228714},
year={2015},
}Human-Computer Interaction and Collective Intelligence
@incollection {hciandci,
author={Bigham, Jeffrey P. and Bernstein, Michael and Adar, Eytan},
title={Human-Computer Interaction and Collective Intelligence},
booktitle={Collective Intelligence Handbook},
publisher={MIT Press},
year={2015},
url={https://docs.google.com/file/d/0B4-bDrtyS3lXdFJIRXk2bGdpQzg/edit},
}2014
Architecting Real-Time Crowd-Powered Systems
@article {realtimecrowdarchitecture,
author={Lasecki, Walter S. and Homan, Chris and Bigham, Jeffrey P.},
title={Architecting Real-Time Crowd-Powered Systems},
journal={Human Computation Journal},
month=sep,
year={2014},
}Making the Web Easier to See with Opportunistic Accessibility Improvement
@inproceedings {oppaccess,
author={Bigham, Jeffrey P.},
title={Making the Web Easier to See with Opportunistic Accessibility Improvement},
booktitle={Proceedings of the 27th Annual ACM Symposium on User Interface Software and Technology},
series={UIST '14},
year={2014},
isbn={978-1-4503-3069-5},
location={Honolulu, Hawaii, USA},
pages={117--122},
numpages={6},
url={http://doi.acm.org/10.1145/2642918.2647357},
doi={10.1145/2642918.2647357},
acmid={2647357},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, low vision, magnification, zoom},
movie={https://www.youtube.com/watch?v=SSvy66-og5s},
}Glance: Rapidly Coding Behavioral Video with the Crowd
@inproceedings {glance,
author={Lasecki, Walter S. and Gordon, Mitchell and Koutra, Danai and Jung, Malte F. and Dow, Steven P. and Bigham, Jeffrey P.},
title={Glance: Rapidly Coding Behavioral Video with the Crowd},
booktitle={Proceedings of the 27th Annual ACM Symposium on User Interface Software and Technology},
series={UIST '14},
year={2014},
isbn={978-1-4503-3069-5},
location={Honolulu, Hawaii, USA},
pages={551--562},
numpages={12},
url={http://doi.acm.org/10.1145/2642918.2647367},
doi={10.1145/2642918.2647367},
acmid={2647367},
publisher={ACM},
address={New York, NY, USA},
keywords={crowdsourcing, data analysis, subjective coding, video},
}How Companies Engage Customers Around Accessibility on Social Media
@inproceedings {companiessocialmedia,
author={Brady, Erin and Bigham, Jeffrey P.},
title={How Companies Engage Customers Around Accessibility on Social Media},
booktitle={Proceedings of the 16th International ACM SIGACCESS Conference on Computers \& Accessibility},
series={ASSETS '14},
year={2014},
isbn={978-1-4503-2720-6},
location={Rochester, New York, USA},
pages={51--58},
numpages={8},
url={http://doi.acm.org/10.1145/2661334.2661355},
doi={10.1145/2661334.2661355},
acmid={2661355},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, corporations, social media, twitter},
}Crowdsourcing Medical Expertise in Near Realtime
@article {chirp,
author={Sims, M. and Bigham, Jeffrey P. and Kautz, Henry and Halterman, Mark W.},
title={Crowdsourcing Medical Expertise in Near Realtime},
journal={Journal of Hospital Medicine},
abstract={Given the pace of discovery in medicine, accessing the literature to make informed decisions at the point of care has become increasingly difficult. Although the Internet creates unprecedented access to information, gaps in the medical literature and inefficient searches often leave healthcare providers' questions unanswered. Advances in social computation and human computer interactions offer a potential solution to this problem. We developed and piloted the mobile application DocCHIRP, which uses a system of point-to-multipoint push notifications designed to help providers problem solve by crowdsourcing from their peers. Over the 244-day pilot period, 85 registered users logged 1544 page views and sent 45 consult questions. The median initial first response from the crowd occurred within 19 minutes. Review of the transcripts revealed several dominant themes, including complex medical decision making and inquiries related to prescription medication use. Feedback from the post-trial survey identified potential hurdles related to medical crowdsourcing, including a reluctance to expose personal knowledge gaps and the potential risk for "distracted doctoring." Users also suggested program modifications that could support future adoption, including changes to the mobile interface and mechanisms that could expand the crowd of participating healthcare providers.},
month=apr,
day={17},
year={2014},
url={http://www.ncbi.nlm.nih.gov/pubmed/24740747},
fixedicon={https://www.cs.cmu.edu/~jbigham/pubs/icons/chirp.png},
}JustSpeak: Enabling Universal Voice Control on Android
@inproceedings {justspeak,
author={Zhong, Yu and Raman, T. V. and Burkhardt, Casey and Biadsy, Fadi and Bigham, Jeffrey P.},
title={JustSpeak: Enabling Universal Voice Control on Android},
booktitle={Proceedings of the 11th Web for All Conference},
series={W4A '14},
year={2014},
isbn={978-1-4503-2651-3},
location={Seoul, Korea},
pages={36:1--36:4},
articleno={36},
numpages={4},
url={http://doi.acm.org/10.1145/2596695.2596720},
doi={10.1145/2596695.2596720},
acmid={2596720},
publisher={ACM},
address={New York, NY, USA},
keywords={Android, accessibility, mobile, universal voice control},
}Helping Students Keep Up with Real-time Captions by Pausing and Highlighting
@inproceedings {pausinghighlighting,
author={Lasecki, Walter S. and Kushalnagar, Raja and Bigham, Jeffrey P.},
title={Helping Students Keep Up with Real-time Captions by Pausing and Highlighting},
booktitle={Proceedings of the 11th Web for All Conference},
series={W4A '14},
year={2014},
isbn={978-1-4503-2651-3},
location={Seoul, Korea},
pages={39:1--39:8},
articleno={39},
numpages={8},
url={http://doi.acm.org/10.1145/2596695.2596701},
doi={10.1145/2596695.2596701},
acmid={2596701},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, caption readability, human factors, inclusive classrooms, real-time captioning},
award={best paper},
}Introducing Shared Control to Existing Video Games
@inproceedings {wegame,
author={Loparev, Anna and Lasecki, Walter S. and Murray, Kyle I. and Bigham, Jeffrey P.},
title={Introducing Shared Control to Existing Video Games},
booktitle={Proceedings of the Foundations of Digital Games (FDG 2014)},
location={Ft. Lauderdale, Florida, USA},
year={2014},
}Crowd Storage: Storing Information on Existing Memories
@inproceedings {crowdstorage,
author={Bigham, Jeffrey P. and Lasecki, Walter S.},
title={Crowd Storage: Storing Information on Existing Memories},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
series={CHI '14},
year={2014},
isbn={978-1-4503-2473-1},
location={Toronto, Ontario, Canada},
pages={601--604},
numpages={4},
url={http://doi.acm.org/10.1145/2556288.2557159},
doi={10.1145/2556288.2557159},
acmid={2557159},
publisher={ACM},
address={New York, NY, USA},
keywords={crowdsourcing, memory, storage},
movie={https://www.youtube.com/watch?v=TNbitv35P8A},
}Finding Dependencies Between Actions Using the Crowd
@inproceedings {actiondependencies,
author={Lasecki, Walter S. and Weingard, Leon and Ferguson, George and Bigham, Jeffrey P.},
title={Finding Dependencies Between Actions Using the Crowd},
booktitle={Proceedings of the 32nd Annual ACM Conference on Human Factors in Computing Systems},
series={CHI '14},
year={2014},
isbn={978-1-4503-2473-1},
location={Toronto, Ontario, Canada},
pages={3095--3098},
numpages={4},
url={http://doi.acm.org/10.1145/2556288.2557176},
doi={10.1145/2556288.2557176},
acmid={2557176},
publisher={ACM},
address={New York, NY, USA},
keywords={activity recognition, constraint finding, crowdsourcing},
}Tracking @Stemxcomet: Teaching Programming to Blind Students via 3D Printing, Crisis Management, and Twitter
@inproceedings {stemx,
author={Kane, Shaun K. and Bigham, Jeffrey P.},
title={Tracking @Stemxcomet: Teaching Programming to Blind Students via 3D Printing, Crisis Management, and Twitter},
booktitle={Proceedings of the 45th ACM Technical Symposium on Computer Science Education},
series={SIGCSE '14},
year={2014},
isbn={978-1-4503-2605-6},
location={Atlanta, Georgia, USA},
pages={247--252},
numpages={6},
url={http://doi.acm.org/10.1145/2538862.2538975},
doi={10.1145/2538862.2538975},
acmid={2538975},
publisher={ACM},
address={New York, NY, USA},
keywords={3D printing, accessibility, crisis informatics, education, fabrication, programming, visual impairments},
}Accessibility Evaluation of Classroom Captions
@article {classroomcaptions,
author={Kushalnagar, Raja S. and Lasecki, Walter S. and Bigham, Jeffrey P.},
title={Accessibility Evaluation of Classroom Captions},
journal={ACM Trans. Access. Comput.},
issue_date={January 2014},
volume={5},
number={3},
month=jan,
year={2014},
issn={1936-7228},
pages={7:1--7:24},
articleno={7},
numpages={24},
url={http://doi.acm.org/10.1145/2543578},
doi={10.1145/2543578},
acmid={2543578},
publisher={ACM},
address={New York, NY, USA},
keywords={Real-time captioning, crowdsourcing, deaf, hard of hearing, accessibility},
}2013
Real Time Object Scanning Using a Mobile Phone and Cloud-based Visual Search Engine
@inproceedings {objectscanning,
author={Zhong, Yu and Garrigues, Pierre J. and Bigham, Jeffrey P.},
title={Real Time Object Scanning Using a Mobile Phone and Cloud-based Visual Search Engine},
booktitle={Proceedings of the 15th International ACM SIGACCESS Conference on Computers and Accessibility},
series={ASSETS '13},
year={2013},
isbn={978-1-4503-2405-2},
location={Bellevue, Washington},
pages={20:1--20:8},
articleno={20},
numpages={8},
url={http://doi.acm.org/10.1145/2513383.2513443},
doi={10.1145/2513383.2513443},
acmid={2513443},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, blind user, mobile, real time object scanning, vizwiz, computer vision},
}Answering Visual Questions with Conversational Crowd Assistants
@inproceedings {view,
author={Lasecki, Walter S. and Thiha, Phyo and Zhong, Yu and Brady, Erin and Bigham, Jeffrey P.},
title={Answering Visual Questions with Conversational Crowd Assistants},
booktitle={Proceedings of the 15th International ACM SIGACCESS Conference on Computers and Accessibility},
series={ASSETS '13},
year={2013},
isbn={978-1-4503-2405-2},
location={Bellevue, Washington},
pages={18:1--18:8},
articleno={18},
numpages={8},
url={http://doi.acm.org/10.1145/2513383.2517033},
doi={10.1145/2513383.2517033},
acmid={2517033},
publisher={ACM},
address={New York, NY, USA},
keywords={assistive technology, crowdsourcing, human computation, accessibility, chorus, vizwiz},
}Chorus: A Crowd-powered Conversational Assistant
@inproceedings {chorus,
author={Lasecki, Walter S. and Wesley, Rachel and Nichols, Jeffrey and Kulkarni, Anand and Allen, James F. and Bigham, Jeffrey P.},
title={Chorus: A Crowd-powered Conversational Assistant},
booktitle={Proceedings of the 26th Annual ACM Symposium on User Interface Software and Technology},
series={UIST '13},
year={2013},
isbn={978-1-4503-2268-3},
location={St. Andrews, Scotland, United Kingdom},
pages={151--162},
numpages={12},
url={http://doi.acm.org/10.1145/2501988.2502057},
doi={10.1145/2501988.2502057},
acmid={2502057},
publisher={ACM},
address={New York, NY, USA},
keywords={conversational assistants, crowd-powered systems, crowdsourcing, dialog systems, human computation, chorus},
}Text Alignment for Real-Time Crowd Captioning.
@inproceedings {naim2013text,
title={Text Alignment for Real-Time Crowd Captioning.},
author={Naim, Iftekhar and Gildea, Daniel and Lasecki, Walter S. and Bigham, Jeffrey P.},
booktitle={Proceedings of the North American Chapter of the Association for Computational Linguistics Conference (NAACL 2013)},
pages={201--210},
year={2013},
keywords={nlp, multiple sequence alignment, msa, alignment, scribe, captioning},
}Captions Versus Transcripts for Online Video Content
@inproceedings {captionvstranscripts,
author={Kushalnagar, Raja S. and Lasecki, Walter S. and Bigham, Jeffrey P.},
title={Captions Versus Transcripts for Online Video Content},
booktitle={Proceedings of the 10th International Cross-Disciplinary Conference on Web Accessibility},
series={W4A '13},
year={2013},
isbn={978-1-4503-1844-0},
location={Rio de Janeiro, Brazil},
pages={32:1--32:4},
articleno={32},
numpages={4},
url={http://doi.acm.org/10.1145/2461121.2461142},
doi={10.1145/2461121.2461142},
acmid={2461142},
publisher={ACM},
address={New York, NY, USA},
keywords={captions, deaf education, online education, transcripts},
}Warping Time for More Effective Real-time Crowdsourcing
@inproceedings {timewarp,
author={Lasecki, Walter S. and Miller, Christopher D. and Bigham, Jeffrey P.},
title={Warping Time for More Effective Real-time Crowdsourcing},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
series={CHI '13},
year={2013},
isbn={978-1-4503-1899-0},
location={Paris, France},
pages={2033--2036},
numpages={4},
url={http://doi.acm.org/10.1145/2470654.2466269},
doi={10.1145/2470654.2466269},
acmid={2466269},
publisher={ACM},
address={New York, NY, USA},
keywords={captioning, human computation, real-time crowdsourcing, scribe},
award={nomination},
}Visual Challenges in the Everyday Lives of Blind People
@inproceedings {visualchallenges,
author={Brady, Erin and Morris, Meredith Ringel and Zhong, Yu and White, Samuel and Bigham, Jeffrey P.},
title={Visual Challenges in the Everyday Lives of Blind People},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
series={CHI '13},
year={2013},
isbn={978-1-4503-1899-0},
location={Paris, France},
pages={2117--2126},
numpages={10},
url={http://doi.acm.org/10.1145/2470654.2481291},
doi={10.1145/2470654.2481291},
acmid={2481291},
publisher={ACM},
address={New York, NY, USA},
keywords={accessibility, blind users, crowdsourcing, mobile, questions, vizwiz},
}Mechanical Turk is Not Anonymous
@inproceedings {not-anonymous,
author={Lease, Matthew and Hullman, Jessica and Bigham, Jeffrey and Bernstein, Michael and Kim, Juho and Lasecki, Walter and Bakhshi, Saeideh and Mitra, Tanushree and Miller, Robert},
title={Mechanical Turk is Not Anonymous},
booktitle={SSRN},
series={SSRN},
year={2013},
url={http://dx.doi.org/10.2139/ssrn.2228728},
keywords={crowdsourcing, human computation, mechanical turk, anonymous, work, gig work},
}Real-time Crowd Labeling for Deployable Activity Recognition
@inproceedings {legionar,
author={Lasecki, Walter S. and Song, Young Chol and Kautz, Henry and Bigham, Jeffrey P.},
title={Real-time Crowd Labeling for Deployable Activity Recognition},
booktitle={Proceedings of the 2013 Conference on Computer Supported Cooperative Work},
series={CSCW '13},
year={2013},
isbn={978-1-4503-1331-5},
location={San Antonio, Texas, USA},
pages={1203--1212},
numpages={10},
url={http://doi.acm.org/10.1145/2441776.2441912},
doi={10.1145/2441776.2441912},
acmid={2441912},
publisher={ACM},
address={New York, NY, USA},
keywords={activity recognition, crowdsourcing, human computation},
}Investigating the Appropriateness of Social Network Question Asking As a Resource for Blind Users
@inproceedings {socialnetworkappropriateness,
author={Brady, Erin L. and Zhong, Yu and Morris, Meredith Ringel and Bigham, Jeffrey P.},
title={Investigating the Appropriateness of Social Network Question Asking As a Resource for Blind Users},
booktitle={Proceedings of the 2013 Conference on Computer Supported Cooperative Work},
series={CSCW '13},
year={2013},
isbn={978-1-4503-1331-5},
location={San Antonio, Texas, USA},
pages={1225--1236},
numpages={12},
url={http://doi.acm.org/10.1145/2441776.2441915},
doi={10.1145/2441776.2441915},
acmid={2441915},
publisher={ACM},
address={New York, NY, USA},
keywords={blind users, friendsourcing, social networks, visual impairment, vizwiz, accessibility},
}2012
Real-time Captioning by Groups of Non-experts
@inproceedings {scribe,
author={Lasecki, Walter and Miller, Christopher and Sadilek, Adam and Abumoussa, Andrew and Borrello, Donato and Kushalnagar, Raja and Bigham, Jeffrey},
title={Real-time Captioning by Groups of Non-experts},
booktitle={Proceedings of the 25th Annual ACM Symposium on User Interface Software and Technology},
series={UIST '12},
year={2012},
isbn={978-1-4503-1580-7},
location={Cambridge, Massachusetts, USA},
pages={23--34},
numpages={12},
url={http://doi.acm.org/10.1145/2380116.2380122},
doi={10.1145/2380116.2380122},
acmid={2380122},
publisher={ACM},
address={New York, NY, USA},
keywords={captioning, crowdsourcing, deaf, hard of hearing, real-time, text alignment, transcription, accessibility, scribe},
award={nomination},
}Crowdsourcing Subjective Fashion Advice Using VizWiz: Challenges and Opportunities
@inproceedings {vizwiz-fashion,
author={Burton, Michele A. and Brady, Erin and Brewer, Robin and Neylan, Callie and Bigham, Jeffrey P. and Hurst, Amy},
title={Crowdsourcing Subjective Fashion Advice Using VizWiz: Challenges and Opportunities},
booktitle={Proceedings of the 14th International ACM SIGACCESS Conference on Computers and Accessibility},
series={ASSETS '12},
year={2012},
isbn={978-1-4503-1321-6},
location={Boulder, Colorado, USA},
pages={135--142},
numpages={8},
url={http://doi.acm.org/10.1145/2384916.2384941},
doi={10.1145/2384916.2384941},
acmid={2384941},
publisher={ACM},
address={New York, NY, USA},
keywords={blind users, crowdsourcing, fashion, vizwiz},
}Crowd Memory: Learning in the Collective
@article {crowdmemory,
author={Walter S. Lasecki and Samuel White and Kyle I. Murray and Jeffrey P. Bigham},
title={Crowd Memory: Learning in the Collective},
journal={Collective Intelligence},
volume={abs/1204.3678},
year={2012},
url={http://arxiv.org/abs/1204.3678},
timestamp={Wed, 10 Oct 2012 21:28:50 +0200},
biburl={http://dblp.uni-trier.de/rec/bib/journals/corr/abs-1204-3678},
bibsource={dblp computer science bibliography, http://dblp.org},
}Finding Your Friends and Following Them to Where You Are
@inproceedings {findingyourfriends,
author={Sadilek, Adam and Kautz, Henry and Bigham, Jeffrey P.},
title={Finding Your Friends and Following Them to Where You Are},
booktitle={Proceedings of the Fifth ACM International Conference on Web Search and Data Mining},
series={WSDM '12},
year={2012},
isbn={978-1-4503-0747-5},
location={Seattle, Washington, USA},
pages={723--732},
numpages={10},
url={http://doi.acm.org/10.1145/2124295.2124380},
doi={10.1145/2124295.2124380},
acmid={2124380},
publisher={ACM},
address={New York, NY, USA},
keywords={graphical models, link prediction, location modeling, machine learning, social networks, visualization},
award={best paper},
}2011
The Design of Human-powered Access Technology
@inproceedings {designofat,
author={Bigham, Jeffrey P. and Ladner, Richard E. and Borodin, Yevgen},
title={The Design of Human-powered Access Technology},
booktitle={The Proceedings of the 13th International ACM SIGACCESS Conference on Computers and Accessibility},
series={ASSETS '11},
year={2011},
isbn={978-1-4503-0920-2},
location={Dundee, Scotland, UK},
pages={3--10},
numpages={8},
url={http://doi.acm.org/10.1145/2049536.2049540},
doi={10.1145/2049536.2049540},
acmid={2049540},
publisher={ACM},
address={New York, NY, USA},
keywords={access technology, crowdsourcing, human computation},
}Supporting Blind Photography
@inproceedings {blindphotography,
author={Jayant, Chandrika and Ji, Hanjie and White, Samuel and Bigham, Jeffrey P.},
title={Supporting Blind Photography},
booktitle={The Proceedings of the 13th International ACM SIGACCESS Conference on Computers and Accessibility},
series={ASSETS '11},
year={2011},
isbn={978-1-4503-0920-2},
location={Dundee, Scotland, UK},
pages={203--210},
numpages={8},
url={http://doi.acm.org/10.1145/2049536.2049573},
doi={10.1145/2049536.2049573},
acmid={2049573},
publisher={ACM},
address={New York, NY, USA},
keywords={blind, camera, photography, visually impaired},
}Real-time Crowd Control of Existing Interfaces
@inproceedings {legion,
author={Lasecki, Walter S. and Murray, Kyle I. and White, Samuel and Miller, Robert C. and Bigham, Jeffrey P.},
title={Real-time Crowd Control of Existing Interfaces},
booktitle={Proceedings of the 24th Annual ACM Symposium on User Interface Software and Technology},
series={UIST '11},
year={2011},
isbn={978-1-4503-0716-1},
location={Santa Barbara, California, USA},
pages={23--32},
numpages={10},
url={http://doi.acm.org/10.1145/2047196.2047200},
doi={10.1145/2047196.2047200},
acmid={2047200},
publisher={ACM},
address={New York, NY, USA},
keywords={crowdsourcing, real-time crowd control, real-time human computation, remote control},
}Multimodal Summarization of Complex Sentences
@inproceedings {multimodal_summarization,
author={UzZaman, Naushad and Bigham, Jeffrey P. and Allen, James F.},
title={Multimodal Summarization of Complex Sentences},
booktitle={Proceedings of the 16th International Conference on Intelligent User Interfaces},
series={IUI '11},
year={2011},
isbn={978-1-4503-0419-1},
location={Palo Alto, CA, USA},
pages={43--52},
numpages={10},
url={http://doi.acm.org/10.1145/1943403.1943412},
doi={10.1145/1943403.1943412},
acmid={1943412},
publisher={ACM},
address={New York, NY, USA},
keywords={AAC, MMS, ROC MMS, augmentative and alternative communication, automatic illustration, illustration, multimodal summarization, pictorial representation, picture, sentence compression, summarization, text-to-picture, visualization},
}2010
VizWiz: Nearly Real-time Answers to Visual Questions
@inproceedings {vizwiz,
author={Bigham, Jeffrey P. and Jayant, Chandrika and Ji, Hanjie and Little, Greg and Miller, Andrew and Miller, Robert C. and Miller, Robin and Tatarowicz, Aubrey and White, Brandyn and White, Samual and Yeh, Tom},
title={VizWiz: Nearly Real-time Answers to Visual Questions},
booktitle={Proceedings of the 23rd Annual ACM Symposium on User Interface Software and Technology},
series={UIST '10},
year={2010},
isbn={978-1-4503-0271-5},
location={New York, New York, USA},
pages={333--342},
numpages={10},
url={http://doi.acm.org/10.1145/1866029.1866080},
doi={10.1145/1866029.1866080},
acmid={1866080},
publisher={ACM},
address={New York, NY, USA},
keywords={blind users, non-visual interfaces, crowdsourcing, human computation, accessibility},
award={best paper},
}A Conversational Interface to Web Automation
@inproceedings {conversational-web-automation,
author={Lau, Tessa and Cerruti, Julian and Manzato, Guillermo and Bengualid, Mateo and Bigham, Jeffrey P. and Nichols, Jeffrey},
title={A Conversational Interface to Web Automation},
year={2010},
isbn={9781450302715},
publisher={Association for Computing Machinery},
address={New York, NY, USA},
url={https://doi.org/10.1145/1866029.1866067},
doi={10.1145/1866029.1866067},
abstract={This paper presents CoCo, a system that automates web tasks on a user's behalf through an interactive conversational interface. Given a short command such as "get road conditions for highway 88," CoCo synthesizes a plan to accomplish the task, executes it on the web, extracts an informative response, and returns the result to the user as a snippet of text. A novel aspect of our approach is that we leverage a repository of previously recorded web scripts and the user's personal web browsing history to determine how to complete each requested task. This paper describes the design and implementation of our system, along with the results of a brief user study that evaluates how likely users are to understand what CoCo does for them.},
booktitle={Proceedings of the 23rd Annual ACM Symposium on User Interface Software and Technology},
pages={229--238},
numpages={10},
keywords={intelligent assistants, automation, natural language interfaces},
location={New York, New York, USA},
series={UIST '10},
}Accessibility by Demonstration: Enabling End Users to Guide Developers to Web Accessibility Solutions
@inproceedings {accessibility-by-demonstration,
author={Bigham, Jeffrey P. and Brudvik, Jeremy T. and Zhang, Bernie},
title={Accessibility by Demonstration: Enabling End Users to Guide Developers to Web Accessibility Solutions},
year={2010},
isbn={9781605588810},
publisher={Association for Computing Machinery},
address={New York, NY, USA},
url={https://doi.org/10.1145/1878803.1878812},
doi={10.1145/1878803.1878812},
abstract={Few web developers have been explicitly trained to create accessible web pages, and are unlikely to recognize subtle accessibility and usability concerns that disabled people face. Evaluating web pages with assistive technology can reveal problems, but this software takes time to install and its complexity can be overwhelming. To address these problems, we introduce a new approach for accessibility evaluation called Accessibility by Demonstration (ABD). ABD lets assistive technology users retroactively record accessibility problems at the time they experience them as human-readable macros and easily send those recordings and the software necessary to replay them to others. This paper describes an implementation of ABD as an extension to the WebAnywhere screen reader, and presents an evaluation with 15 web developers not experienced with accessibility showing that interacting with these recordings helped them understand and fix some subtle accessibility problems better than existing tools.},
booktitle={Proceedings of the 12th International ACM SIGACCESS Conference on Computers and Accessibility},
pages={35--42},
numpages={8},
keywords={blind users, web usability, evaluation, web accessibility},
location={Orlando, Florida, USA},
series={ASSETS '10},
}WebTrax: visualizing non-visual web interactions
@inproceedings {webtrax,
title={WebTrax: Visualizing Non-Visual Web Interactions},
author={Bigham, Jeffrey P. and Murray, Kyle},
booktitle={International Conference on Computers for Handicapped Persons},
pages={346--353},
year={2010},
location={Vienna, Austria},
organization={Springer},
}More than Meets the Eye: A Survey of Screen-Reader Browsing Strategies
@inproceedings {screen-reader-strategies,
author={Borodin, Yevgen and Bigham, Jeffrey P. and Dausch, Glenn and Ramakrishnan, I. V.},
title={More than Meets the Eye: A Survey of Screen-Reader Browsing Strategies},
year={2010},
isbn={9781450300452},
publisher={Association for Computing Machinery},
address={New York, NY, USA},
url={https://doi.org/10.1145/1805986.1806005},
doi={10.1145/1805986.1806005},
abstract={Browsing the Web with screen readers can be difficult and frustrating. Web pages often contain inaccessible content that is expressed only visually or that can be accessed only with the mouse. Screen-reader users must also contend with usability challenges encountered when the reading content is designed with built-in assumptions of how it will be accessed -- generally by a sighted person on a standard display. Far from passive consumers of content who simply accept web content as accessible or not, many screen-reader users are adept at developing, discovering, and employing browsing strategies that help them overcome the accessibility and usability problems they encounter. In this paper, we overview the browsing strategies that we have observed screen-reader users employ when faced with challenges, ranging from unfamiliar web sites and complex web pages to dynamic and automatically-refreshing content. A better understanding of existing browsing strategies can inform the design of accessible websites, development of new tools that make experienced users more effective, and help overcome the initial learning curve for users who have not yet acquired effective browsing strategies.},
booktitle={Proceedings of the 2010 International Cross Disciplinary Conference on Web Accessibility (W4A)},
articleno={13},
numpages={10},
keywords={blind, usability, browsing strategy, screen reader, accessibility},
location={Raleigh, North Carolina},
series={W4A '10},
}ASL-STEM Forum: Enabling Sign Language to Grow through Online Collaboration
@inproceedings {asl-stem-forum,
author={Cavender, Anna C. and Otero, Daniel S. and Bigham, Jeffrey P. and Ladner, Richard E.},
title={ASL-STEM Forum: Enabling Sign Language to Grow through Online Collaboration},
year={2010},
isbn={9781605589299},
publisher={Association for Computing Machinery},
address={New York, NY, USA},
url={https://doi.org/10.1145/1753326.1753642},
doi={10.1145/1753326.1753642},
abstract={American Sign Language (ASL) currently lacks agreed-upon signs for complex terms in scientific fields, causing deaf students to miss or misunderstand course material. Furthermore, the same term or concept may have multiple signs, resulting in inconsistent standards and strained collaboration. The ASL-STEM Forum is an online, collaborative, video forum for sharing ASL signs and discussing them. An initial user study of the Forum has shown its viability and revealed lessons in accommodating varying user types, from lurkers to advanced contributors, until critical mass is achieved.},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
pages={2075--2078},
numpages={4},
keywords={deaf, american sign language, video, stem, forum},
location={Atlanta, Georgia, USA},
series={CHI '10},
}2009
ClassInFocus: Enabling Improved Visual Attention Strategies for Deaf and Hard of Hearing Students
@inproceedings {classinfocus,
author={Cavender, Anna C. and Bigham, Jeffrey P. and Ladner, Richard E.},
title={ClassInFocus: Enabling Improved Visual Attention Strategies for Deaf and Hard of Hearing Students},
year={2009},
isbn={9781605585581},
publisher={Association for Computing Machinery},
address={New York, NY, USA},
url={https://doi.org/10.1145/1639642.1639656},
doi={10.1145/1639642.1639656},
abstract={Deaf and hard of hearing students must juggle their visual attention in current classroom settings. Managing many visual sources of information (instructor, interpreter or captions, slides or whiteboard, classmates, and personal notes) can be a challenge. ClassInFocus automatically notifies students of classroom changes, such as slide changes or new speakers, helping them employ more beneficial observing strategies. A user study of notification techniques shows that students who liked the notifications were more likely to visually utilize them to improve performance.},
booktitle={Proceedings of the 11th International ACM SIGACCESS Conference on Computers and Accessibility},
pages={67--74},
numpages={8},
keywords={multimedia conferencing technology, deaf and hard of hearing users, classroom technology},
location={Pittsburgh, Pennsylvania, USA},
series={Assets '09},
award={best paper},
}Mining Web Interactions to Automatically Create Mash-Ups
@inproceedings {mining-mashups,
author={Bigham, Jeffrey P. and Kaminsky, Ryan S. and Nichols, Jeffrey},
title={Mining Web Interactions to Automatically Create Mash-Ups},
year={2009},
isbn={9781605587455},
publisher={Association for Computing Machinery},
address={New York, NY, USA},
url={https://doi.org/10.1145/1622176.1622215},
doi={10.1145/1622176.1622215},
abstract={The deep web contains an order of magnitude more information than the surface web, but that information is hidden behind the web forms of a large number of web sites. Metasearch engines can help users explore this information by aggregating results from multiple resources, but previously these could only be created and maintained by programmers. In this paper, we explore the automatic creation of metasearch mash-ups by mining the web interactions of multiple web users to find relations between query forms on different web sites. We also present an implemented system called TX2 that uses those connections to search multiple deep web resources simultaneously and integrate the results in context in a single results page. TX2 illustrates the promise of constructing mash-ups automatically and the potential of mining web interactions to explore deep web resources.},
booktitle={Proceedings of the 22nd Annual ACM Symposium on User Interface Software and Technology},
pages={203--212},
numpages={10},
keywords={programming-by-example, deep web, meta-search, web forms, mash-ups},
location={Victoria, BC, Canada},
series={UIST '09},
}Evaluating Existing Audio CAPTCHAs and an Interface Optimized for Non-Visual Use
@inproceedings {audio-captchas,
author={Bigham, Jeffrey P. and Cavender, Anna C.},
title={Evaluating Existing Audio CAPTCHAs and an Interface Optimized for Non-Visual Use},
year={2009},
isbn={9781605582467},
publisher={Association for Computing Machinery},
address={New York, NY, USA},
url={https://doi.org/10.1145/1518701.1518983},
doi={10.1145/1518701.1518983},
abstract={Audio CAPTCHAs were introduced as an accessible alternative for those unable to use the more common visual CAPTCHAs, but anecdotal accounts have suggested that they may be more difficult to solve. This paper demonstrates in a large study of more than 150 participants that existing audio CAPTCHAs are clearly more difficult and time-consuming to complete as compared to visual CAPTCHAs for both blind and sighted users. In order to address this concern, we developed and evaluated a new interface for solving CAPTCHAs optimized for non-visual use that can be added in-place to existing audio CAPTCHAs. In a subsequent study, the optimized interface increased the success rate of blind participants by 59\% on audio CAPTCHAs, illustrating a broadly applicable principle of accessible design: the most usable audio interfaces are often not direct translations of existing visual interfaces.},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
pages={1829--1838},
numpages={10},
keywords={blind users, non-visual interfaces, audio captcha},
location={Boston, MA, USA},
series={CHI '09},
}Trailblazer: Enabling Blind Users to Blaze Trails through the Web
@inproceedings {trailblazer,
author={Bigham, Jeffrey P. and Lau, Tessa and Nichols, Jeffrey},
title={Trailblazer: Enabling Blind Users to Blaze Trails through the Web},
year={2009},
isbn={9781605581682},
publisher={Association for Computing Machinery},
address={New York, NY, USA},
url={https://doi.org/10.1145/1502650.1502677},
doi={10.1145/1502650.1502677},
abstract={For blind web users, completing tasks on the web can be frustrating. Each step can require a time-consuming linear search of the current web page to find the needed interactive element or piece of information. Existing interactive help systems and the playback components of some programming-by-demonstration tools identify the needed elements of a page as they guide the user through predefined tasks, obviating the need for a linear search on each step. We introduce TrailBlazer, a system that provides an accessible, non-visual interface to guide blind users through existing how-to knowledge. A formative study indicated that participants saw the value of TrailBlazer but wanted to use it for tasks and web sites for which no existing script was available. To address this, TrailBlazer offers suggestion-based help created on-the-fly from a short, user-provided task description and an existing repository of how-to knowledge. In an evaluation on 15 tasks, the correct prediction was contained within the top 5 suggestions 75.9\% of the time.},
booktitle={Proceedings of the 14th International Conference on Intelligent User Interfaces},
pages={177--186},
numpages={10},
keywords={suggestions, blind users, web accessibility, non-visual interfaces, programming-by-demonstration},
location={Sanibel Island, Florida, USA},
series={IUI '09},
}2008
What's New? Making Web Page Updates Accessible
@inproceedings {accessible-web-updates,
author={Borodin, Yevgen and Bigham, Jeffrey P. and Raman, Rohit and Ramakrishnan, I. V.},
title={What's New? Making Web Page Updates Accessible},
year={2008},
isbn={9781595939760},
publisher={Association for Computing Machinery},
address={New York, NY, USA},
url={https://doi.org/10.1145/1414471.1414499},
doi={10.1145/1414471.1414499},
abstract={Web applications facilitated by technologies such as JavaScript, DHTML, AJAX, and Flash use a considerable amount of dynamic web content that is either inaccessible or unusable by blind people. Server side changes to web content cause whole page refreshes, but only small sections of the page update, causing blind web users to search linearly through the page to find new content. The connecting theme is the need to quickly and unobtrusively identify the segments of a web page that have changed and notify the user of them. In this paper we propose Dynamo, a system designed to unify different types of dynamic content and make dynamic content accessible to blind web users. Dynamo treats web page updates uniformly and its methods encompass both web updates enabled through dynamic content and scripting, and updates resulting from static page refreshes, form submissions, and template-based web sites. From an algorithmic and interaction perspective Dynamo detects underlying changes and provides users with a single and intuitive interface for reviewing the changes that have occurred. We report on the quantitative and qualitative results of an evaluation conducted with blind users. These results suggest that Dynamo makes access to dynamic content faster, and that blind web users like it better than existing interfaces.},
booktitle={Proceedings of the 10th International ACM SIGACCESS Conference on Computers and Accessibility},
pages={145--152},
numpages={8},
keywords={screen reader, web browser, blind users, dynamic content, hearsay, non-visual aural interface},
location={Halifax, Nova Scotia, Canada},
series={Assets '08},
award={best paper},
}
A complete list of my publications is available on my C.V.
@inproceedings {social-microvolunteering,
author={Brady, Erin and Morris, Meredith R. and Bigham, Jeffrey P.},
title={Gauging Receptiveness to Social Microvolunteering},
booktitle={Proceedings of the SIGCHI Conference on Human Factors in Computing Systems},
series={CHI '15},
year={2015},
location={Seoul, Republic of Korea},
numpages={10},
publisher={ACM},
address={New York, NY, USA},
keywords={volunteering, SNS, friendsourcing, crowdsourcing},
url={http://research.microsoft.com/pubs/238319/socialmicrovolunteering_chi2015.pdf},
award={nomination},
}