@inproceedings{Mayer:2020:EMV,
  abstract  = {Contemporary voice assistants require that objects of interest be specified in spoken commands. Of course, users are often looking directly at the object or place of interest - fine-grained, contextual information that is currently unused. We present WorldGaze, a software-only method for smartphones that provides the real-world gaze location of a user that voice agents can utilize for rapid, natural, and precise interactions. We achieve this by simultaneously opening the front and rear cameras of a smartphone. The front-facing camera is used to track the head in 3D, including estimating its direction vector. As the geometry of the front and back cameras is fixed and known, we can raycast the head vector into the 3D world scene as captured by the rear-facing camera. This allows the user to intuitively define an object or region of interest using their head gaze. We started our investigations with a qualitative exploration of competing methods, before developing a functional, real-time implementation. We conclude with an evaluation that shows WorldGaze can be quick and accurate, opening new multimodal gaze+voice interactions for mobile voice agents.},
  address   = {New York, NY, USA},
  author    = {Sven Mayer and Gierad Laput and Chris Harrison},
  booktitle = {Proceedings of the 2020 CHI Conference on Human Factors in Computing Systems},
  date      = {2020-04-25},
  doi       = {10.1145/3313831.3376479},
  keywords  = {gaze, interaction technique, mobile device, voice assistants, worldgaze},
  publisher = {ACM},
  pubstate  = {published},
  series    = {CHI '20},
  title     = {Enhancing Mobile Voice Assistants with WorldGaze},
  tppubtype = {inproceedings},
  url       = {http://sven-mayer.com/wp-content/uploads/2020/03/mayer2020worldgaze.pdf https://www.youtube.com/watch?v=kjACtQK3D-k},
  year      = {2020}
}