2024
Pavanatto, Leonardo; Bowman, Doug A.
Virtual Displays for Knowledge Work: Extending or Replacing Physical Monitors for More Flexibility and Screen Space Proceedings Article Forthcoming
In: 2024 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW), pp. 2 pages, Forthcoming.
@inproceedings{pavanatto2024_expanding_replacing,
title = {Virtual Displays for Knowledge Work: Extending or Replacing Physical Monitors for More Flexibility and Screen Space},
author = {Leonardo Pavanatto and Doug A. Bowman},
year = {2024},
date = {2024-03-17},
urldate = {2024-03-17},
booktitle = {2024 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)},
note = {2 pages},
keywords = {},
pubstate = {forthcoming},
tppubtype = {inproceedings}
}
2023
Giovannelli, Alexander; Thomas, Jerald; Lane, Logan; Rodrigues, Francielly; Bowman, Doug A.
Gestures vs. Emojis: Comparing Non-Verbal Reaction Visualizations for Immersive Collaboration Journal Article
In: IEEE Trans. Visual. Comput. Graphics, vol. 29, no. 11, pp. 4772–4781, 2023, ISSN: 1941-0506.
BibTeX | Links:
@article{Giovannelli2023,
title = {Gestures vs. Emojis: Comparing Non-Verbal Reaction Visualizations for Immersive Collaboration},
author = {Alexander Giovannelli and Jerald Thomas and Logan Lane and Francielly Rodrigues and Doug A. Bowman},
doi = {10.1109/tvcg.2023.3320254},
issn = {1941-0506},
year = {2023},
date = {2023-11},
journal = {IEEE Transactions on Visualization and Computer Graphics},
volume = {29},
number = {11},
pages = {4772--4781},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Pavanatto, Leonardo; Davari, Shakiba; Badea, Carmen; Stoakley, Rich; Bowman, Doug A
Virtual Monitors vs. Physical Monitors: an Empirical Comparison for Productivity Work Journal Article
In: Frontiers in Virtual Reality, vol. 4, 2023, ISSN: 2673-4192.
BibTeX | Links:
@article{Pavanatto2023,
author = {Leonardo Pavanatto and Shakiba Davari and Carmen Badea and Rich Stoakley and Doug A Bowman},
title = {Virtual Monitors vs. Physical Monitors: an Empirical Comparison for Productivity Work},
journal = {Frontiers in Virtual Reality},
volume = {4},
year = {2023},
date = {2023-10-23},
urldate = {2023-10-23},
doi = {10.3389/frvir.2023.1215820},
issn = {2673-4192},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Rodrigues, Francielly; Giovannelli, Alexander; Pavanatto, Leonardo; Miao, Haichao; Oliveira, Jauvane C.; Bowman, Doug A.
AMP-IT and WISDOM: Improving 3D Manipulation for High-Precision Tasks in Virtual Reality Proceedings Article
In: 2023 IEEE International Symposium on Mixed and Augmented Reality (ISMAR), pp. 303-311, 2023.
BibTeX | Links:
@inproceedings{rodrigues2023_ampit,
title = {AMP-IT and WISDOM: Improving 3D Manipulation for High-Precision Tasks in Virtual Reality},
author = {Francielly Rodrigues and Alexander Giovannelli and Leonardo Pavanatto and Haichao Miao and Jauvane C. Oliveira and Doug A. Bowman},
doi = {10.1109/ISMAR59233.2023.00045},
year = {2023},
date = {2023-10-16},
urldate = {2023-10-16},
booktitle = {2023 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)},
pages = {303--311},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lisle, Lee; Davidson, Kylie; Pavanatto, Leonardo; Tahmid, Ibrahim; North, Chris; Bowman, Doug
Spaces to Think: A Comparison of Small, Large, and Immersive Displays for the Sensemaking Process Proceedings Article
In: 2023 IEEE International Symposium on Mixed and Augmented Reality (ISMAR), pp. 1084-1093, 2023.
BibTeX | Links:
@inproceedings{lisle-specetothinkdisplays,
title = {Spaces to Think: A Comparison of Small, Large, and Immersive Displays for the Sensemaking Process},
author = {Lee Lisle and Kylie Davidson and Leonardo Pavanatto and Ibrahim Tahmid and Chris North and Doug Bowman},
doi = {10.1109/ISMAR59233.2023.00125},
year = {2023},
date = {2023-10-16},
urldate = {2023-10-16},
booktitle = {2023 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)},
pages = {1084--1093},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Thomas, Jerald; Lee, Sang Won; Giovannelli, Alexander; Lane, Logan; Bowman, Doug
A Communication-Focused Framework for Understanding Immersive Collaboration Experiences Proceedings Article
In: 2023 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW), pp. 301-304, 2023.
BibTeX | Links:
@inproceedings{10108770,
title = {A Communication-Focused Framework for Understanding Immersive Collaboration Experiences},
author = {Jerald Thomas and Sang Won Lee and Alexander Giovannelli and Logan Lane and Doug Bowman},
doi = {10.1109/VRW58643.2023.00070},
year = {2023},
date = {2023-01-01},
booktitle = {2023 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)},
pages = {301--304},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Giovannelli, Alexander; Rodrigues, Francielly; Davari, Shakiba; Tahmid, Ibrahim A.; Lane, Logan; Connor, Cherelle; Davidson, Kylie; Ramirez, Gabriella N.; David-John, Brendan; Bowman, Doug A.
CLUE HOG: An Immersive Competitive Lock-Unlock Experience using Hook On Go-Go Technique for Authentication in the Metaverse Proceedings Article
In: 2023 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW), pp. 945-946, 2023.
BibTeX | Links:
@inproceedings{10108888,
title = {CLUE HOG: An Immersive Competitive Lock-Unlock Experience using Hook On Go-Go Technique for Authentication in the Metaverse},
author = {Alexander Giovannelli and Francielly Rodrigues and Shakiba Davari and Ibrahim A. Tahmid and Logan Lane and Cherelle Connor and Kylie Davidson and Gabriella N. Ramirez and Brendan David-John and Doug A. Bowman},
doi = {10.1109/VRW58643.2023.00315},
year = {2023},
date = {2023-01-01},
booktitle = {2023 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)},
pages = {945--946},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Lu, Feiyu; Pavanatto, Leonardo; Bowman, Doug A.
In-the-Wild Experiences with an Interactive Glanceable AR System for Everyday Use Proceedings Article
In: Proceedings of the 2023 ACM Symposium on Spatial User Interaction, Association for Computing Machinery, Sydney, NSW, Australia, 2023, ISBN: 9798400702815.
BibTeX | Links:
@inproceedings{10.1145/3607822.3614515,
author = {Feiyu Lu and Leonardo Pavanatto and Doug A. Bowman},
title = {In-the-Wild Experiences with an Interactive Glanceable AR System for Everyday Use},
booktitle = {Proceedings of the 2023 ACM Symposium on Spatial User Interaction},
series = {SUI '23},
publisher = {Association for Computing Machinery},
address = {Sydney, NSW, Australia},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
doi = {10.1145/3607822.3614515},
isbn = {9798400702815},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Davidson, Kylie; Lisle, Lee; Tahmid, Ibrahim A.; Whitley, Kirsten; North, Chris; Bowman, Doug A.
Uncovering Best Practices in Immersive Space to Think Proceedings Article
In: 2023 IEEE International Symposium on Mixed and Augmented Reality (ISMAR), pp. 1094-1103, 2023.
BibTeX | Links:
@inproceedings{10316471,
title = {Uncovering Best Practices in Immersive Space to Think},
author = {Kylie Davidson and Lee Lisle and Ibrahim A. Tahmid and Kirsten Whitley and Chris North and Doug A. Bowman},
doi = {10.1109/ISMAR59233.2023.00126},
year = {2023},
date = {2023-01-01},
booktitle = {2023 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)},
pages = {1094--1103},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Davidson, Kylie; Lisle, Lee; Whitley, Kirsten; Bowman, Doug A.; North, Chris
Exploring the Evolution of Sensemaking Strategies in Immersive Space to Think Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, vol. 29, no. 12, pp. 5294-5307, 2023.
BibTeX | Links:
@article{9894094,
title = {Exploring the Evolution of Sensemaking Strategies in Immersive Space to Think},
author = {Kylie Davidson and Lee Lisle and Kirsten Whitley and Doug A. Bowman and Chris North},
doi = {10.1109/TVCG.2022.3207357},
year = {2023},
date = {2023-01-01},
journal = {IEEE Transactions on Visualization and Computer Graphics},
volume = {29},
number = {12},
pages = {5294--5307},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Tahmid, Ibrahim A; Lisle, Lee; Davidson, Kylie; Whitley, Kirsten; North, Chris; Bowman, Doug A
Evaluating the Feasibility of Predicting Information Relevance During Sensemaking with Eye Gaze Data Proceedings Article
In: 2023 IEEE International Symposium on Mixed and Augmented Reality (ISMAR), pp. 713–722, IEEE 2023.
BibTeX | Links:
@inproceedings{tahmid2023evaluating,
title = {Evaluating the Feasibility of Predicting Information Relevance During Sensemaking with Eye Gaze Data},
author = {Ibrahim A. Tahmid and Lee Lisle and Kylie Davidson and Kirsten Whitley and Chris North and Doug A. Bowman},
doi = {10.1109/ISMAR59233.2023.00086},
year = {2023},
date = {2023-01-01},
urldate = {2023-01-01},
booktitle = {2023 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)},
pages = {713--722},
organization = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2022
Tahmid, Ibrahim A; Lisle, Lee; Davidson, Kylie; North, Chris; Bowman, Doug A
Evaluating the Benefits of Explicit and Semi-Automated Clusters for Immersive Sensemaking Conference
International Symposium on Mixed and Augmented Reality, 2022.
@conference{tahmidevaluating,
title = {Evaluating the Benefits of Explicit and Semi-Automated Clusters for Immersive Sensemaking},
author = {Ibrahim A. Tahmid and Lee Lisle and Kylie Davidson and Chris North and Doug A. Bowman},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9995165},
doi = {10.1109/ISMAR55827.2022.00064},
year = {2022},
date = {2022-10-17},
urldate = {2022-10-17},
booktitle = {International Symposium on Mixed and Augmented Reality},
abstract = {Immersive spaces have great potential to support analysts in complex sensemaking tasks, but the use of only manual interactions for organizing data elements can become tedious. We analyzed the user interactions to support cluster formation in an immersive sensemaking system, and we designed a semi-automated cluster creation technique that determines the user’s intent to create a cluster based on object proximity. We present the results of a user study comparing this proximity-based technique with a manual clustering technique and a baseline immersive workspace with no explicit clustering support. We found that semi-automated clustering was faster and preferred, while manual clustering gave greater control to users. These results provide support for the approach of adding intelligent semantic interactions to aid the users of immersive analytics systems.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
for organizing data elements can become tedious. We analyzed
the user interactions to support cluster formation in an immersive
sensemaking system, and we designed a semi-automated cluster creation technique that determines the user’s intent to create a cluster
based on object proximity. We present the results of a user study
comparing this proximity-based technique with a manual clustering technique and a baseline immersive workspace with no explicit
clustering support. We found that semi-automated clustering was
faster and preferred, while manual clustering gave greater control to
users. These results provide support for the approach of adding intelligent semantic interactions to aid the users of immersive analytics
systems
Giovannelli, Alexander; Lisle, Lee; Bowman, Doug A
Exploring the Impact of Visual Information on Intermittent Typing in Virtual Reality Conference
2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR), 2022, ISSN: 1554-7868.
@conference{giovannellivrtyping,
title = {Exploring the Impact of Visual Information on Intermittent Typing in Virtual Reality},
author = {Alexander Giovannelli and Lee Lisle and Doug A. Bowman},
doi = {10.1109/ISMAR55827.2022.00014},
issn = {1554-7868},
year = {2022},
date = {2022-10-17},
urldate = {2022-10-17},
booktitle = {2022 IEEE International Symposium on Mixed and Augmented Reality (ISMAR)},
pages = {8--17},
abstract = {For touch typists, using a physical keyboard ensures optimal text entry task performance in immersive virtual environments. However, successful typing depends on the user’s ability to accurately position their hands on the keyboard after performing other, non-keyboard tasks. Finding the correct hand position depends on sensory feedback, including visual information. We designed and conducted a user study where we investigated the impact of visual representations of the keyboard and users’ hands on the time required to place hands on the homing bars of a keyboard after performing other tasks. We found that this keyboard homing time decreased as the fidelity of visual representations of the keyboard and hands increased, with a video pass-through condition providing the best performance. We discuss additional impacts of visual representations of a user’s hands and the keyboard on typing performance and user experience in virtual reality.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Davari, Shakiba
[DC] Context-Aware Inference and Adaptation in Augmented Reality Conference
2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW), 2022.
@conference{Davari2022DC,
title = {[DC] Context-Aware Inference and Adaptation in Augmented Reality},
author = {Shakiba Davari},
doi = {10.1109/VRW55335.2022.00320},
year = {2022},
date = {2022-03-16},
booktitle = {2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)},
pages = {938--939},
abstract = {The recent developments in Augmented Reality (AR) eyeglasses promise the potential for more efficient and reliable information access. This reinforces the widespread belief that AR Glasses are the next generation of personal computing devices, providing efficient information access to the user all day, every day. However, to realize this vision of all-day wearable AR, the AR interface must address the challenges that constant and pervasive presence of virtual content may cause. Throughout the day, as the user's context switches, an optimal all-day interface must adapt its virtual content display and interactions. The optimal interface, that is the most efficient yet least intrusive, in one context may be the worst interface for another context. This work aims to propose a research agenda to design and validate different adaptation techniques and context-aware AR interfaces and introduce a framework for the design of such intelligent interfaces.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Davari, Shakiba; Lu, Feiyu; Bowman, Doug A
Validating the Benefits of Glanceable and Context-Aware Augmented Reality for Everyday Information Access Tasks Conference
2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR), 2022.
@conference{Davari2022validate,
title = {Validating the Benefits of Glanceable and Context-Aware Augmented Reality for Everyday Information Access Tasks},
author = {Shakiba Davari and Feiyu Lu and Doug A. Bowman},
url = {https://wordpress.cs.vt.edu/3digroup/validatepaper/},
doi = {10.1109/VR51125.2022.00063},
year = {2022},
date = {2022-03-16},
booktitle = {2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)},
abstract = {Glanceable Augmented Reality interfaces have the potential to provide fast and efficient information access for the user. However, where to place the virtual content and how to access them depend on the user context. We designed a Context-Aware AR interface that can intelligently adapt for two different contexts: solo and social. We evaluated information access using Context-Aware AR compared to current mobile phones and non-adaptive Glanceable AR interfaces. We found that in a solo scenario, compared to a mobile phone, the Context-Aware AR interface was preferred, easier, and significantly faster; it improved the user experience; and it allowed the user to better focus on their primary task. In the social scenario, we discovered that the mobile phone was slower, more intrusive, and perceived as the most difficult. Meanwhile, Context-Aware AR was faster for responding to information needs triggered by the conversation; it was preferred and perceived as the easiest for resuming conversation after information access; and it improved the user’s awareness of the other person's facial expressions.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Li, Yuan; Tahmid, Ibrahim A; Lu, Feiyu; Bowman, Doug A
Evaluation of Pointing Ray Techniques for Distant Object Referencing in Model-Free Outdoor Collaborative Augmented Reality Journal Article
In: IEEE Transactions on Visualization and Computer Graphics, vol. 28, no. 11, pp. 3896–3906, 2022.
@article{li2022evaluation,
title = {Evaluation of Pointing Ray Techniques for Distant Object Referencing in Model-Free Outdoor Collaborative Augmented Reality},
author = {Yuan Li and Ibrahim A. Tahmid and Feiyu Lu and Doug A. Bowman},
url = {https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=9873964},
doi = {10.1109/TVCG.2022.3203094},
year = {2022},
date = {2022-01-01},
urldate = {2022-01-01},
journal = {IEEE Transactions on Visualization and Computer Graphics},
volume = {28},
number = {11},
pages = {3896--3906},
publisher = {IEEE},
abstract = {Referencing objects of interest is a common requirement in many collaborative tasks. Nonetheless, accurate object referencing at a distance can be challenging due to the reduced visibility of the objects or the collaborator and limited communication medium. Augmented Reality (AR) may help address the issues by providing virtual pointing rays to the target of common interest. However, such pointing ray techniques can face critical limitations in large outdoor spaces, especially when the environment model is unavailable. In this work, we evaluated two pointing ray techniques for distant object referencing in model-free AR from the literature: the Double Ray technique enhancing visual matching between rays and targets, and the Parallel Bars technique providing artificial orientation cues. Our experiment in outdoor AR involving participants as pointers and observers partially replicated results from a previous study that only evaluated observers in simulated AR. We found that while the effectiveness of the Double Ray technique is reduced with the additional workload for the pointer and human pointing errors, it is still beneficial for distant object referencing.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Lisle, Lee; Lu, Feiyu; Davari, Shakiba; Tahmid, Ibrahim Asadullah; Giovannelli, Alexander; Llo, Cory; Pavanatto, Leonardo; Zhang, Lei; Schlueter, Luke; Bowman, Doug A.
Clean the Ocean: An Immersive VR Experience Proposing New Modifications to Go-Go and WiM Techniques Proceedings Article
In: 2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW), pp. 920-921, 2022.
BibTeX | Links:
@inproceedings{9757607,
title = {Clean the Ocean: An Immersive VR Experience Proposing New Modifications to Go-Go and WiM Techniques},
author = {Lee Lisle and Feiyu Lu and Shakiba Davari and Ibrahim Asadullah Tahmid and Alexander Giovannelli and Cory Llo and Leonardo Pavanatto and Lei Zhang and Luke Schlueter and Doug A. Bowman},
doi = {10.1109/VRW55335.2022.00311},
year = {2022},
date = {2022-01-01},
booktitle = {2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)},
pages = {920--921},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2021
Lu, Feiyu; Davari, Shakiba; Bowman, Doug
Exploration of Techniques for Rapid Activation of Glanceable Information in Head-Worn Augmented Reality Conference
Symposium on Spatial User Interaction, SUI '21 Association for Computing Machinery, Virtual Event, USA, 2021, ISBN: 9781450390910.
@conference{10.1145/3485279.3485286,
title = {Exploration of Techniques for Rapid Activation of Glanceable Information in Head-Worn Augmented Reality},
author = {Feiyu Lu and Shakiba Davari and Doug Bowman},
doi = {10.1145/3485279.3485286},
isbn = {9781450390910},
year = {2021},
date = {2021-11-09},
booktitle = {Symposium on Spatial User Interaction},
publisher = {Association for Computing Machinery},
address = {Virtual Event, USA},
series = {SUI '21},
abstract = {Future augmented reality (AR) glasses may provide pervasive and continuous access to everyday information. However, it remains unclear how to address the issue of virtual information overlaying and occluding real-world objects and information that are of interest to users. One approach is to keep virtual information sources inactive until they are explicitly requested, so that the real world remains visible. In this research, we explored the design of interaction techniques with which users can activate virtual information sources in AR. We studied this issue in the context of Glanceable AR, in which virtual information resides at the periphery of the user’s view. We proposed five techniques and evaluated them in both sitting and walking scenarios. Our results demonstrate the usability, user preference, and social acceptance of each technique, as well as design recommendations to achieve optimal performance. Our findings can inform the design of lightweight techniques to activate virtual information displays in future everyday AR interfaces.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
to everyday information. However, it remains unclear how to address the issue of virtual
information overlaying and occluding real-world objects and information that are of
interest to users. One approach is to keep virtual information sources inactive until
they are explicitly requested, so that the real world remains visible. In this research,
we explored the design of interaction techniques with which users can activate virtual
information sources in AR. We studied this issue in the context of Glanceable AR,
in which virtual information resides at the periphery of the user’s view. We proposed
five techniques and evaluated them in both sitting and walking scenarios. Our results
demonstrate the usability, user preference, and social acceptance of each technique,
as well as design recommendations to achieve optimal performance. Our findings can
inform the design of lightweight techniques to activate virtual information displays
in future everyday AR interfaces.
Pavanatto, Leonardo
Designing Augmented Reality Virtual Displays for Productivity Work Conference
Doctoral consortium of the IEEE International Symposium on Mixed and Augmented Reality (ISMAR), IEEE, 2021.
@conference{pavanatto_dc_2021,
title = {Designing Augmented Reality Virtual Displays for Productivity Work},
author = {Leonardo Pavanatto},
year = {2021},
date = {2021-10-04},
booktitle = {Doctoral Consortium of the IEEE International Symposium on Mixed and Augmented Reality (ISMAR)},
note = {2 pages},
publisher = {IEEE},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Davari, Shakiba; Lu, Feiyu; Li, Yuan; Zhang, Lei; Lisle, Lee; Feng, Xueting; Blustein, Leslie; Bowman, Doug A
Integrating Everyday Proxy Objects in Multi-Sensory Virtual Reality Storytelling Workshop
Workshop on Everyday Proxy Objects for Virtual Reality (EPO4VR), CHI 2021, Yokohama, Japan, 2021.
@workshop{davari2021integrating,
title = {Integrating Everyday Proxy Objects in Multi-Sensory Virtual Reality Storytelling},
author = {Shakiba Davari and Feiyu Lu and Yuan Li and Lei Zhang and Lee Lisle and Xueting Feng and Leslie Blustein and Doug A. Bowman},
url = {http://epo4vr.dfki.de/assets/papers/davari2021vrstorytelling.pdf},
year = {2021},
date = {2021-05-08},
booktitle = {Workshop on Everyday Proxy Objects for Virtual Reality (EPO4VR)},
publisher = {CHI 2021},
address = {Yokohama, Japan},
abstract = {We describe design research on the use of multiple physical proxy objects to create an engaging and compelling virtual reality experience. Physical proxies, such as a camera prop that integrates a help system into the storyline, enhance tactile immersion and may result in improved presence. We use plausible storytelling elements tied to passive haptics and reuse a single tracking device to track multiple physical proxies.},
keywords = {},
pubstate = {published},
tppubtype = {workshop}
}
objects to create an engaging and compelling virtual reality experience. Physical proxies, such as a camera prop that integrates a
help system into the storyline, enhance tactile immersion and may
result in improved presence. We use plausible storytelling elements
tied to passive haptics and reuse a single tracking device to track
multiple physical proxies.