Compare commits: 2b0eb92618...main (55 commits)
| SHA1 |
|---|
| 6ec76ae2e0 |
| 58161d561f |
| 708b836397 |
| 8a4a0bdb81 |
| 520be0856a |
| cb56abcd43 |
| e01e63d4cf |
| 0202efeb06 |
| 43037c8407 |
| 035603eee7 |
| 9f4e7fb8c7 |
| 60110bd64e |
| beb2dce3bb |
| 0a0e1ff4b5 |
| 3de6ad37df |
| 1482353a22 |
| 50ce740315 |
| e0f7f216d2 |
| 68bd792583 |
| 34db502525 |
| defd40d0bd |
| a603f8e8a5 |
| b296d3ae98 |
| 885f002898 |
| d27dddc548 |
| c681cbf43d |
| 97e35ca282 |
| 1eb5ed0e42 |
| 398583e819 |
| be9f9bb4be |
| 241c73a9c8 |
| f8ec931cd6 |
| f1cf425e7c |
| 93a47df0f8 |
| 49c33a5a37 |
| f05fec76df |
| 1062a6b13c |
| ee93d2079a |
| 121675b2bc |
| 03e96390a0 |
| 1164bd0da6 |
| 85df9fe164 |
| 7fe080351d |
| f42733a7e7 |
| 42f8202d43 |
| 4cbd2b7d0a |
| 5f9638c718 |
| 65ab354b62 |
| 530112fee9 |
| e8dd43f4b8 |
| 6c4891c255 |
| c8e9c8ef80 |
| 74d850fc1b |
| cee4e4b50f |
| b94cf59d74 |
.gitignore (vendored, 1 change)
@@ -147,7 +147,6 @@ acs-*.bib
 
 # knitr
 *-concordance.tex
-# TODO Uncomment the next line if you use knitr and want to ignore its generated tikz files
 # *.tikz
 *-tikzDictionary
 
.vscode/ltex.dictionary.en.txt (vendored, 3 changes)
@@ -1,3 +0,0 @@
-vhar
-visuo-haptic
-vibrotactile
.vscode/ltex.dictionary.fr.txt (vendored, 3 changes)
@@ -1,3 +0,0 @@
-visuo
-RV
-vibrotactile
.vscode/ltex.disabledRules.en.txt (vendored, 4 changes)
@@ -1,4 +0,0 @@
-COMMA_PARENTHESIS_WHITESPACE
-UPPERCASE_SENTENCE_START
-UPPERCASE_SENTENCE_START
-DOUBLE_PUNCTUATION
.vscode/ltex.hiddenFalsePositives.en.txt (vendored, 47 changes)
@@ -1,47 +0,0 @@
-{"rule":"EN_A_VS_AN","sentence":"^\\QHowever, this method has not yet been integrated in an context, where the user should be able to freely touch and explore the visuo-haptic texture augmentations.\\E$"}
-{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qdevice apparatus\\E$"}
-{"rule":"ALLOW_TO","sentence":"^\\QAmong the various haptic texture augmentations, data-driven methods allow to capture, model and reproduce the roughness perception of real surfaces when touched touched by a hand-held stylus ([related_work]texture_rendering).\\E$"}
-{"rule":"ALLOW_TO","sentence":"^\\QAmong the various haptic texture augmentations, data-driven methods allow to capture, model and reproduce the roughness perception of real surfaces when touched by a hand-held stylus ([related_work]texture_rendering).\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QThe contributions of this chapter are: Transposition of data-driven visuo-haptic textures to augment real objects in a direct touch context in immersive .\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q[0.65]experiment/view First person view of the user study.[ As seen through the immersive headset Microsoft HoloLens 2.\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q]\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q[0.7]results/trial_predictions Proportion of trials in which the comparison texture was perceived as rougher than the reference texture, as a function of the amplitude difference between the two textures and the visual rendering.[ Curves represent predictions from the model (probit link function), and points are estimated marginal means with non-parametric bootstrap 95 .\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QThe (results/trial_pses) and (results/trial_jnds) for each visual rendering and their respective differences were estimated from the model, along with their corresponding 95 , using a non-parametric bootstrap procedure (1000 samples).\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QReported response times are .\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QThe frames analyzed were those in which the participants actively touched the comparison textures with a finger speed greater than 1 .\\E$"}
-{"rule":"MISSING_GENITIVE","sentence":"^\\QFriedman tests were employed to compare the ratings to the questions (questions1 and questions2), with post-hoc Wilcoxon signed-rank tests and Holm-Bonferroni adjustment, except for the questions regarding the virtual hand that were directly compared with Wilcoxon signed-rank tests.\\E$"}
-{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults_questions shows these ratings for questions where statistically significant differences were found (results are shown as mean \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q standard deviation): Hand Ownership: participants slightly feel the virtual hand as their own with the Mixed rendering (2.3 1.0) but quite with the Virtual rendering (3.5 0.9, 0.001).\\E$"}
-{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults_questions presents the questionnaire results of the Matching and Ranking tasks.\\E$"}
-{"rule":"EN_A_VS_AN","sentence":"^\\QHowever, this method has not yet been integrated in an context, where the user should be able to freely touch and explore the visuo-haptic texture augmentations.\\E$"}
-{"rule":"ALLOW_TO","sentence":"^\\QAmong the various haptic texture augmentations, data-driven methods allow to capture, model and reproduce the roughness perception of real surfaces when touched by a hand-held stylus ([related_work]texture_rendering).\\E$"}
-{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults/matching_confusion_matrix shows the confusion matrix of the Matching task with the visual textures and the proportion of haptic texture selected in response, i.e. the proportion of times the corresponding haptic texture was selected in response to the presentation of the corresponding visual texture.\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q[0.82]results/matching_confusion_matrix Confusion matrix of the Matching task.[ With the presented visual textures as columns and the selected haptic texture in proportion as rows.\\E$"}
-{"rule":"EN_A_VS_AN","sentence":"^\\QA on the log Completion Time with the Visual Texture as fixed effect and the participant as random intercept was performed.\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QNo statistical significant effect of Visual Texture was found (8 512 1.9, 0.06) on Completion Time (44 , 42 46), indicating an equal difficulty and participant behaviour for all the visual textures.\\E$"}
-{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults/ranking_mean_ci presents the results of the three rankings of the haptic textures alone, the visual textures alone, and the visuo-haptic texture pairs.\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q[ A lower rank means that the texture was considered rougher, a higher rank means smoother.\\E$"}
-{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults/matching_correspondence_analysis shows the first two dimensions with the 18 haptic and visual textures.\\E$"}
-{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults_clusters shows the dendrograms of the two hierarchical clusterings of the haptic and visual textures, constructed using the Euclidean distance and the Ward's method on squared distance.\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q[ The closer the haptic and visual textures are, the more similar they were judged.\\E$"}
-{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults/haptic_visual_clusters_confusion_matrices (left) shows the confusion matrix of the Matching task with visual texture clusters and the proportion of haptic texture clusters selected in response.\\E$"}
-{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults/haptic_visual_clusters_confusion_matrices (right) shows the confusion matrix of the Matching task with visual texture ranks and the proportion of haptic texture clusters selected in response.\\E$"}
-{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults/haptic_visual_clusters_confusion_matrices Confusion matrices of the visual texture (left) or rank (right) with the corresponding haptic texture clusters selected in proportion.\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q[ Holm-Bonferroni adjusted binomial test results are marked in bold when the proportion is higher than chance (i.e. more than 20, 0.05).\\E$"}
-{"rule":"EN_A_VS_AN","sentence":"^\\QA non-parametric on an model was used on the Difficulty and Realism question results, while the other question results were analyzed using Wilcoxon signed-rank tests.\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QOn Difficulty, there were statistically significant effects of Task (1 57 13, 0.001) and of Modality (1 57 8, 0.007), but no interaction effect Task Modality (1 57 2, ).\\E$"}
-{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults/questions_modalitiesresults/questions_tasks\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q[ Questions were bipolar 100-points scales (0 = Very Low and 100 = Very High, except for Performance where 0 = Perfect and 100 = Failure), with increments of 5.\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q] Code Question Mental Demand How mentally demanding was the task?\\E$"}
-{"rule":"NON_STANDARD_WORD","sentence":"^\\QTouching, grasping and manipulating virtual objects are fundamental interactions in ([related_work]ve_tasks) and essential for many of its applications ([related_work]ar_applications).\\E$"}
-{"rule":"AND_END","sentence":"^\\QManipulation of virtual objects is achieved using a virtual hand interaction technique that represents the user's hand in the and simulates interaction with virtual objects ([related_work]ar_virtual_hands).\\E$"}
-{"rule":"THE_PUNCT","sentence":"^\\QHowever, direct hand manipulation is still challenging due to the intangibility of the , the lack of mutual occlusion between the hand and the virtual object in - ([related_work]ar_displays), and the inherent delays between the user's hand and the result of the interaction simulation ([related_work]ar_virtual_hands).\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QTo this end, we selected in the literature and compared the most popular visual hand renderings used to interact with virtual objects in .\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QThe main contributions of this chapter are: A comparison from the literature of six common visual hand renderings used to interact with virtual objects in .\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QWe present the results of the user study and discuss the implications of these results for the manipulation of virtual objects directly with the hand in .\\E$"}
-{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QResults showed that 95 of the contacts between the fingertip and the virtual cube happened at speeds below 1.5 .\\E$"}
-{"rule":"DOUBLE_PUNCTUATION","sentence":"^\\QParticipants rated their expertise (I use it more than once a year) with , , and haptics in a pre-experiment questionnaire.\\E$"}
-{"rule":"THE_SENT_END","sentence":"^\\QIt is technically and conceptually closely related to , which completely replaces perception with a .\\E$"}
-{"rule":"THE_PUNCT","sentence":"^\\QOn the original reality-virtuality continuum of \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, augmented virtuality is also considered, as the incorporation of real objects into a , and is placed between and .\\E$"}
-{"rule":"THE_SENT_END","sentence":"^\\QIn this thesis we call / systems the computational set of hardware (input devices, sensors, displays and haptic devices) and software (tracking, simulation and rendering) that allows the user to interact with the .\\E$"}
-{"rule":"THE_PUNCT","sentence":"^\\QBecause the visuo-haptic is displayed in real time and aligned with the , the user is given the illusion of directly perceiving and interacting with the virtual content as if it were part of the .\\E$"}
-{"rule":"THE_SENT_END","sentence":"^\\QBecause the visuo-haptic is displayed in real time and aligned with the , the user is given the illusion of directly perceiving and interacting with the virtual content as if it were part of the .\\E$"}
.vscode/ltex.hiddenFalsePositives.fr.txt (vendored, 3 changes)
@@ -1,3 +0,0 @@
-{"rule":"D_N","sentence":"^\\QL'intégration de l'haptique portable avec la RA immersive pour créer un environnement augmenté visuo-haptique est cependant complexe et présente de nombreux défis.\\E$"}
-{"rule":"PAS_DE_TRAIT_UNION","sentence":"^\\QLes sensations haptiques virtuelles ne sont alors pas co-localisées avec le contact réel, ce qui peut affecter la perception globale.\\E$"}
-{"rule":"J_N","sentence":"^\\QLes textures sont en effet l'une des principales propriétés perçues d'un objet \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, aussi bien par la vue que le toucher \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, et une augmentation haptique bien étudiée \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"}
@@ -1,2 +1,25 @@
-\chapterstarbookmark{Acknowledgement}
-\label{ch:acknowledgement}
+\chapternotoc{Remerciements}
+
+First of all, I would like to thank my thesis supervisors, Maud Marchal, Claudio Pacchierotti and Éric Marchand, for their invaluable guidance throughout this work.
+You allowed me to fully discover and learn research and teaching, and you granted me great trust and freedom.
+You also always made yourselves available to help me, exchange ideas, and share your curiosity and experience, for which I am very grateful.
+
+I would also like to thank my reviewers, Jens Grubert and Seokhee Jeon, for reading and commenting on my thesis manuscript.
+Your comments allowed me to greatly improve the quality of this manuscript and to prepare my defense as well as possible.
+I also thank my examiners, Sinan Haliyo and Martin Hachet, for their follow-up and feedback throughout the years of the thesis up to the defense.
+Finally, I thank all four of you for accepting, and taking the time, to be part of my thesis committee.
+I greatly appreciated our inspiring exchanges and your rich feedback on my work.
+
+I also thank my many colleagues at IRISA, Inria and INSA for our numerous rich exchanges and all the moments spent together, in the laboratory and outside it.
+I am thinking in particular of Ronan Gaugne, Charles Pontonnier, Thomas Genet, Pascale Sebillot, Anatole Lécuyer, Valérie Gouranton, Hélène de la Ruée and Léa Pillette, who gave me invaluable help and advice on the thesis and on more personal questions.
+But I am thinking above all of my office mates (in chronological order): Alexander, Fouad, Lendy, Pierre-Antoine, Guillaume, Thomas, Glenn and Jim, with whom I shared many moments of work and life.
+Lendy, Pierre-Antoine and Guillaume, I greatly enjoyed setting the world to rights with you every day and our fascinating discussions!
+I will also remember your culinary talents, though for different reasons ;)
+
+Finally, I am deeply grateful to my parents, families and friends for their presence and support during these years of the thesis.
+Thanks also to my parents and parents-in-law for your crucial support in the final days and for preparing the defense reception.
+Our furballs were also always there to bring comfort and to share the joys of everyday life.
+Last but not least, thank you Agathe for being you, for being there, and for everything we are building together.
+The last time I wrote acknowledgements, in 2018, I was looking forward to living with you; now I look forward even more to living the rest of our life together!
+
+\textit{Trugarez vraz d'an holl!}
@@ -1,5 +1,6 @@
 \begin{listof}
 \renewcommand{\contentsname}{\textsf{Contents}}
 \renewcommand{\cftsecfont}{}% Unbold sections
+\pdfbookmark[chapter]{\contentsname}{toc}
 \tableofcontents
 \end{listof}
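A note on the line added above: hyperref's `\pdfbookmark[level]{text}{name}` writes a bookmark into the PDF outline without creating a table-of-contents entry, which is what the contents page itself needs, since `\tableofcontents` does not bookmark its own page. A minimal sketch of the pattern, assuming only a standard `hyperref` setup:

```latex
\documentclass{book}
\usepackage{hyperref}
\begin{document}
% Bookmark the ToC page in the PDF outline without adding a ToC entry;
% "toc" is an internal anchor name, \contentsname the displayed title.
\pdfbookmark[chapter]{\contentsname}{toc}
\tableofcontents
\end{document}
```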
Binary file not shown.
Binary file not shown.
@@ -5,12 +5,8 @@
 
 \bigskip
 
-%This PhD manuscript shows how wearable haptics, worn on the outside of the hand, can improve direct hand interaction in immersive \AR by augmenting the perception of the virtual content and its manipulation.
-In this manuscript thesis, we show how immersive \AR, which integrates visual virtual content into the real world perception, and wearable haptics, which provide tactile sensations on the skin, can improve the free and direct interaction of virtual objects with the hand.
+In this thesis, we show how \AR headsets, which integrate visual virtual content into the real world perception, and wearable haptics, which provide tactile sensations on the skin, can improve direct hand interaction with virtual and augmented objects.
 Our goal is to enable users to perceive and interact with wearable visuo-haptic augmentations in a more realistic and effective way, as if they were real.
-%interaction of the hand with the virtual content.%, moving towards a seamless integration of the virtual into the real world.
-%We are particularly interested in enabling direct contact of virtual and augmented objects with the bare hand.
-%The aim of this thesis is to understand how immersive visual and wearable haptic augmentations complement each other in the context of direct hand perception and manipulation with virtual and augmented objects.
 
 \section{Visual and Haptic Object Augmentations}
 \label{visuo_haptic_augmentations}
@@ -21,7 +17,6 @@ In daily life, \textbf{we simultaneously look at, touch and manipulate the every
 Many of these object properties can be perceived in a complementary way through all our sensory modalities, such as their shape or material \cite{baumgartner2013visual}.
 Vision often precedes touch, enabling us to anticipate the tactile sensations we will feel when touching the object \cite{yanagisawa2015effects}, \eg hardness or texture, and even to anticipate properties that we cannot see, \eg weight or temperature.
 Information from different sensory sources can be complementary, redundant or contradictory \cite{ernst2004merging}.
-%This is why we sometimes want to touch an object to check one of its properties that we have seen and to compare or confront our visual and tactile sensations.
 We then \textbf{instinctively construct a unified perception of the properties of the object} we are exploring and manipulating from our sensory modalities, as well as from the movement of our hand and fingers on the object \cite{ernst2002humans}.
 
 The sense of touch also allows us to perceive our environment, but also to represent ourselves in space and interact with the surrounding objects.
@@ -33,11 +28,11 @@ This rich and complex variety of actions and sensations makes it particularly \t
 
 \textbf{\emph{Haptics} is the study of the sense of touch and user interfaces that involve touch} \cite{klatzky2013haptic}.
 Haptic devices can be categorized according to how they interface with the user: graspable, touchable and wearable, as illustrated in \figref{haptic-categories} \cite{culbertson2018haptics}.
-\emph{Graspable interfaces} are the traditional haptic devices that are held in the hand.
+\emph{Graspable} interfaces are the traditional haptic devices that are held in the hand.
 They are either grounded devices that provide kinesthetic (force) feedback, \eg a robotic arm or a joystick, or ungrounded tactile devices, \eg a game controller or a smartphone.
-\emph{Touchable interfaces} are actuated devices directly touched and that can dynamically change their shape or surface properties, such as hardness or friction, providing simultaneous kinesthetic and cutaneous feedback.
+\emph{Touchable} interfaces are actuated devices directly touched and that can dynamically change their shape or surface properties, such as hardness or friction, providing simultaneous kinesthetic and cutaneous feedback.
 However, graspable interfaces occupy the hand, preventing interaction with other objects, and touchable interfaces often involve cumbersome mechanisms and are by definition limited to their own working surface.
-Instead, \textbf{\emph{wearable interfaces} are directly mounted on the body} to provide cutaneous sensations on the skin in a portable way and \textbf{without restricting the user's movements} \cite{pacchierotti2017wearable}.
+Instead, \textbf{\emph{wearable} interfaces are directly mounted on the body} to provide cutaneous sensations on the skin in a portable way and \textbf{without restricting the user's movements} \cite{pacchierotti2017wearable}.
 
 \begin{subfigs}{haptic-categories}{
 Haptic devices can be divided into three categories according to their interface with the user:
@@ -53,8 +48,8 @@ Instead, \textbf{\emph{wearable interfaces} are directly mounted on the body} to
 
 A wide range of wearable haptic devices have been developed to provide the user with rich virtual tactile sensations, including normal force, skin stretch, vibration and thermal feedback.
 \figref{wearable-haptics} shows some examples of different wearable haptic devices with different form factors and rendering capabilities.
-Their portability, \ie their small form factor, light weight and unobtrusiveness, makes them particularly promising for everyday use in a variety of applications such as robotics, teleoperation, augmented and virtual realities, and social interaction \cite{pacchierotti2017wearable,culbertson2018haptics}.
-However, the \textbf{integration of wearable haptics with \AR has been little explored}. %, and few wearable haptic devices have specifically designed or experimentally tested for direct hand interaction in \AR.
+Their portability, \ie their small form factor, light weight and unobtrusiveness, makes them particularly promising for everyday use in a variety of applications such as robotics, teleoperation, social interaction, and for improving hand interaction in \AR \cite{pacchierotti2017wearable,culbertson2018haptics}.
+However, the \textbf{integration of wearable haptics with \AR has been little explored}.
 
 \begin{subfigs}{wearable-haptics}{
 Wearable haptic devices can provide sensations on the skin as feedback to real or virtual objects being touched.
@@ -73,13 +68,16 @@ However, the \textbf{integration of wearable haptics with \AR has been little ex
 
 \subsectionstarbookmark{Augmented Reality Is Not Only Visual}
 
-\textbf{\emph{Augmented Reality (\AR)} integrates virtual content into the real world perception, creating the illusion of a unique \emph{augmented environment}} \cite{azuma1997survey,skarbez2021revisiting}.
+\textbf{\emph{Augmented Reality (\AR)} integrates virtual content into the real world perception, creating the illusion of a unique \emph{\AE}} \cite{azuma1997survey,skarbez2021revisiting}.
 It thus promises natural and seamless interaction with physical and digital objects (and their combination) directly with our hands \cite{billinghurst2021grand}.
+It could be used to render purely \emph{virtual objects} in the \RE, or to create \emph{augmented objects} by modifying the perception of a real object with virtual content, such as changing its shape or texture.
 It is technically and conceptually closely related to \emph{\VR}, which completely replaces \emph{\RE} perception with a \emph{\VE}.
 
-\AR and \VR can be placed on a reality-virtuality continuum, as proposed by \textcite{milgram1994taxonomy} and illustrated in \figref{rv-continuum}\footnote{On the original reality-virtuality continuum of \textcite{milgram1994taxonomy}, augmented virtuality is also considered, as the incorporation of real objects into a \VE, and is placed between \AR and \VR. For simplicity, we only consider \AR and \VR in this thesis.}.
-It describes the degree of virtuality of the environment along an axis, with one end being \RE and the other end being pure \VE, \ie indistinguishable from the real world (as in \emph{The Matrix} movies).
-Between these two extremes lies \MR, which includes \AR and \VR as different levels of mixing real and virtual environments \cite{skarbez2021revisiting}.\footnote{This is the original and classic definition of \MR, but there is still debate about how to define and characterize \AR and \MR experiences \cite{speicher2019what,skarbez2021revisiting}.}
+\AR and \VR can be placed on a reality-virtuality continuum, as proposed by \textcite{milgram1994taxonomy} and illustrated in \figref{rv-continuum}.
+\footnote{On the original reality-virtuality continuum of \textcite{milgram1994taxonomy}, augmented virtuality is also considered, as the incorporation of real objects into a \VE, and is placed between \AR and \VR. For simplicity, we only consider \AR and \VR in this thesis.}
+It describes the degree of virtuality of the environment along an axis, with one end being \RE and the other end being pure \VE, \ie indistinguishable from the real world.
+Between these two extremes lies \MR, which includes \AR and \VR as different levels of mixing real and virtual environments \cite{skarbez2021revisiting}.
+\footnote{This is the original and classic definition of \MR, but there is still debate about how to define and characterize \AR and \MR experiences \cite{speicher2019what,skarbez2021revisiting}.}
 
 \begin{subfigs}{rv-continuums}{Reality-virtuality continuums. }[][
 \item For the visual sense, as originally proposed by and adapted from \textcite{milgram1994taxonomy}.
@@ -89,28 +87,25 @@
 \subfig[0.49]{visuo-haptic-rv-continuum5}
 \end{subfigs}
 
-%Concepts of virtuality and augmentation can also be applied for sensory modalities other than vision.
 \textcite{jeon2009haptic} proposed to describe visuo-haptic \AR/\VR with two orthogonal reality-virtuality continuums, one for vision and one for touch, as shown in \figref{visuo-haptic-rv-continuum5}.
 The combination of the two axes defines 9 types of visuo-haptic environments, with 3 possible levels of virtuality for each visual or haptic feedback: real, augmented and virtual.
-For example, (visual) \AR using a real object as a proxy to manipulate a virtual object is considered \emph{haptic reality} (\eg \figref{kahl2023using}; bottom middle cell in \figref{visuo-haptic-rv-continuum5}), whereas a device that provides synthetic haptic feedback when touching a virtual object is considered \emph{haptic virtuality} (\eg \figref{meli2018combining}; top middle cell in \figref{visuo-haptic-rv-continuum5}).
+For example, visual \AR using a real object as a proxy to manipulate a virtual object is considered \emph{haptic reality} (\eg \figref{kahl2023using}; bottom middle cell in \figref{visuo-haptic-rv-continuum5}).
+Conversely, a device that provides synthetic haptic feedback when touching a virtual object is considered \emph{haptic virtuality} (\eg \figref{meli2018combining}; top middle cell in \figref{visuo-haptic-rv-continuum5}).
 \textbf{A \emph{haptic augmentation} is then the combination of real and virtual haptic stimuli}, such that the virtual haptic sensations modify the perception of the real object \cite{bhatia2024augmenting} (middle row in \figref{visuo-haptic-rv-continuum5}).
-%In particular, it has been implemented by augmenting the haptic perception of real objects by providing timely virtual tactile stimuli using wearable haptics:
 \figref{salazar2020altering} shows an example of modifying the perceived stiffness of a real object in \VR using simultaneous wearable pressure feedback on the finger (left middle cell in \figref{visuo-haptic-rv-continuum5}).
 \figref{bau2012revel} shows another example of visuo-haptic augmentation of virtual texture using reverse electrovibration when running the finger over a real surface (middle cell in the two axes in \figref{visuo-haptic-rv-continuum5}).
 
-In this thesis we call \AR/\VR \emph{systems} the computational set of hardware (input devices, sensors, displays and haptic devices) and software (tracking, simulation and rendering) that allows the user to interact with the \VE. % by implementing the interaction loop we proposed in \figref{interaction-loop}.
+In this thesis we call \AR/\VR \emph{systems} the computational set of hardware (input devices, sensors, displays and haptic devices) and software (tracking, registration, simulation, and rendering) that allows the user to interact with the \VE.
 Many \AR displays have been explored, from projection systems to hand-held displays.
-\textbf{\AR headsets are the most promising display technology as they are portable and provide the user with an immersive augmented environment} \cite{hertel2021taxonomy}.
-Commercial headsets also have integrated real-time self-location and mapping of the \RE and hand pose estimation of the user.
-While \AR and \VR systems can address any of the human senses, most focus only on visual augmentation \cite[p.144]{billinghurst2015survey} and \cite{kim2018revisiting}.
-%but the most \textbf{promising devices are \AR headsets}, which are \textbf{portable displays worn directly on the head}, providing the user with an \textbf{immersive visual augmented environment}.
+\textbf{\AR headsets are the most promising display technology because they create a portable experience that allows the user to navigate the \AE and interact with it directly using their hands} \cite{hertel2021taxonomy}.
+While \AR and \VR systems can address any of the human senses, most focus only on visual augmentation \cites[p.144]{billinghurst2015survey}{kim2018revisiting}.
 
 \emph{Presence} is the illusion of \enquote{being there} when in \VR, or the illusion of the virtual content to \enquote{feel here} when in \AR \cite{slater2022separate,skarbez2021revisiting}.
-One of the most important aspects of this illusion is the \emph{plausibility}, \ie the illusion that the virtual events are really happening. %, even if the user knows that they are not real.
-However, when an \AR/\VR system lacks haptic feedback, it may create a deceptive and incomplete user experience when the hand reaches the virtual content.
-All (visual) virtual objects are inherently intangible and cannot physically constrain a user's hand, making it difficult to perceive their properties and interact with them with confidence and efficiency.
-It is also necessary to provide a haptic feedback that is coherent with the virtual objects and ensures the best possible user experience, as we argue in the next section.
-The \textbf{integration of wearable haptics with immersive \AR appears to be one of the most promising solutions}, but it remains challenging due to their respective limitations and the additional constraints of combining them, as we will overview in the next section.
+One of the most important aspects of this illusion is the \emph{plausibility}, \ie the illusion that the virtual events are really happening.
+However, \textbf{when an \AR/\VR headset lacks haptic feedback, it may create a deceptive and incomplete user experience when the hand reaches the visual virtual content}.
+All visual virtual objects are inherently intangible and cannot physically constrain or provide touch feedback to the hand of the user.
+This makes it difficult to perceive their properties and interact with them with confidence and efficiency.
+It is also necessary to provide a haptic feedback that is coherent with the virtual content and ensures the best possible user experience.
 
 \begin{subfigs}{visuo-haptic-environments}{Visuo-haptic environments with varying degrees of reality-virtuality. }[][
 \item \AR environment with a real haptic object used as a proxy to manipulate a virtual object \cite{kahl2023using}.
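To make the two-axis continuum in the hunk above concrete, here is an illustrative sketch of its 9 visuo-haptic environment types as a LaTeX table (our arrangement, not a figure from the manuscript; it assumes the thesis macro `\VR` and the cited bibliography keys). Only the cells exemplified in the text are filled in; the remaining combinations follow by analogy:

```latex
% Illustrative sketch: 3 visual levels x 3 haptic levels define 9 types
% of visuo-haptic environments; filled cells are the examples named in
% the text above.
\begin{tabular}{l|ccc}
                 & visual real & visual augmented                              & visual virtual             \\ \hline
haptic real      & reality     & haptic reality \cite{kahl2023using}           &                            \\
haptic augmented &             & visuo-haptic augmentation \cite{bau2012revel} & \cite{salazar2020altering} \\
haptic virtual   &             & haptic virtuality \cite{meli2018combining}    & \VR                        \\
\end{tabular}
```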
@@ -125,28 +120,48 @@ The \textbf{integration of wearable haptics with immersive \AR appears to be one
 \subfig{bau2012revel}
 \end{subfigs}
 
-\section{Research Challenges of Wearable Visuo-Haptic Augmented Reality}
-\label{research_challenges}
+\subsectionstarbookmark{Visuo-Haptic Augmentations}
 
-The integration of wearable haptics with \AR to create a visuo-haptic augmented environment is complex and presents many perceptual and interaction challenges.
-% \ie sensing the augmented environment and acting effectively upon it.
-In this thesis, we propose to \textbf{represent the user's experience with such a visuo-haptic augmented environment as an interaction loop}, shown in \figref{interaction-loop}.
+Providing coherent and effective haptic feedback to visual virtual and augmented objects in \AR is complex.
+To identify the challenges involved, we propose to \textbf{represent the user's experience with such a visuo-haptic \AE as an interaction loop}, shown in \figref{interaction-loop}.
 It is based on the interaction loops of users with \ThreeD systems \cite[p.84]{laviolajr20173d}.
 The \RE and the user's hand are tracked in real time by sensors and reconstructed in visual and haptic \VEs.
-The interactions between the virtual hand and objects are then simulated, and rendered as feedback to the user using a \AR/\VR headset and wearable haptics.
-Because the visuo-haptic \VE is displayed in real time and aligned with the \RE, the user is given the illusion of directly perceiving and interacting with the virtual content as if it were part of the \RE.
+The interactions between the virtual hand and objects are then simulated, and rendered as feedback to the user using an \AR headset and wearable haptics.
+It is important that the visuo-haptic \VE is registered with the \RE and rendered in real time.
+This gives the user the illusion of directly perceiving and interacting with the virtual content as if it were part of the \RE.
 
-\fig{interaction-loop}{The interaction loop between a user and a visuo-haptic augmented environment as proposed in this thesis.}[
+\fig{interaction-loop}{The interaction loop between a user and a visuo-haptic \AE as proposed in this thesis.}[
 A user interacts with the visual (in blue) and haptic (in red) \VEs through a virtual hand (in purple) interaction technique that tracks real hand movements and simulates contact with virtual objects.
-The visual and haptic \VEs are rendered back using an immersive \AR headset and wearable haptics, and are perceived by the user to be registered and co-localized with the \RE (in gray).
+The visual and haptic \VEs are rendered back using an \AR headset and wearable haptics, and are perceived by the user to be registered and co-localized with the \RE (in gray).
 %\protect\footnotemark
 ]
 
-In this context, we focus on two main research challenges:
+Implementing such an interaction loop with an \AR system involves design and technical problems \cite{jeon2015haptic,marchand2016pose}.
+One of the \textbf{key issues is the \emph{registration} of the \VE with the \RE}.
+It is the precise spatial and temporal alignment of the virtual content with the \RE such that a user perceives the virtual as part of the real world.
+For visual \AR, a real camera is usually attached to the \AR display to estimate in real time the \emph{pose} (position and orientation) of the display within the \RE.
+The virtual camera that captures the \VE is set to the same pose and intrinsic parameters (focal length, angle of view, distortion) as the real camera \cite{marchand2016pose}.
+The user is then shown the combined images from the real and virtual cameras.
+With haptic \AR, the haptic feedback must be similarly registered with the \RE \cite{jeon2015haptic}, \eg adding a virtual force in the same direction and at the same time as a real force when pressing a surface with a tool.
+Depending on the type of haptic device and the haptic property to be rendered, this registration process involves different sensors, estimation techniques, and time requirements.
+This can be difficult to achieve and remains one of the main research challenges in haptic \AR.
+
+In addition, visual and haptic \textbf{\AR systems require models to simulate the interaction} with the \VE \cite{jeon2015haptic,marchand2016pose}.
+As mentioned above, models of the \RE are needed to properly register the \VE, but also of the real objects to be augmented and manipulated.
+Haptic \AR also often needs models of the user's contacts with the augmented objects.
+Depending on the rendered haptic property and the required feedback fidelity, these can be complex estimates of the contacts (points, forces) and object properties (shape, material, mass, deformation, etc.).
+Computational and rendering models are also needed to render the virtual visual and haptic stimuli to the user.
+While \ThreeD visual rendering is mature, rendering haptic properties still faces many research challenges due to the complexity of the human touch \cite{pacchierotti2017wearable,culbertson2018haptics}.
+However, a balance has to be found between the perceptual accuracy of all models used and the real-time constraints of the interaction loop.
+
+\section{Research Challenges of Wearable Visuo-Haptic Augmented Reality}
+\label{research_challenges}
+
+The integration of wearable haptics with \AR headsets to create a visuo-haptic \AE is a promising solution, but presents many perceptual and interaction challenges.
+In this thesis, we focus on two main research challenges:
 \textbf{(I) providing plausible and coherent visuo-haptic augmentations}, and
-\textbf{(II) enabling effective manipulation of the augmented environment}.
-Each of these challenges also raises numerous design, technical, perceptual and user experience issues specific to wearable haptics and immersive \AR.
-%, as well as virtual rendering and user experience issues.% in integrating these two sensorimotor feedbacks into a coherent and seamless visuo-haptic augmented environment.
+\textbf{(II) enabling effective manipulation of the \AE}.
+Each of these challenges also raises numerous design, technical, perceptual and user experience issues specific to wearable haptics and \AR headsets.
 
 %\footnotetext{%
 % The icons are \href{https://creativecommons.org/licenses/by/3.0/}{CC BY} licensed:
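As a worked sketch of the visual registration step added in the hunk above (the standard pinhole pose formulation, in the spirit of the cited pose-estimation survey marchand2016pose; the symbols are ours, not the manuscript's): the tracker estimates the pose $(R, t)$ of the real camera in the \RE, and the virtual camera is given the same pose and the same intrinsics $K$, so that a virtual point anchored at a real 3D point $P$ projects to the same pixel $p$ as the real point:

```latex
% Minimal sketch (our notation): homogeneous projection of a 3D point P
% to a pixel p through the estimated pose (R, t) and the intrinsics K
% shared by the real and virtual cameras.
\begin{equation*}
  \tilde{p} \simeq K \, [\, R \mid t \,] \, \tilde{P},
  \qquad
  K = \begin{pmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{pmatrix}
\end{equation*}
```

Temporal registration is the corresponding real-time requirement: the pose estimate must be refreshed fast enough that the virtual overlay does not visibly lag the real view.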
@@ -158,64 +173,59 @@ Each of these challenges also raises numerous design, technical, perceptual and
|
|||||||
\subsectionstarbookmark{Challenge I: Providing Plausible and Coherent Visuo-Haptic Augmentations}
|
\subsectionstarbookmark{Challenge I: Providing Plausible and Coherent Visuo-Haptic Augmentations}
|
||||||
|
|
||||||
\textbf{Many haptic devices have been designed and evaluated specifically for use in \VR}, providing the user with rich kinesthetic and tactile feedback on virtual objects, increasing the realism and effectiveness of interaction with them \cite{culbertson2018haptics}.
|
\textbf{Many haptic devices have been designed and evaluated specifically for use in \VR}, providing the user with rich kinesthetic and tactile feedback on virtual objects, increasing the realism and effectiveness of interaction with them \cite{culbertson2018haptics}.
|
||||||
Although closely related, \AR and \VR have key differences in their respective renderings that can affect user perception.
|
Although closely related, \AR and \VR headsets have key differences in their respective renderings that can affect user perception.
|
||||||
|
|
||||||
%As such, in \VR, visual sensations are particularly dominant in perception, and conflicts with haptic sensations are also specifically created to influence the user's perception, for example to create pseudo-haptic \cite{ujitoko2021survey} or haptic retargeting \cite{azmandian2016haptic} effects.
|
Many hand-held or wearable haptic devices take the form of controllers, gloves or exoskeletons, all of which cover the fingertips and are therefore not suitable for \AR headsets.
|
||||||
|
|
||||||
Many hand-held or wearable haptic devices take the form of controllers, gloves or exoskeletons, all of which cover the fingertips and are therefore not suitable for \AR.
|
|
||||||
The \textbf{user's hand must be free to touch and interact with the \RE while wearing a wearable haptic device}.
|
The \textbf{user's hand must be free to touch and interact with the \RE while wearing a wearable haptic device}.
|
||||||
Instead, it is possible to place the haptic actuator close to the point of contact with the \RE, \eg providing haptic feedback on the nail \cite{ando2007fingernailmounted,teng2021touch}, another phalanx \cite{asano2015vibrotactile,detinguy2018enhancing,salazar2020altering} or the wrist \cite{pezent2019tasbi,sarac2022perceived} for rendering fingertip contact with virtual content.
|
Instead, it is possible to place the haptic actuator close to the point of contact with the \RE, \eg providing haptic feedback on the nail \cite{ando2007fingernailmounted,teng2021touch}, another phalanx \cite{asano2015vibrotactile,detinguy2018enhancing,salazar2020altering} or the wrist \cite{pezent2019tasbi,sarac2022perceived} for rendering fingertip contact with virtual content.
|
||||||
Therefore, when touching a virtual or augmented object, \textbf{the real and virtual visual sensations are perceived as co-localized, but the virtual haptic feedback is not}.
|
Therefore, when touching a virtual or augmented object, \textbf{the real and virtual visual sensations are perceived as co-localized, but the virtual haptic feedback is not}.
|
||||||
It remains to be investigated how such potential discrepancies affect the overall perception to design visuo-haptic augmentations adapted to \AR.
|
Such potential visuo-haptic discrepancies may negatively affect the user's perception of the registration of the \VE with the \RE.
|
||||||
|
This remains to be investigated to understand how to design visuo-haptic augmentations adapted to \AR headsets.
|
||||||
|
|
||||||
%So far, most of the \AR studies and applications only add visual and haptic sensations to the user's overall perception of the environment, but conversely it is more difficult to remove sensations.
|
Therefore, the \textbf{added visual and haptic virtual sensations may also be perceived as incoherent} with each other, for example with a lower rendering quality, a temporal latency, a spatial misalignment, or a combination of these.
|
||||||
%Visual and haptic augmentations of the \RE add sensations to the user's overall perception.
|
It could also be caused by the limited rendering capabilities of wearable haptic devices \cite{pacchierotti2017wearable}.
|
||||||
The \textbf{added visual and haptic virtual sensations may also be perceived as incoherent} with the sensations of the real objects, for example with a lower rendering quality, a temporal latency, a spatial shift, or a combination of these.
|
Finally, the user can still see the \RE with an \AR headset, including their hands, augmented real objects and worn haptic devices, unlike \VR where there is total control over the visual rendering.
|
||||||
Moreover, in \AR the user can still see the real world environment, including their hands, augmented real objects and worn haptic devices, unlike \VR where there is total control over the visual rendering. % of the hand and \VE.
|
It is therefore unclear to what extent the real and virtual visuo-haptic sensations will be perceived as a whole, and to what extent they will conflict or complement each other.
|
||||||
It is therefore unclear to what extent the real and virtual visuo-haptic sensations will be perceived as a whole, and to what extent they will conflict or complement each other. % in the perception of the augmented environment.
|
With a better understanding of \textbf{how visual factors can influence the perception of haptic augmentations}, the many wearable haptic devices that already exist but have not yet been fully explored with \AR can be better applied, and new visuo-haptic augmentations adapted to \AR can be designed.
|
||||||
With a better understanding of \textbf{how visual factors can influence the perception of haptic augmentations}, the many wearable haptic systems that already exist but have not yet been fully explored with \AR can be better applied, and new visuo-haptic augmentations adapted to \AR can be designed.
|
|
||||||
|
|
||||||
\subsectionstarbookmark{Challenge II: Enabling Effective Manipulation of the Augmented Environment}
|
\subsectionstarbookmark{Challenge II: Enabling Effective Manipulation of the Augmented Environment}
|
||||||
|
|
||||||
Touching, \textbf{grasping and manipulating virtual objects are fundamental interactions for \AR} \cite{kim2018revisiting}, \VR \cite{bergstrom2021how} and \VEs in general \cite[p.385]{laviolajr20173d}.
|
Touching, \textbf{grasping and manipulating virtual objects are fundamental interactions for \AR} \cite{kim2018revisiting}, \VR \cite{bergstrom2021how} and \VEs in general \cite[p.385]{laviolajr20173d}.
|
||||||
In the previous challenge section, we described how the user's hand should be free to interact with the \RE using a wearable haptic device.
|
In the previous challenge section, we described how the user's hand should be free to interact with the \RE using a wearable haptic device.
|
||||||
We can then expect a seamless and direct manipulation of the virtual content with the hands, as if it were real.
|
We can then expect a seamless and direct manipulation of the virtual content with the hands, as if it were real.
|
||||||
%Since the hand is not occupied or covered with a haptic device so as to not impair interaction with the \RE, as described in the previous section, one can expect a seamless and direct manipulation of the hand with the virtual content as if it were real.
|
When touching a real object that is visually augmented, the user's hand is physically constrained by the real object, allowing for easy and natural interaction.
|
||||||
When touching a visually augmenting a real object, the user's hand is physically constrained by the object, allowing for easy and natural interaction.
|
|
||||||
However, \textbf{manipulating a purely virtual object with the bare hand can be challenging}, especially without good haptic feedback \cite{maisto2017evaluation,meli2018combining}. %, and one will rely on visual and haptic feedback to guide the interaction.
|
|
||||||
In addition, wearable haptic devices are limited to cutaneous feedback, and cannot provide forces to constrain the hand contact with the virtual object \cite{pacchierotti2017wearable}.
|
In addition, wearable haptic devices are limited to cutaneous feedback, and cannot provide forces to constrain the hand contact with the virtual object \cite{pacchierotti2017wearable}.
|
||||||
|
|
||||||
Current \AR systems have visual rendering limitations that also affect interaction with virtual objects. %, due to depth underestimation, a lack of mutual occlusions, and hand tracking latency.
|
Current \AR headsets have visual rendering limitations that also affect interaction with virtual objects.
|
||||||
\AR is the display of superimposed images of the virtual world, synchronized with the user's current view of the real world.
|
\AR is the display of superimposed images of the virtual world, synchronized with the user's current view of the real world.
|
||||||
However, the depth perception of virtual objects is often underestimated \cite{peillard2019studying,adams2022depth}.
|
However, the depth perception of virtual objects is often underestimated \cite{peillard2019studying,adams2022depth}.
|
||||||
There is also often \textbf{a lack of mutual occlusions between the hand and a virtual object}, that is the hand can hide the object or be hidden by the object \cite{macedo2023occlusion}.
|
There is also often \textbf{a lack of mutual occlusions between the hand and a virtual object}, that is the hand can hide the object or be hidden by the object \cite{macedo2023occlusion}.
|
||||||
Finally, as illustrated in \figref{interaction-loop}, interaction with a virtual object is an illusion, because the real hand controls a virtual hand in real time, like an avatar, whose contacts with virtual objects are then simulated in the \VE.
Therefore, there is inevitably a latency between the movements of the real hand and the feedback movements of the virtual object, and a spatial misalignment between the real hand and the virtual hand, whose movements are constrained by the virtual object being touched \cite{prachyabrued2014visual}.
These three rendering limitations make it \textbf{difficult to perceive the position of the fingers relative to the object} before touching or grasping it, as well as to estimate the force required to grasp the virtual object and move it to a desired location.

Hence, it is necessary to provide feedback that allows the user to efficiently contact, grasp and manipulate a virtual object with the hand.
Yet, it is unclear which type of visual and wearable haptic feedback, or their combination, is best suited to guide the manipulation of a virtual object.

\section{Approach and Contributions}
\label{contributions}

As we described in \secref{research_challenges}, providing a coherent and effective visuo-haptic augmented environment to a user is complex and raises many issues.
Our approach is to:
\begin{enumerate*}[label=(\arabic*)]
\item design wearable visuo-haptic renderings that augment both the objects being interacted with and the hand interacting with them, and
\item evaluate in user studies, using psychophysical, performance and user experience methods, how these visuo-haptic renderings affect hand interaction with these objects.
\end{enumerate*}

We consider two main axes of research, each addressing one of the research challenges identified above:
\begin{enumerate*}[label=(\Roman*)]
\item \textbf{augmenting the visuo-haptic texture perception of real surfaces}, and
\item \textbf{improving the manipulation of virtual objects}.
\end{enumerate*}
Our contributions are summarized in \figref{contributions}.

\fig[0.95]{contributions}{Summary of our contributions through the simplified interaction loop.}[
The contributions are represented in dark grey boxes, the research axes in green circles.
The first axis is \textbf{(I)} the design and evaluation of the perception of visuo-haptic texture augmentations of real surfaces, directly touched by the hand.
The second axis focuses on \textbf{(II)} improving the manipulation of virtual objects with the bare hand using visuo-haptic augmentations of the hand as interaction feedback.
]
\subsectionstarbookmark{Axis I: Augmenting the Texture Perception of Real Surfaces}

Wearable haptic devices have proven effective in modifying the perception of a touched real surface, without altering the object or covering the fingertip, forming a haptic augmentation \cite{bau2012revel,detinguy2018enhancing,salazar2020altering}.
However, wearable haptic augmentation combined with \AR has been little explored, and so has the visuo-haptic augmentation of texture.
Texture is indeed one of the most fundamental perceived properties of a surface material \cite{hollins1993perceptual,okamoto2013psychophysical}, perceived equally well by sight and touch \cite{bergmanntiest2007haptic,baumgartner2013visual}, and one of the most studied haptic-only (without visual) augmentations \cite{unger2011roughness,culbertson2014modeling,asano2015vibrotactile,strohmeier2017generating,friesen2024perceived}.
For this first axis of research, we propose to \textbf{design and evaluate the perception of wearable virtual visuo-haptic textures augmenting real surfaces}.
To this end, we (1) design a system for rendering wearable visuo-haptic texture augmentations, which we then use to (2) evaluate how the perception of haptic texture augmentations is affected by visual feedback of the virtual hand and the environment, and (3) investigate the perception of co-localized visuo-haptic texture augmentations.

First, an effective approach to render haptic textures is to generate a vibrotactile signal that represents the finger-texture interaction \cite{culbertson2014modeling,asano2015vibrotactile}.
Yet, achieving natural interaction with the hand and coherent visuo-haptic feedback requires real-time rendering of the textures, no constraints on hand movements, and good synchronization between the visual and haptic feedback.
Thus, our first objective is to \textbf{design a real-time system} that allows free exploration of \textbf{wearable visuo-haptic texture augmentations} on real surfaces with the bare hand.
This will form the basis of the next two chapters.

Second, many works have investigated haptic augmentations of texture, but none have integrated them with \AR and \VR, or considered the influence of visual feedback on their perception.
Still, it is known that visual feedback can alter the perception of real and virtual objects.
Hence, our second objective is to \textbf{evaluate how the perception of wearable haptic texture augmentation is affected by the visual feedback of the virtual hand and the environment} (real, augmented or virtual).

Finally, visuo-haptic texture databases have been created from real texture captures \cite{culbertson2014penn,balasubramanian2024sens3} to be rendered as virtual textures with hand-held haptic devices that are perceived as similar to real textures \cite{culbertson2015should,friesen2024perceived}.
However, the rendering of these textures with an \AR headset and wearable haptics remains to be investigated.
Our third objective is to \textbf{evaluate the perception of simultaneous and co-localized wearable visuo-haptic texture augmentations} of real surfaces in \AR.

\subsectionstarbookmark{Axis II: Improving the Manipulation of Virtual Objects}

With wearable visuo-haptic \AR, the hand is free to touch and interact seamlessly with real, augmented and virtual objects.
Hence, a user can expect natural and direct contact and manipulation of virtual objects with the bare hand.
However, the intangibility of the visual \VE, the display limitations of current \AR headsets and the inherent spatial and temporal discrepancies between the user's hand actions and the visual feedback in the \VE can make interaction with virtual objects particularly challenging.
Two particular sensory feedbacks are known to improve such direct virtual object manipulation, but have not been properly investigated with \AR headsets: visual feedback of the virtual hand \cite{piumsomboon2014graspshell,prachyabrued2014visual} and delocalized haptic feedback \cite{lopes2018adding,teng2021touch}.
For this second axis of research, we propose to \textbf{design and evaluate visuo-haptic augmentations of the hand as interaction feedback with virtual objects} in \AR.
We consider the effect on user performance and experience of (1) the visual feedback of the virtual hand as augmentation of the real hand and (2) different delocalized haptic feedback of virtual object manipulation with the hand in combination with visual hand augmentations.

First, the visual feedback of the virtual hand is a key element for interacting with and manipulating virtual objects in \VR \cite{prachyabrued2014visual,grubert2018effects}.
Some works have also investigated the visual feedback of the virtual hand in \AR, but either not in a context of virtual object manipulation with a headset \cite{blaga2017usability,yoon2020evaluating} or limited to a single visual hand augmentation \cite{piumsomboon2014graspshell,maisto2017evaluation}.
\AR headsets also have significant perceptual differences from \VR due to the visibility of the real hand and environment, which can affect user experience and performance \cite{yoon2020evaluating}.
Thus, our fourth objective is to \textbf{investigate the visual feedback of the virtual hand as augmentation of the real hand} for direct hand manipulation of virtual objects.

Second, as described above, the haptic actuators need to be moved away from the fingertips so as not to impair the hand movements, sensations and interactions with the \RE.
Our last objective is to \textbf{investigate the delocalized haptic feedback of virtual object manipulation with the hand}, in combination with visual hand augmentations.

\section{Thesis Overview}
\label{thesis_overview}

In this \textit{Introduction} chapter, we have presented the research challenges, objectives, approach and contributions of this thesis.

In \textbf{\chapref{related_work}}, we review previous work on the perception and manipulation of virtual and augmented objects, directly with the hand, using either wearable haptics, \AR, or their combination.
We then address each of our two research axes in a dedicated part.
In \textbf{\partref{perception}}, we present our contributions to the first axis of research: modifying the visuo-haptic texture perception of real surfaces.
We evaluate how the visual feedback of the hand (real or virtual), the environment (\AR or \VR) and the textures (coherent, different or not shown) affect the perception of virtual vibrotactile textures rendered on real surfaces and touched directly with the index finger.

In \textbf{\chapref{vhar_system}}, we design and implement a system for rendering visuo-haptic virtual textures that augment real surfaces.
The haptic textures are periodic patterned textures rendered by a wearable vibrotactile actuator worn on the middle phalanx of the finger touching the surface.
The poses of the real hand and the environment are estimated using a vision-based technique.
The visual rendering is done using the \OST-\AR headset Microsoft HoloLens~2.
The system allows free visual and haptic exploration of the textures, as if they were real, and forms the basis of the next two chapters.

In \textbf{\chapref{xr_perception}}, we investigate in a psychophysical user study how the perception of haptic texture augmentations differs in \AR \vs \VR, and when touched by a virtual hand \vs one's own hand.
The virtual textures are paired visual and haptic captures of real surfaces.
Our objective is to assess the perceived realism, coherence and roughness of the combination of nine representative visuo-haptic texture pairs.

\noindentskip
In \textbf{\partref{manipulation}}, we describe our contributions to the second axis of research: improving direct hand manipulation of virtual objects, using visuo-haptic augmentations of the hand as interaction feedback in \AR.

In \textbf{\chapref{visual_hand}}, we investigate in a user study six visual hand augmentations, a set of the most popular in the \AR literature.
Using the \OST-\AR headset Microsoft HoloLens~2, we evaluate their effect on user performance and experience in two representative manipulation tasks: pushing-and-sliding and grasping-and-placing a virtual object directly with the hand.

In \textbf{\chapref{visuo_haptic_hand}}, we evaluate in a user study delocalized haptic feedback of hand manipulation of a virtual object, using two vibrotactile contact techniques provided at five different positions on the hand.
They are compared with the two most representative visual hand augmentations from the previous chapter, resulting in twenty visuo-haptic hand feedbacks that are evaluated within the same experimental setup and design.

\noindentskip
\section{Perception and Interaction with the Hand}
\label{haptic_hand}

The haptic sense has specific characteristics that make it unique compared to the other senses.
It enables us to perceive a large diversity of properties of everyday objects, through a complex combination of sensations produced by numerous sensory receptors distributed throughout the body, especially in the hand \cite{johansson2009coding}.
It also allows us to act on these objects, to come into contact with them, to grasp them and to actively explore them.
These two mechanisms, \emph{action} and \emph{perception}, are closely associated and both are essential to form a complete haptic experience of interacting with the environment using the hand \cite{lederman2009haptic}.

\subsection{The Haptic Sense}

As illustrated in \figref{sensorimotor_continuum}, \textcite{jones2006human} delineate the functions of the hand into four categories along a sensorimotor continuum:
\begin{itemize}
\item \emph{Gestures}, or non-prehensible skilled movements, are motor activities without constant contact with an object. Examples include pointing at a target, typing on a keyboard, accompanying speech with gestures, or signing in sign language \cite{yoon2020evaluating}.
\end{itemize}

\fig[0.65]{sensorimotor_continuum}{The sensorimotor continuum of the hand function proposed by and adapted from \textcite{jones2006human}.}[
Functions of the hand are classified into four categories based on the relative importance of sensory and motor components.
\protect\footnotemark
]

This classification has been further refined by \textcite{bullock2013handcentric} into 15 categories of possible hand interactions with an object.
In this thesis, we are interested in exploring visuo-haptic texture augmentations (\partref{perception}) and grasping of virtual objects (\partref{manipulation}) using an \AR headset and wearable haptics.

\subsubsection{Hand Anatomy and Motion}
\label{hand_anatomy}

Each finger is formed by a chain of 3 phalanges, proximal, middle and distal, except the thumb, which has only two.
The joints at the base of each phalanx allow flexion and extension, \ie folding and unfolding movements relative to the preceding bone.
The proximal phalanges can also adduct and abduct, \ie move the fingers towards and away from each other.
Finally, the metacarpal of the thumb is capable of flexion/extension and adduction/abduction, which allows the thumb to oppose the other fingers.
These axes of movement are called \DoFs and can be represented by a \emph{kinematic model} of the hand with 27 \DoFs as shown in \figref{blausen2014medical_hand}.
Thus, the thumb has 5 \DoFs, each of the other four fingers has 4 \DoFs and the wrist has 6 \DoFs and can take any position (3 \DoFs) or orientation (3 \DoFs) in space \cite{erol2007visionbased}.

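As an illustrative tally of this \DoF budget (a sketch of ours, purely for illustration, with the joint grouping following the description above):
\begin{verbatim}
# Illustrative tally of the 27-DoF kinematic hand model described above.
WRIST  = 3 + 3      # position + orientation
THUMB  = 2 + 2 + 1  # metacarpal (flex/ext + add/abd), proximal, distal
FINGER = 2 + 1 + 1  # proximal (flex/ext + add/abd), middle, distal

assert WRIST + THUMB + 4 * FINGER == 27
\end{verbatim}
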
This complex structure enables the hand to perform a wide range of movements and gestures. However, the way we explore and grasp objects follows simpler patterns, depending on the object being touched and the aim of the interaction.

But when running the finger over the surface with a lateral movement (\secref{exploratory_procedures}), vibrations are also generated that provide movement cues to roughness.
In particular, when the asperities are smaller than \qty{0.1}{mm}, such as paper fibers, the pressure cues are no longer captured and only the movement, \ie the vibrations, can be used to detect the roughness \cite{hollins2000evidence}.
This limit distinguishes \emph{macro-roughness} from \emph{micro-roughness}.

The perception of roughness can be characterized by the density of the surface elements: the perceived (subjective) intensity of roughness increases with the spacing between the elements.
For macro-textures, the size of the elements, the force applied and the speed of exploration have limited effects on the intensity perceived \cite{klatzky2010multisensory}: macro-roughness is a \emph{spatial perception}.
This allows us to read Braille \cite{lederman2009haptic}.
However, the speed of exploration affects the perceived intensity of micro-roughness \cite{bensmaia2003vibrations}.
Stiffness depends on the structure of the object: a thick object can be more compliant than a thin one of the same material.

\subfig[.45]{bergmanntiest2009cues}
\end{subfigs}

An object with low stiffness but high Young's modulus can be perceived as hard, and vice versa, as shown in \figref{bergmanntiest2009cues}.
With finger pressure, a relative difference (the \emph{Weber fraction}) of \percent{\sim 15} is required to discriminate between two objects of different stiffness or elasticity.
However, in the absence of pressure sensations (by placing a thin disc between the finger and the object), the necessary relative difference becomes much larger (Weber fraction of \percent{\sim 50}).
That is, \textcite{bergmanntiest2009cues} showed that the perception of hardness relies for \percent{90} on surface deformation cues and for \percent{10} on displacement cues.
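For illustration (our numerical example), a Weber fraction of \percent{15} means that, when pressing with the finger, a comparison object must be roughly \num{1.15} times stiffer than the reference to be reliably discriminated; without pressure cues, it must be about \num{1.5} times stiffer.
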
\subsection{Conclusion}
\label{haptic_sense_conclusion}

\subfig{leonardis20173rsr}
\end{subfigs}

\subsection{Wearable Haptic Devices for the Hand}
\label{wearable_haptic_devices}

The simplicity of this approach allows the belt to be placed anywhere on the hand.
\begin{subfigs}{tangential_belts}{Tangential motion actuators and compression belts.}[][
\item A skin stretch actuator for the fingertip \cite{leonardis2015wearable}.
\item A 3 \DoF actuator capable of normal and tangential motion on the fingertip \cite{schorr2017fingertip}.
\item The hRing, a shearing belt actuator for the proximal phalanx of the finger \cite{pacchierotti2016hring}.
\item Tasbi, a wristband capable of pressure and vibrotactile feedback \cite{pezent2022design}.
]
The real and virtual sensations are thus integrated into a single perception of the touched property.
In particular, the visual rendering of a touched object can also influence the perception of its haptic properties, \eg by modifying its visual texture in \AR or \VR, as discussed in \secref{visuo_haptic}.

\textcite{bhatia2024augmenting} categorize the haptic augmentations into three types: direct touch, touch-through, and tool-mediated.
In \emph{direct touch}, the haptic device does not cover the inside of the hand so as not to impair the interaction of the user with the \RE, and is typically achieved with wearable haptics.
In touch-through and tool-mediated, or \emph{indirect feel-through} \cite{jeon2015haptic}, the haptic device is placed between the hand and the \RE.
Many haptic augmentations were first developed with touch-through devices, and some (but not all) were later transposed to direct touch augmentation with wearable haptic devices.

Since we have chosen to focus in \secref{object_properties} on the haptic perception of roughness and hardness of objects, we review below the methods to modify the perception of these properties.
Of course, wearable haptics can also be used in a direct touch context to modify the perceived friction \cite{konyo2008alternative,salazar2020altering}, weight \cite{minamizawa2007gravity}, or local deformation \cite{salazar2020altering} of real objects, but such augmentations are rare \cite{bhatia2024augmenting} and will not be detailed here.

\subsubsection{Roughness Augmentation}
\label{texture_rendering}

To modify the perception of the haptic roughness (or texture, see \secref{roughness}) of a real object, vibrations are typically applied to the skin by the haptic device as the user moves the finger or a tool over the surface.
There are two main approaches to model virtual textures: \emph{simulation models} and \emph{data-driven models} \cite{klatzky2013haptic,culbertson2018haptics}.

\paragraph{Simulation Models}

The simplest texture simulation model is a 1D sinusoidal grating $v(t)$ with spatial period $\lambda$ and amplitude $A$ that is scanned by the user at velocity $\dot{x}(t)$:
\begin{equation}{grating_rendering}
v(t) = A \sin(\frac{2 \pi \dot{x}(t)}{\lambda})
\end{equation}
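In this formulation, the grating is felt as a vibration whose instantaneous frequency follows the scanning speed, $f(t) = \dot{x}(t) / \lambda$: as a worked example (ours, for illustration), scanning a grating of spatial period $\lambda = \qty{1}{\mm}$ at $\dot{x} = \qty{100}{\mm\per\second}$ produces a \qty{100}{\hertz} vibration, well within the skin's vibrotactile sensitivity range.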
The perceived roughness increased proportionally to the virtual texture amplitude.
\textcite{ujitoko2019modulating} instead used a square wave signal and a hand-held stylus with an embedded voice-coil.
\textcite{friesen2024perceived} compared the frequency modulation of \eqref{grating_rendering} with amplitude modulation (\figref{friesen2024perceived}), and found that the frequency modulation was perceived as more similar to real sinusoidal gratings for lower spatial periods (\qty{0.5}{\mm}) but both modulations were effective for higher spatial periods (\qty{1.5}{\mm}).
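For comparison, a generic amplitude-modulation scheme (a sketch of ours, not necessarily the exact signal compared in the cited study) keeps a fixed carrier frequency $f_c$ and modulates its envelope with the grating profile at the finger position $x(t)$:
\begin{equation}{grating_am_sketch}
v(t) = A \sin(\frac{2 \pi x(t)}{\lambda}) \sin(2 \pi f_c t)
\end{equation}
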
The model in \eqref{grating_rendering} can be extended to 2D textures by adding a second sinusoidal grating with an orthogonal orientation, as done by \textcite{girard2016haptip}.
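Following the same notation, one possible form of this 2D extension (our sketch, using the two orthogonal velocity components $\dot{x}(t)$ and $\dot{y}(t)$ with per-axis amplitudes and spatial periods) is:
\begin{equation}{grating_rendering_2d}
v(t) = A_x \sin(\frac{2 \pi \dot{x}(t)}{\lambda_x}) + A_y \sin(\frac{2 \pi \dot{y}(t)}{\lambda_y})
\end{equation}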
More complex models have also been developed to be physically accurate and reproduce with high fidelity the roughness perception of real patterned surfaces \cite{unger2011roughness}, but they require high-fidelity force feedback devices that are expensive and have a limited workspace.

More complex models were then created to more systematically capture everyday textures.
This led to the release of the \HaTT database, a public set of stylus recordings and models of 100 haptic textures \cite{culbertson2014one}.
|
This led to the release of the \HaTT database, a public set of stylus recordings and models of 100 haptic textures \cite{culbertson2014one}.
|
||||||
A similar database, but captured from a direct touch context with the fingertip, has also recently been released \cite{balasubramanian2024sens3}.
|
A similar database, but captured from a direct touch context with the fingertip, has also recently been released \cite{balasubramanian2024sens3}.
|
||||||
A common limitation of these data-driven models is that they can only render \emph{isotropic} textures: their record does not depend on the position of the measure, and the rendering is the same regardless of the direction of the movement.
|
A common limitation of these data-driven models is that they can only render \emph{isotropic} textures: their record does not depend on the position of the measure, and the rendering is the same regardless of the direction of the movement.
|
||||||
This was eventually addressed to include the user's velocity direction into the capture, modelling and rendering of the textures \cite{abdulali2016datadriven,abdulali2018datadriven}.
|
This was eventually addressed to include the user's velocity direction into the capture, modelling and rendering of the textures \cite{abdulali2016datadriven,abdulali2016datadrivena}.
|
||||||
Using the user's velocity magnitude and normal force as input, these data-driven models interpolate from the scan measurements to generate a virtual texture in real time as highly realistic vibrations.
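The interpolation step can be sketched as follows; the data layout, names, and values are illustrative assumptions, not a published API:
\begin{verbatim}
import numpy as np
from scipy.interpolate import LinearNDInterpolator

# Each recorded scan is reduced to a model parameter vector (e.g.
# autoregressive coefficients or a vibration spectrum) measured at a
# given scanning speed (m/s) and normal force (N).
scan_inputs = np.array([[0.05, 0.5], [0.05, 1.5],
                        [0.20, 0.5], [0.20, 1.5]])
scan_params = np.array([[0.1, 0.3], [0.2, 0.5],
                        [0.4, 0.6], [0.7, 0.9]])

# Delaunay-based linear interpolation between the recorded models.
interpolate = LinearNDInterpolator(scan_inputs, scan_params)

def texture_model(speed, force):
    """Vibration model parameters for the current user speed and force."""
    return interpolate([[speed, force]])[0]

params = texture_model(0.12, 1.0)  # drives the vibration synthesis each frame
\end{verbatim}
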
When comparing real textures felt through a stylus with their virtual models rendered with a voice-coil actuator attached to the stylus (\figref{culbertson2012refined}), the virtual textures were found to accurately reproduce the perception of roughness, but hardness and friction were not rendered properly \cite{culbertson2014modeling}.
@@ -331,38 +315,6 @@ A challenge with this technique is to provide the vibration feedback at the righ
Vibrations on contact have been employed with wearable haptics, but to the best of our knowledge only to render virtual objects \cite{pezent2019tasbi,teng2021touch,sabnis2023haptic}.
We describe them in \secref{vhar_haptics}.
\subsection{Conclusion}
\label{wearable_haptics_conclusion}

@@ -372,6 +324,3 @@ While many haptic devices can be worn on the hand, only a few can be considered
If the device's haptic rendering is closely synchronized with the user's touch actions on a real object, the perceived haptic properties of the object can be modified.
Several haptic augmentation methods have been developed to modify the perceived roughness and hardness, mostly using vibrotactile feedback and, to a lesser extent, pressure feedback.
However, not all of these haptic augmentations have been transposed to wearable haptics yet, and the use of wearable haptic augmentations has not been investigated in the context of \AR.

@@ -1,18 +1,9 @@
\section{Manipulating Objects with the Hands in AR}
\label{augmented_reality}

\AR devices generate and integrate virtual content into the user's perception of their real environment (\RE), creating the illusion of the \emph{presence} of the virtual \cite{azuma1997survey,skarbez2021revisiting}.
Among the different types of devices, \AR headsets leave the hands free to interact with virtual objects.
This promises natural and intuitive interactions similar to those with everyday real objects \cite{billinghurst2021grand,hertel2021taxonomy}.
\subsection{What is Augmented Reality?}
\label{what_is_ar}
@@ -23,15 +14,12 @@ Fixed to the ceiling, the headset displayed a stereoscopic (one image per eye) p
\subsubsection{A Definition of AR}
\label{ar_definition}

The first formal definition of \AR was proposed by \textcite{azuma1997survey}: an \AR system must (1) combine real and virtual, (2) be interactive in real time, and (3) register real and virtual\footnotemark.
Each of these characteristics is essential: the real-virtual combination distinguishes \AR from \VR, a movie with integrated digital content is not interactive, and a 2D overlay like an image filter is not registered.
There are also two key aspects of this definition: it does not focus on technology or method, but on the user's perspective of the system experience, and it does not specify a particular human sense, \ie it can be auditory \cite{yang2022audio}, haptic \cite{bhatia2024augmenting}, or even olfactory \cite{brooks2021stereosmell} or gustatory \cite{brooks2023taste}.
Yet, most research has focused on visual augmentation, and the term \AR (without a prefix) is almost always understood as visual \AR.

\footnotetext{This third characteristic has been slightly adapted to use the version of \textcite{marchand2016pose}; the original definition was: \enquote{registered in \ThreeD}.}

\begin{subfigs}{ar_applications}{Examples of \AR applications. }[][
\item Visuo-haptic surgery training with cutting into virtual soft tissues \cite{harders2009calibration}.
@@ -59,14 +47,13 @@ However, the user experience in \AR is still highly dependent on the display use
\label{ar_displays}

To experience virtual content combined and registered with the \RE, an output device that displays the \VE to the user is necessary.
There is a large variety of \AR displays, with different methods of combining the real and virtual content and different locations in the \RE or on the user \cite[p.126]{billinghurst2015survey}.

In \emph{\VST-\AR}, the virtual images are superimposed on images of the \RE captured by a camera \cite{marchand2016pose}, and the combined real-virtual image is displayed on a screen to the user, as illustrated in \figref{itoh2022indistinguishable_vst}. An example is shown in \figref{hartl2013mobile}.
This augmented view through the camera has the advantage of complete control over the real-virtual combination, such as mutual occlusion between real and virtual objects \cite{macedo2023occlusion}, coherent lighting, and no delay between the real and virtual images \cite{kruijff2010perceptual}.
However, due to the camera and the screen, the user's view is degraded, with a lower resolution, frame rate, and field of view, and an overall visual latency compared to proprioception \cite{kruijff2010perceptual}.

An \emph{\OST-\AR} display directly combines the virtual images with the real-world view using a transparent optical system \cite{itoh2022indistinguishable}, like augmented glasses, as illustrated in \figref{itoh2022indistinguishable_ost}. An example is shown in \figref{lee2013spacetop}.
These displays feature a direct, preserved view of the \RE at the cost of more difficult registration (spatial misalignment or temporal latency between the real and virtual content) \cite{grubert2018survey} and mutual real-virtual occlusion \cite{macedo2023occlusion}.

Finally, \emph{projection-based \AR} overlays the virtual images on the real world using a projector, as illustrated in \figref{roo2017one_2}, \eg \figref{roo2017inner}.
@@ -86,16 +73,13 @@ It doesn't require the user to wear the display, but requires a real surface to
Regardless of the type of \AR display, it can be placed at different locations \cite{bimber2005spatial}, as shown in \figref{roo2017one_1}.
\emph{Spatial \AR} usually refers to projection-based displays placed at a fixed location (\figref{roo2017inner}), but it can also be \OST or \VST \emph{fixed windows} (\figref{lee2013spacetop}).
Alternatively, \AR displays can be \emph{hand-held}, like a \VST smartphone (\figref{hartl2013mobile}), or body-attached, like a micro-projector used as a flashlight \cite[p.141]{billinghurst2015survey}.
Finally, \AR displays can be head-worn like \VR \emph{headsets} or glasses, providing a portable experience.

\fig[0.75]{roo2017one_1}{Locations of \AR displays from eye-worn to spatially projected. Adapted by \textcite{roo2017one} from \textcite{bimber2005spatial}.}

\subsubsection{Presence and Embodiment in AR}
\label{ar_presence_embodiment}

Presence and embodiment are two key concepts that characterize the user experience in \AR and \VR.
While there is a large literature on these topics in \VR, they are less well defined and studied for \AR \cite{genay2022being,tran2024survey}.
These concepts will be useful for the design, evaluation, and discussion of our contributions:
@@ -104,7 +88,8 @@ In particular, we will investigate the effect of the visual feedback of the virt
\paragraph{Presence}
\label{ar_presence}

\AR and \VR are both essentially illusions.
The virtual content does not actually exist physically, but is only digitally simulated and rendered to the user's senses through display devices.
Such an experience of suspension of disbelief in \VR is called \emph{presence}, and it can be decomposed into two dimensions: place illusion and plausibility \cite{slater2009place,slater2022separate}.
\emph{Place illusion} is the user's sense of \enquote{being there} in the \VE (\figref{presence-vr}).
It emerges from the real-time rendering of the \VE from the user's perspective: being able to move around inside the \VE and look at it from different points of view.
@@ -112,15 +97,12 @@ It emerges from the real time rendering of the \VE from the user's perspective:
It doesn't mean that the virtual events are realistic, \ie that they reproduce the real world with high fidelity \cite{skarbez2017survey}, but that they are believable and coherent with the user's expectations.
In the same way, a film can be plausible even if it is not realistic, such as a cartoon or a science-fiction movie.

For \AR, \textcite{slater2022separate} proposed to invert place illusion into what we can call \enquote{object illusion}, \ie the sense that the virtual object \enquote{feels here} in the \RE (\figref{presence-ar}).
As with \VR, virtual objects must be able to be seen from different angles by moving the head, but they must also, and this is more difficult, appear coherent enough with the \RE \cite{skarbez2021revisiting}, \eg occlude or be occluded by real objects \cite{macedo2023occlusion}, cast shadows, or reflect lights.
Plausibility can be applied to \AR as is, but the virtual objects must additionally have knowledge of the \RE and react accordingly to it, so as to be, again, perceived as behaving coherently with the real world \cite{skarbez2021revisiting}.

\begin{subfigs}{presence}{
The sense of immersion in virtual and \AEs. Adapted from \textcite{stevens2002putting}.
}[][
\item Place illusion is the user's sense of \enquote{being there} in the \VE.
\item Object illusion is the sense that the virtual object \enquote{feels here} in the \RE.
@@ -142,7 +124,7 @@ In \AR, it could take the form of body accessorization, \eg wearing virtual clot
\subsection{Direct Hand Manipulation in AR}
\label{ar_interaction}

A user in \AR must be able to interact with the virtual content to fulfil the second point of \textcite{azuma1997survey}'s definition (\secref{ar_definition}) and complete our proposed visuo-haptic interaction loop (\figref[introduction]{interaction-loop}).
In all examples of \AR applications shown in \secref{ar_applications}, the user interacts with the \VE using their hands, either directly or through a physical interface.

\subsubsection{User Inputs and Interaction Techniques}
@@ -150,25 +132,24 @@ In all examples of \AR applications shown in \secref{ar_applications}, the user

For a user to interact with a computer system (desktop, mobile, \AR, etc.), they first perceive the state of the system and then act upon it through an input device \cite[p.145]{laviolajr20173d}.
Such input devices form an input \emph{\UI} that captures and translates the user's actions to the computer.
Similarly, an output \UI renders and displays the state of the system to the user (such as an \AR/\VR display, \secref{ar_displays}, or a haptic actuator, \secref{wearable_haptic_devices}).

Input \UIs can rely either on \emph{active sensing}, \ie a held or worn device such as a mouse, a touch screen, or a hand-held controller, or on \emph{passive sensing}, which does not require contact, such as eye trackers, voice recognition, or hand tracking \cite[p.294]{laviolajr20173d}.
The captured information from the sensors is then translated into actions within the computer system by an \emph{interaction technique}.
For example, a cursor on a screen can be moved using either a mouse or the arrow keys on a keyboard, and a two-finger swipe on a touchscreen can be used to scroll or zoom an image.
Choosing useful and efficient \UIs and interaction techniques is crucial for the user experience and the tasks that can be performed within the system.

\subsubsection{Tasks with Virtual Environments}
\label{ve_tasks}

\textcite{laviolajr20173d} (p.385) classify interaction techniques into three categories based on the tasks they enable users to perform: manipulation, navigation, and system control.
\textcite{hertel2021taxonomy} proposed a similar taxonomy of interaction techniques specifically for \AR headsets.
In this thesis, we focus on manipulation tasks of virtual content directly with the hands, more specifically on touching visuo-haptic textures with a finger (\partref{perception}) and on positioning and rotating virtual objects pushed and grasped by the hand (\partref{manipulation}).

The \emph{manipulation tasks} are the most fundamental tasks in \AR and \VR systems, and the building blocks for more complex interactions.
\emph{Selection} is the identification or acquisition of a specific virtual object, \eg pointing at a target as in \figref{grubert2015multifi}, touching a button with a finger, or grasping an object with a hand.
\emph{Positioning} and \emph{rotation} of a selected object are the changes of its position and orientation in \ThreeD space, respectively.
It is also common to \emph{resize} a virtual object (\figref{piumsomboon2013userdefined_2}).
These three operations are geometric (rigid) manipulations of the object: they do not change its shape.

The \emph{navigation tasks} are the movements of the user within the \VE.
@@ -177,33 +158,30 @@ Wayfinding is the cognitive planning of the movement, such as path finding or ro

The \emph{system control tasks} are changes to the system state through commands or menus, such as creating, deleting, or modifying virtual objects, \eg as in \figref{roo2017onea}. They also include the input of text, numbers, or symbols.

\begin{subfigs}{interaction-techniques}{Interaction techniques in \AR. }[][
\item Spatial selection of a virtual item of an extended display using a hand-held smartphone \cite{grubert2015multifi}.
\item Resizing a virtual object with a bimanual gesture \cite{piumsomboon2013userdefined}.
\item Displaying the route to follow as an overlay registered on the \RE \cite{grubert2017pervasive}.
\item Virtual drawing on a real object with a hand-held pen \cite{roo2017onea}.
]
\subfigsheight{35.5mm}
\subfigbox{grubert2015multifi}
\subfigbox{piumsomboon2013userdefined_2}
\subfigbox{grubert2017pervasive}
\subfigbox{roo2017onea}
\end{subfigs}

\subsubsection{The Gap between Real and Virtual}
\label{real_virtual_gap}

In \AR and \VR, the state of the system is displayed to the user as a \ThreeD spatial \VE.
With an \AR headset, the \VE can be experienced at a 1:1 scale and as an integral part of the \RE.
The rendering gap between the real and virtual elements, as described in our interaction loop in \figref[introduction]{interaction-loop}, is thus experienced as narrow, or even not consciously perceived by the user.
This manifests as a sense of presence of the virtual, as described in \secref{ar_presence}.

As the gap between real and virtual rendering is reduced, one could expect a similar and seamless interaction with the \VE as with the \RE, which \textcite{jacob2008realitybased} called \emph{reality-based interactions}.
As of today, an \AR system tracks itself and the user in \ThreeD, using tracking sensors and pose estimation algorithms \cite{marchand2016pose}.
This enables the \VE to be registered with the \RE, and the user simply moves to navigate within the virtual content.
However, direct hand manipulation of virtual content is a challenge that requires specific interaction techniques \cite{billinghurst2021grand}.
It is often achieved using two interaction techniques: \emph{tangible objects} and \emph{virtual hands} \cite[p.165]{billinghurst2015survey}.

@@ -211,10 +189,8 @@ It is often achieved using two interaction techniques: \emph{tangible objects} a
\label{ar_tangibles}

As \AR integrates visual virtual content into \RE perception, it can involve surrounding real objects as \UIs: either to visually augment them (\figref{roo2017inner}) or to use them as physical proxies to support interaction with virtual objects \cite{ishii1997tangible}.
Each virtual object is coupled to a real object and physically manipulated through it, providing a direct, efficient and seamless interaction with both the real and virtual content \cite{billinghurst2005designing}.
The real objects are called \emph{tangible} in this usage context.

Methods have been developed to automatically pair and adapt the virtual objects for rendering with available tangibles of similar shape and size \cite{hettiarachchi2016annexing,jain2023ubitouch} (\figref{jain2023ubitouch}).
The issue with these \emph{space-multiplexed} interfaces is the large number and variety of tangibles required.
@@ -245,12 +221,11 @@ Similarly, in \secref{tactile_rendering} we described how a material property (\
\subsubsection{Manipulating with Virtual Hands}
\label{ar_virtual_hands}

Hands were initially tracked by active sensing devices such as gloves or controllers, but it is now possible to track them in real time using passive sensing (\secref{interaction_techniques}) and computer vision algorithms natively integrated into \AR/\VR headsets \cite{tong2023survey}.
Our hands allow us to manipulate real everyday objects (\secref{grasp_types}), hence virtual hand interaction techniques seem to be the most natural way to manipulate virtual objects \cite[p.400]{laviolajr20173d}.

The tracked user's hand is reconstructed as a \emph{virtual hand} model in the \VE \cite[p.405]{laviolajr20173d}.
The simplest models represent the hand as a rigid \ThreeD object that follows the movements of the real hand with 6 \DoF (position and orientation in space) \cite{talvas2012novel}.
An alternative is to model only the fingertips (\figref{lee2007handy}) or the whole hand (\figref{hilliges2012holodesk_1}) as points.
The most common technique is to reconstruct all the phalanges of the hand in an articulated kinematic model (\secref{hand_anatomy}) \cite{borst2006spring}.

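Such an articulated model is essentially a tree of joints posed each frame by the tracked joint rotations; a minimal sketch (with illustrative names and offsets, not a specific library's API) could be:
\begin{verbatim}
from dataclasses import dataclass, field

@dataclass
class Joint:
    name: str
    offset: tuple[float, float, float]  # position relative to parent (m)
    rotation: tuple[float, float, float, float] = (0, 0, 0, 1)  # tracked quaternion
    children: list["Joint"] = field(default_factory=list)

# One kinematic chain per finger, rooted at the wrist; the hand tracker
# updates the rotation of each joint every frame.
wrist = Joint("wrist", (0.0, 0.0, 0.0), children=[
    Joint("index_proximal", (0.02, 0.09, 0.0), children=[
        Joint("index_middle", (0.0, 0.04, 0.0), children=[
            Joint("index_distal", (0.0, 0.025, 0.0)),
        ]),
    ]),
    # ... thumb and remaining fingers omitted
])
\end{verbatim}
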
@@ -284,7 +259,6 @@ While a visual feedback of the virtual hand in \VR can compensate for these issu
\subsection{Visual Feedback of Virtual Hands in AR}
\label{ar_visual_hands}

When interacting with a physics-based virtual hand method (\secref{ar_virtual_hands}) in \VR, the visual feedback of the virtual hand has an influence on the perception, interaction performance, and preference of users \cite{prachyabrued2014visual,argelaguet2016role,grubert2018effects,schwind2018touch}.
In a pick-and-place manipulation task in \VR, \textcite{prachyabrued2014visual} and \textcite{canales2019virtual} found that a visual hand feedback whose motion was constrained to the surface of the virtual objects, similar to \textcite{borst2006spring} (\enquote{Outer Hand} in \figref{prachyabrued2014visual}), performed the worst, while the visual hand feedback following the tracked human hand (thus penetrating the virtual objects, \enquote{Inner Hand} in \figref{prachyabrued2014visual}) performed the best, though it was rather disliked.
\textcite{prachyabrued2014visual} also found that the best compromise was a double feedback, showing both the virtual hand and the tracked hand (\enquote{2-Hand} in \figref{prachyabrued2014visual}).
@@ -294,13 +268,7 @@ A visual hand feedback while in \VE also seems to affect how one grasps an objec
\fig{prachyabrued2014visual}{Visual hand feedback affects user experience in \VR \cite{prachyabrued2014visual}.}

Conversely, a user sees their own hands in \AR, and the mutual occlusion between the hands and the virtual objects is a common issue (\secref{ar_displays}), \ie hiding the virtual object when the real hand is in front of it, and hiding the real hand when it is behind the virtual object (\figref{hilliges2012holodesk_2}).
While in \VST-\AR this could be solved as a masking problem by combining the real and virtual images \cite{battisti2018seamless}, \eg in \figref{suzuki2014grasping}, in \OST-\AR this is much more difficult because the \VE is displayed as a transparent 2D image on top of the \ThreeD \RE, which cannot be easily masked \cite{macedo2023occlusion}.

Since the \VE is intangible, adding a visual feedback of the virtual hand in \AR that is physically constrained to the virtual objects would achieve a similar result to the double-hand feedback of \textcite{prachyabrued2014visual}.
A virtual object overlaying a real object in \OST-\AR can vary in size and shape without degrading user experience or manipulation performance \cite{kahl2021investigation,kahl2023using}.
@@ -308,16 +276,11 @@ This suggests that a visual hand feedback superimposed on the real hand as a par

Few works have compared different visual feedback renderings of the virtual hand in \AR or with wearable haptic feedback.
Rendering the real hand as a semi-transparent hand in \VST-\AR is perceived as less natural but seems to be preferred to a mutual visual occlusion for interaction with real and virtual objects \cite{buchmann2005interaction,piumsomboon2014graspshell}.
Similarly, \textcite{blaga2017usability} evaluated direct hand manipulation in \VST-\AR with a skeleton-like rendering \vs no visual hand feedback: while user performance did not improve, participants felt more confident with the virtual hand (\figref{blaga2017usability}).
In a collaborative task in \OST-\AR \vs \VR headsets, \textcite{yoon2020evaluating} showed that a realistic human hand rendering was preferred over a low-polygon hand and a skeleton-like hand for the remote partner.
\textcite{genay2021virtual} found that the sense of embodiment with robotic hand overlays in \OST-\AR was stronger when the environment contained both real and virtual objects (\figref{genay2021virtual}).
Finally, \textcite{maisto2017evaluation} and \textcite{meli2018combining} compared the visual and haptic feedback of the hand in \VST-\AR, as detailed in the next section (\secref{vhar_rings}).
Taken together, these results suggest that a visual augmentation of the hand in \AR could improve usability and performance in direct hand manipulation tasks, but the best rendering has yet to be determined.

\begin{subfigs}{visual-hands}{Visual feedback of the virtual hand in \AR. }[][
\item Grasping a virtual object in \OST-\AR with no visual hand feedback \cite{hilliges2012holodesk}.
@@ -332,7 +295,6 @@ Taken together, these results suggest that a visual augmentation of the hand in
\subfig{buchmann2005interaction}
\subfig{blaga2017usability}
\subfig{genay2021virtual}
\end{subfigs}

\subsection{Conclusion}
@@ -340,8 +302,8 @@ Taken together, these results suggest that a visual augmentation of the hand in

\AR systems integrate virtual content into the user's perception as if it were part of the \RE.
\AR headsets now enable real-time pose estimation of the head and hands, and high-quality display of virtual content, while being portable and mobile.
They create \AEs that users can explore with a strong sense of the presence of the virtual content.
However, without direct and seamless interaction with the virtual objects using the hands, the coherence of the \AE experience is compromised.
In particular, when manipulating virtual objects in \OST-\AR, there is a lack of mutual occlusion and interaction cues between the hands and the virtual content, which could be mitigated by a visual augmentation of the hand.
A common alternative approach is to use real objects as proxies for interaction with virtual objects, but this raises concerns about their coherence with visual augmentations.
In this context, the use of wearable haptic systems worn on the hand seems to be a promising solution both for improving direct hand manipulation of virtual objects and for coherent visuo-haptic augmentation of touched real objects.
@@ -5,10 +5,7 @@ Perception and manipulation of objects with the hand typically involves both the
Each sense has unique capabilities for perceiving certain object properties, such as color for vision or temperature for touch, but they are equally capable for many properties, such as roughness, hardness, or geometry \cite{baumgartner2013visual}.

Both \AR and wearable haptic systems integrate virtual content into the user's perception as sensory illusions.
It is essential to understand how a visuo-haptic rendering of a virtual object is perceived as a coherent object property, and how wearable haptics have been integrated with \AR headsets.

\subsection{Visuo-Haptic Perception of Virtual and Augmented Objects}
\label{vh_perception}
@@ -36,32 +33,23 @@ and the integrated variance $\sigma^2$ is the inverse of the sum of the individu
\end{equation}

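As a worked example with illustrative values: if the haptic estimate is four times noisier than the visual one ($\sigma_v^2 = 1$, $\sigma_h^2 = 4$, in arbitrary units), then
\begin{equation*}
w_v = \frac{1/\sigma_v^2}{1/\sigma_v^2 + 1/\sigma_h^2} = \frac{1}{1.25} = 0.8,
\qquad
w_h = 0.2,
\qquad
\sigma^2 = \frac{1}{1/\sigma_v^2 + 1/\sigma_h^2} = 0.8,
\end{equation*}
so the integrated estimate $0.8\, s_v + 0.2\, s_h$ is dominated by vision, yet its variance is lower than that of either estimate alone.
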
This was demonstrated by \textcite{ernst2002humans} in a user study where participants estimated the height of a virtual bar using a fixed-window \OST-\AR display (\secref{ar_displays}) and force-feedback devices worn on the thumb and index finger (\secref{wearability_level}), as shown in \figref{ernst2002humans_setup}.
|
This was demonstrated by \textcite{ernst2002humans} in a user study where participants estimated the height of a virtual bar using a fixed-window \OST-\AR display (\secref{ar_displays}) and force-feedback devices worn on the thumb and index finger (\secref{wearability_level}), as shown in \figref{ernst2002humans_setup}.
|
||||||
%They first measured the individual variances of the visual and haptic estimates (\figref{ernst2002humans_within}) and then the combined variance of the visuo-haptic estimates (\figref{ernst2002humans_visuo-haptic}).
|
|
||||||
On each trial, participants compared the visuo-haptic reference bar (of a fixed height) to a visuo-haptic comparison bar (of a variable height) in a \TIFC task (one bar is presented first, then a pause, then the other) and indicated which was taller.
The reference bar had different conflicting visual $s_v$ and haptic $s_h$ heights, and different noise levels were added to the visual feedback to increase its variance.
The objective was to determine a \PSE between the comparison and reference bars, where the participant was equally likely to choose one or the other (\percent{50} of the trials).
\figref{ernst2004merging_results} shows that when the visual noise was low, the visual feedback had more weight, but as visual noise increased, haptic feedback gained more weight, as predicted by the \MLE model.

\begin{subfigs}{ernst2002humans}{
Visuo-haptic perception of the height of a virtual bar \cite{ernst2002humans}.
}[][
\item Experimental setup.
\item Proportion of trials (vertical axis) in which the comparison bar (horizontal axis) was perceived as taller than the reference bar, as a function of increasing variance (inverse of reliability) of the visual feedback (colors).
The reference had different conflicting visual $s_v$ and haptic $s_h$ heights.
]
\subfig[.34]{ernst2002humans_setup}
\subfig[.64]{ernst2004merging_results}
\end{subfigs}

The \MLE model implies that when seeing and touching a virtual object in \AR, the combination of visual and haptic stimuli presented to the user, whether real or virtual, can be perceived as a single coherent object property.
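
As a minimal numerical sketch of this cue combination (with illustrative values, not data from the study), the weights and the integrated variance follow directly from the individual variances:
\begin{verbatim}
def mle_integrate(s_v, sigma_v, s_h, sigma_h):
    # Reliability-weighted average of the visual and haptic estimates.
    w_v = sigma_h**2 / (sigma_v**2 + sigma_h**2)
    w_h = 1.0 - w_v
    s_hat = w_v * s_v + w_h * s_h
    # Integrated variance: inverse of the sum of the inverse variances.
    var_hat = 1.0 / (1.0 / sigma_v**2 + 1.0 / sigma_h**2)
    return s_hat, var_hat

# Low visual noise: vision dominates the integrated height estimate.
print(mle_integrate(s_v=55.0, sigma_v=1.0, s_h=50.0, sigma_h=3.0))
# High visual noise: the haptic estimate takes over.
print(mle_integrate(s_v=55.0, sigma_v=6.0, s_h=50.0, sigma_h=3.0))
\end{verbatim}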
\subsubsection{Influence of Visual Rendering on Haptic Perception}
\label{visual_haptic_influence}

@@ -72,10 +60,9 @@ More precisely, when surfaces are evaluated by vision or touch alone, both sense
The overall perception can then be modified by changing one of the sensory modalities.
\textcite{yanagisawa2015effects} altered the perceived roughness, stiffness, and friction of real tactile materials touched by the finger by superimposing different real visual textures using a half-mirror.
In a similar setup, but in \VST-\AR, \textcite{kitahara2010sensory} overlaid visual textures on real textured surfaces touched through a glove: many visual textures were found to match the real haptic textures.
\textcite{degraen2019enhancing} and \textcite{gunther2022smooth} also combined multiple virtual objects in \VR with \ThreeD-printed hair structures or with everyday real surfaces, respectively.
They found that the visual perception of roughness and hardness influenced the haptic perception, and that only a few real objects seemed to be sufficient to match all the visual virtual objects (\figref{gunther2022smooth}).

\fig{gunther2022smooth}{In a passive touch context in \VR, only a smooth and a rough real surface were found to match all the visual virtual objects \cite{gunther2022smooth}.}

@@ -93,17 +80,15 @@ For example, in a fixed \VST-\AR screen (\secref{ar_displays}), by visually defo
\subfig{ban2014displaying}
\end{subfigs}

\subsubsection{Perception of Visuo-Haptic Rendering in AR and VR}
\label{ar_vr_haptic}

Some studies have investigated the visuo-haptic perception of virtual objects rendered with force-feedback and vibrotactile feedback in \AR and \VR.

In \VST-\AR, \textcite{knorlein2009influence} rendered a virtual piston using force-feedback haptics that participants pressed directly with their hand (\figref{visuo-haptic-stiffness}).
In a \TIFC task (\secref{sensations_perception}), participants pressed two pistons and indicated which was stiffer.
One had a reference stiffness but an additional visual or haptic delay, while the other varied with a comparison stiffness but had no delay.%
\footnote{Participants were not told about the delays and stiffness levels tested, nor which piston was the reference or the comparison. The order of the pistons (which one was pressed first) was also randomized.}
Adding a visual delay increased the perceived stiffness of the reference piston, while adding a haptic delay decreased it, and adding both delays cancelled each other out (\figref{knorlein2009influence_2}).

\begin{subfigs}{visuo-haptic-stiffness}{
@@ -116,18 +101,13 @@ Adding a visual delay increased the perceived stiffness of the reference piston,
\subfig[.55]{knorlein2009influence_2}
\end{subfigs}

The stiffness $\tilde{k}(t)$ of the piston is indeed estimated at time $t$ by both sight and proprioception as the ratio of the exerted force $F(t)$ and the displacement $\Delta L(t)$ of the piston, following \eqref{stiffness}, but with potential visual $\Delta t_v$ or haptic $\Delta t_h$ delays.
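A sketch of this delayed estimate, consistent with the notation above, is:
\begin{equation}{stiffness_delay}
    \tilde{k}(t) = \frac{F(t + \Delta t_h)}{\Delta L(t + \Delta t_v)}
\end{equation}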
Therefore, the perceived stiffness $\tilde{k}(t)$ increases with a haptic delay in force and decreases with a visual delay in displacement \cite{diluca2011effects}.
\textcite{gaffary2017ar} compared perceived stiffness of virtual pistons in \OST-\AR and \VR.
However, the force-feedback device and the participant's hand were not visible (\figref{gaffary2017ar}).
The reference piston was judged to be stiffer when seen in \VR than in \AR, without participants noticing this difference, and more force was exerted on the piston overall in \VR.
This suggests that the haptic stiffness of virtual objects feels \enquote{softer} in an \AE than in a full \VE.

Finally, \textcite{diluca2019perceptual} investigated the perceived simultaneity of visuo-haptic contact with a virtual object in \VR.
The contact was rendered both by a vibrotactile piezoelectric device on the fingertip and by a visual change in the color of the virtual object.
@@ -135,7 +115,7 @@ The visuo-haptic simultaneity was varied by adding a visual delay or by triggeri
No participant (out of 19) was able to detect a \qty{50}{\ms} visual lag and a \qty{15}{\ms} haptic lead, and only half of them detected a \qty{100}{\ms} visual lag and a \qty{70}{\ms} haptic lead.

These studies have shown how the latency of the visual rendering of a virtual object or the type of environment (\VE or \RE) can affect the perceived haptic stiffness of the object, rendered with a grounded force-feedback device.
We describe in the next section how wearable haptics have been integrated with \AR.

\begin{subfigs}{gaffary2017ar}{Perception of haptic stiffness in \OST-\AR \vs \VR \cite{gaffary2017ar}. }[][
\item Experimental setup: a virtual piston was pressed with a force-feedback device placed to the side of the participant.
@@ -150,32 +130,28 @@ We describe in the next section how wearable haptics have been integrated with i
\subsection{Wearable Haptics for Direct Hand Interaction in AR}
\label{vhar_haptics}

A few wearable haptic devices have been specifically designed or experimentally tested for direct hand interaction in \AR.
Since virtual or augmented objects are naturally touched, grasped, and manipulated directly with the fingertips (\secref{exploratory_procedures} and \secref{grasp_types}), the main challenge of wearable haptics for \AR is to provide haptic sensations of these interactions while keeping the fingertips free to interact with the \RE.
Several approaches have been proposed to move the haptic actuator to a different location, on the outside of the finger or the hand, \eg the nail, the top of a phalanx, or the wrist.
Yet, they differ greatly in the actuators used (\secref{wearable_haptic_devices}), thus the haptic feedback (\secref{tactile_rendering}), and the placement of the haptic rendering.

Other wearable haptic actuators have been proposed for \AR, but are not discussed here.
A first reason is that they permanently cover the fingertip and affect the interaction with the \RE, such as thin-skin tactile interfaces \cite{withana2018tacttoo,teng2024haptic} or fluid-based interfaces \cite{han2018hydroring}.
Another category of actuators relies on systems that cannot be considered portable, such as REVEL \cite{bau2012revel}, which provides friction sensations with reverse electrovibration but requires modifying the real objects to be augmented, or electrical muscle stimulation (EMS) devices \cite{lopes2018adding}, which provide kinesthetic feedback by contracting the muscles.

\subsubsection{Nail-Mounted Devices}
\label{vhar_nails}

\textcite{ando2007fingernailmounted} were the first to move the actuator away from the fingertip.
They proposed to relocate it to the nail instead, as described in \secref{texture_rendering}.
This approach was later extended by \textcite{teng2021touch} with Touch\&Fold, a haptic device able to unfold its end-effector on demand to make contact with the fingertip when touching virtual objects (\figref{teng2021touch_1}).
This moving platform also contains a \LRA (\secref{moving_platforms}) and provides contact pressure and texture sensations.
When touching virtual objects in \OST-\AR with the index finger, this device was found to be more realistic overall (5/7) than vibrations with a \LRA at \qty{170}{\Hz} on the nail (3/7).
Still, the folding mechanism has a high latency (\qty{92}{\ms}), and this design is not suitable for augmenting real objects.

With Fingeret, \textcite{maeda2022fingeret} adapted the belt actuators (\secref{belt_actuators}) as a \enquote{finger-side actuator} that leaves the fingertip free (\figref{maeda2022fingeret}).
Two rollers, one on each side, can deform the skin: when rotated inward, they pull the skin, simulating a contact sensation, and when rotated outward, they push the skin, simulating a release sensation.
They can also simulate a texture sensation by rapidly rotating in and out.
In a user study not in \AR, but directly touching images on a tablet, Fingeret was found to be more realistic (4/7) than a \LRA at \qty{100}{\Hz} on the nail (3/7) for rendering buttons and a patterned texture (\secref{texture_rendering}), but not different from vibrations for rendering high-frequency textures (3.5/7 for both).
However, as with \textcite{teng2021touch}, finger speed was not taken into account when rendering vibrations, which may have been detrimental to texture perception, as described in \secref{texture_rendering}.
@@ -201,17 +177,14 @@ Recall that these devices have also been used to modify the perceived stiffness,
In a \VST-\AR setup, \textcite{scheggi2010shape} explored the effect of rendering the weight of a virtual cube placed on a real surface held with the thumb, index, and middle fingers (\figref{scheggi2010shape}).
The middle phalanx of each of these fingers was equipped with the haptic ring of \textcite{minamizawa2007gravity}.
\textcite{scheggi2010shape} reported that 12 out of 15 participants found the weight haptic feedback essential to feeling the presence of the virtual cube.

In a pick-and-place task in \VST-\AR involving direct hand manipulation of both virtual and real objects (\figref{maisto2017evaluation}), \textcite{maisto2017evaluation} and \textcite{meli2018combining} compared the effects of providing haptic or visual feedback about fingertip-object contacts.
They compared the haptic ring of \textcite{pacchierotti2016hring} on the proximal phalanx, the moving platform of \textcite{chinello2020modular} on the fingertip, and a visual feedback of the tracked fingertips as virtual points.
They showed that the haptic feedback improved the completion time and reduced the force exerted on the cubes compared to the visual feedback (\figref{visual-hands}).
The haptic ring was also perceived as more effective than the moving platform.
However, the measured difference in performance could be due to either the device or the device position (proximal \vs fingertip), or both.
These two studies were also conducted in static setups, where users viewed a screen displaying the visual interactions, and only compared the haptic and visual feedback of the hand-object contacts, but did not examine them together.

\begin{subfigs}{ar_rings}{Wearable haptic ring devices for \AR. }[][
\item Rendering weight of a virtual cube placed on a real surface \cite{scheggi2010shape}.
@@ -221,36 +194,14 @@ These two studies were also conducted in non-immersive setups, where users viewe
\subfig[.48][m]{maisto2017evaluation}
\end{subfigs}

With their \enquote{Tactile And Squeeze Bracelet Interface} (Tasbi), already mentioned in \secref{belt_actuators}, \textcite{pezent2019tasbi} and \textcite{pezent2022design} explored the use of a wrist-worn bracelet actuator.
It is capable of providing a uniform pressure sensation (up to \qty{15}{\N} and \qty{10}{\Hz}) and vibration with six \LRAs (\qtyrange{150}{200}{\Hz} bandwidth).
A user study was conducted in \VR to compare the perception of visuo-haptic stiffness rendering \cite{pezent2019tasbi}, and showed that the haptic pressure feedback was more important than the visual displacement.

\subsection{Conclusion}
\label{visuo_haptic_conclusion}

Providing coherent visuo-haptic feedback to enhance direct hand perception and manipulation of virtual objects in \AR is challenging.
While many wearable haptic devices have been developed and are capable of providing varied tactile feedback, few have been integrated or experimentally evaluated for direct hand interaction in \AR.
Their haptic end-effector must be moved away from the inside of the hand so as not to interfere with the user's interaction with the \RE.
Different relocation strategies have been proposed for different parts of the hand, such as the nail, the index phalanges, or the wrist, but it remains unclear which of them is best suited for direct hand interaction in \AR.
@@ -10,13 +10,13 @@ Only a few haptic systems can be considered wearable due to their compactness an
If their haptic rendering is synchronized in time with the user's touch actions on a real object, the perceived haptic properties of the object, such as its roughness and hardness, can be modified.
Wearable haptic augmentation of roughness and hardness is mostly achieved with vibrotactile feedback (\secref{tactile_rendering}).

\AR headsets integrate virtual content into the user's perception as if it were part of the \RE, with real-time pose estimation of the head and hands (\secref{what_is_ar}).
Direct hand interaction with virtual content is often implemented using the virtual hand interaction technique, which reconstructs the user's hand in the \VE and simulates its interactions with the virtual content.
However, perceiving and manipulating virtual content is difficult due to the lack of haptic feedback and of mutual occlusion with the hand (\secref{ar_interaction}).
Real surrounding objects can also be used as proxies to interact with the virtual, but they may be incoherent with their visual augmentation because they are haptically passive (\secref{ar_interaction}).
Wearable haptics on the hand seems to be a promising solution to enable coherent and effective visuo-haptic augmentation of both virtual and real objects.

\noindentskip In this thesis, we will use wearable haptic feedback with an \AR headset to create visuo-haptic texture augmentations when touching real objects (\partref{perception}) and to improve the manipulation of virtual objects (\partref{manipulation}), both directly with the bare hand.

First, it is challenging to provide coherent visuo-haptic feedback when augmenting real objects.
By integrating different sensory feedback, haptic and visual, real and virtual, into a single object property, perception is somewhat robust to variations in reliability and to spatial and temporal differences.
@@ -31,13 +31,7 @@ Their haptic end-effector can't cover the fingertips or the inside of the palm,
Many strategies have been explored to move the actuator to other parts of the hand.
However, it is still unclear which delocalized positioning is most beneficial, and how it compares with or complements the visual feedback of the virtual hand (\secref{vhar_haptics}).

In \chapref{visual_hand}, we will investigate the most common visual feedback of the virtual hand as an augmentation of the real hand in \AR.
We will compare these visual hand augmentations in virtual object manipulation tasks performed directly with the bare hand.
Then, in \chapref{visuo_haptic_hand}, we will evaluate five common delocalized positions of vibrotactile feedback, combined with two contact rendering techniques and visual hand augmentations.
We will compare these visuo-haptic augmentations of the hand in the same direct hand manipulation tasks of virtual objects in \AR.
BIN 2-related-work/figures/piumsomboon2013userdefined_2.jpeg (new file, 27 KiB, binary file not shown)
@@ -6,7 +6,6 @@
\bigskip

This chapter reviews previous work on the perception and manipulation of virtual and augmented objects directly with the hand, using either wearable haptics, \AR, or their combination.
First, we review how the hand senses and interacts with its environment to perceive and manipulate the haptic properties of real everyday objects.
Second, we present how wearable haptic devices and renderings have been used to augment the haptic perception of roughness and hardness of real objects.
Third, we introduce the principles and user experience of \AR and review the main interaction techniques used to manipulate virtual objects directly with the hand.
@@ -2,25 +2,25 @@
\label{intro}

One approach to rendering virtual haptic textures consists in simulating the roughness of a periodic grating surface as a vibrotactile sinusoidal signal (\secref[related_work]{texture_rendering}).
The vibrations are rendered by a voice-coil actuator embedded in a hand-held tool or worn on the finger (\secref[related_work]{vhar_haptics}).
To create the illusion of touching a pattern with a fixed spatial period, the frequency of the signal must be modulated according to the finger movement.
Previous work either used mechanical systems to track the movement at high frequency \cite{strohmeier2017generating,friesen2024perceived}, or required the user to move at a constant speed to keep the signal frequency constant \cite{asano2015vibrotactile,ujitoko2019modulating}.
However, this method has not yet been integrated in an \AR headset context, where the user should be able to freely touch and explore the visuo-haptic texture augmentations.

In this chapter, we propose a \textbf{system for rendering visual and haptic virtual textures that augment real surfaces}.
It is implemented with the \OST-\AR headset Microsoft HoloLens~2 and a wearable vibrotactile (voice-coil) device worn on the outside of the finger (not covering the fingertip).
The visuo-haptic augmentations rendered with this design allow a user to \textbf{see the textures from any angle} and \textbf{explore them freely with the bare finger}, as if they were real textures.
To ensure both real-time and reliable renderings, the hand and the real surfaces are tracked using a webcam and marker-based pose estimation.
The haptic textures are rendered as a vibrotactile signal representing a patterned grating texture that is synchronized with the finger movement on the augmented surface.
The goal of this design is to enable new \AR applications capable of augmenting real objects with virtual visuo-haptic textures in a portable, on-demand manner, without impairing the user's interaction with the \RE.

\noindentskip The contributions of this chapter are:
\begin{itemize}
\item The rendering of virtual vibrotactile roughness textures representing a patterned grating texture in real time from free finger movements and using vision-based finger pose estimation.
\item A system providing coherent visuo-haptic texture augmentations of the \RE in a direct touch context using an \OST-\AR headset and wearable haptics.
\end{itemize}

\noindentskip In the remainder of this chapter, we describe the principles of the system, how the \RE and \VE are registered, the generation of the vibrotactile textures, and measurements of the visual and haptic rendering latencies.

\bigskip
@@ -1,16 +1,10 @@
\section{Concept}
\label{principle}

The visuo-haptic texture rendering system is based on:
\begin{enumerate}[label=(\arabic*)]
\item a real-time interaction loop between the finger movements and a coherent visuo-haptic feedback simulating the sensation of a touched texture,
\item a precise registration of the \VE with its real counterpart, and
\item a modulation of the signal frequency by the estimated finger speed with a phase matching.
\end{enumerate}
@@ -50,17 +44,24 @@ The velocity (without angular velocity) of the finger marker, denoted as $\pose{
It is then filtered with another 1€ filter with the same parameters, and denoted as $\pose{c}{\hat{\dot{X}}}{f}$.
Finally, this filtered finger velocity is transformed into the augmented surface frame $\poseFrame{s}$ to be used in the vibrotactile signal generation, such as $\pose{s}{\hat{\dot{X}}}{f} = \pose{s}{T}{c} \, \pose{c}{\hat{\dot{X}}}{f}$.
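
As an illustrative sketch of this filtering step (our own minimal Python version of the 1€ filter; parameter values and names are not those of the actual implementation):
\begin{verbatim}
import math

class OneEuroFilter:
    """Adaptive low-pass filter: the cutoff frequency increases with the
    signal speed (less lag on fast motions, less jitter on slow ones)."""

    def __init__(self, min_cutoff=1.0, beta=0.01, d_cutoff=1.0):
        self.min_cutoff, self.beta, self.d_cutoff = min_cutoff, beta, d_cutoff
        self.t_prev = self.x_prev = self.dx_prev = None

    @staticmethod
    def _alpha(cutoff, dt):
        # Smoothing factor of a first-order low-pass at the given cutoff.
        tau = 1.0 / (2.0 * math.pi * cutoff)
        return 1.0 / (1.0 + tau / dt)

    def __call__(self, t, x):
        if self.t_prev is None:
            self.t_prev, self.x_prev, self.dx_prev = t, x, 0.0
            return x
        dt = t - self.t_prev
        # Filter the derivative, then adapt the cutoff to its magnitude.
        dx = (x - self.x_prev) / dt
        a_d = self._alpha(self.d_cutoff, dt)
        dx_hat = a_d * dx + (1.0 - a_d) * self.dx_prev
        a = self._alpha(self.min_cutoff + self.beta * abs(dx_hat), dt)
        x_hat = a * x + (1.0 - a) * self.x_prev
        self.t_prev, self.x_prev, self.dx_prev = t, x_hat, dx_hat
        return x_hat
\end{verbatim}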

\subsection{Virtual Environment Registration}
\label{virtual_real_registration}

Before a user interacts with the system, it is necessary to design a \VE that will be registered with the \RE during the experiment.
Each real element tracked by a marker is modelled virtually, \eg the hand and the augmented surface (\figref{device}).
In addition, the pose and size of the virtual textures are defined on the virtual replicas.
Prior to any usage, it is necessary to register the \VE with the \RE.
First, the coordinate system of the headset is manually aligned with that of the external camera by the experimenter, using a raycast from the headset to the origin point of the camera (the center of the black rectangle in the box in \figref{apparatus}).
This resulted in a \qty{\pm .5}{\cm} spatial alignment error between the \RE and the \VE.
While this was sufficient for our use cases, other methods can achieve better accuracy if needed \cite{grubert2018survey}.
Registering the coordinate systems of the camera and the headset thus allows the marker poses estimated with the camera to be used to display, in the headset, the virtual models aligned with their real-world counterparts.

An additional calibration is performed to compensate for the offset between the finger contact point and the estimated marker pose \cite{son2022effect}.
The user places the index finger on the origin point; the poses of both are known from the attached fiducial markers.
The transformation between the marker pose of the finger and the finger contact point can then be estimated and compensated with an inverse transformation.
This makes it possible to detect whether the calibrated real finger touches a virtual texture, using a collision detection algorithm (Nvidia PhysX).
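
A minimal sketch of this compensation (assuming $4 \times 4$ homogeneous transform matrices; function and variable names are ours, not those of the actual implementation):
\begin{verbatim}
import numpy as np

def calibrate_finger_offset(T_cam_marker, T_cam_origin):
    # While the fingertip rests on the origin point, the fixed offset
    # between the finger marker and the contact point is recovered by
    # inverting the marker pose and composing it with the origin pose.
    return np.linalg.inv(T_cam_marker) @ T_cam_origin

def fingertip_pose(T_cam_marker, T_marker_tip):
    # At every frame, apply the calibrated offset to the marker pose.
    return T_cam_marker @ T_marker_tip

# Once at calibration time:
#   T_marker_tip = calibrate_finger_offset(T_cam_marker_0, T_cam_origin)
# Then at every frame:
#   T_cam_tip = fingertip_pose(T_cam_marker_t, T_marker_tip)
\end{verbatim}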

In our implementation, the \VE is designed with Unity (v2021.1) and the Mixed Reality Toolkit (v2.7)\footnoteurl{https://learn.microsoft.com/windows/mixed-reality/mrtk-unity}.
The visual rendering is achieved using the Microsoft HoloLens~2, an \OST-\AR headset with a \qtyproduct{43 x 29}{\degree} \FoV, a \qty{60}{\Hz} refresh rate, and self-localisation capabilities.
A \VST-\AR or a \VR headset could have been used as well.
@@ -69,19 +70,15 @@ A \VST-\AR or a \VR headset could have been used as well.
A voice-coil actuator (HapCoil-One, Actronika) is used to display the vibrotactile signal, as it allows the frequency and amplitude of the signal to be controlled independently over time, covers a wide frequency range (\qtyrange{10}{1000}{\Hz}), and outputs the signal accurately with relatively low acceleration distortion\footnote{HapCoil-One specific characteristics are described in its data sheet: \url{https://tactilelabs.com/wp-content/uploads/2023/11/HapCoil_One_datasheet.pdf}}.
The voice-coil actuator is encased in a \ThreeD printed plastic shell and firmly attached to the middle phalanx of the user's index finger with a Velcro strap, to enable the fingertip to directly touch the environment (\figref{device}).
The actuator is driven by a class D audio amplifier (XY-502 / TPA3116D2, Texas Instruments).
The amplifier is connected to the audio output of a computer that generates the signal using the WASAPI driver in exclusive mode and the NAudio library\footnoteurl{https://github.com/naudio/NAudio}.

The represented haptic texture is a 1D series of parallel virtual grooves and ridges, similar to the real linear grating textures manufactured for psychophysical roughness perception studies (\secref[related_work]{roughness}).
It is generated as a square wave audio signal $r$, sampled at \qty{48}{\kilo\hertz}, with a texture period $\lambda$ and an amplitude $A$, similar to \eqref[related_work]{grating_rendering}.
Its frequency is a ratio of the absolute finger filtered (scalar) velocity $\dot{x} = \poseX{s}{|\hat{\dot{X}}|}{f}$, and the texture period $\lambda$ \cite{friesen2024perceived}.
|
Its frequency is a ratio of the absolute finger filtered (scalar) velocity $\dot{x} = \poseX{s}{|\hat{\dot{X}}|}{f}$, and the texture period $\lambda$ \cite{friesen2024perceived}.
|
||||||
As the finger is moving horizontally on the texture, only the $X$ component of the velocity is used.
|
As the finger is moving horizontally on the texture, only the $X$ component of the velocity is used.
|
||||||
This velocity modulation strategy is necessary as the finger position is estimated at a far lower rate (\qty{60}{\hertz}) than the audio signal (unlike high-fidelity force-feedback devices \cite{unger2011roughness}).
|
This velocity modulation strategy is necessary as the finger position is estimated at a far lower rate (\qty{60}{\hertz}) than the audio signal (unlike high-fidelity force-feedback devices \cite{unger2011roughness}).
|
||||||
|
|
||||||
%As the finger position is estimated at a far lower rate (\qty{60}{\hertz}), the filtered finger (scalar) position ${}^t\hat{X}_f$ in the texture frame $\poseFrame{t}$ cannot be directly used. % to render the signal if the finger moves fast or if the texture period is small.
|
|
||||||
%
|
|
||||||
%The best strategy instead is to modulate the frequency of the signal as a ratio of the filtered finger velocity ${}^t\hat{\dot{\mathbf{X}}}_f$ and the texture period $\lambda$ \cite{friesen2024perceived}.
|
|
||||||
%
|
|
||||||
When a new finger velocity $\dot{x}\,(t_j)$ is estimated at time $t_j$, the phase $\phi\,(t_j)$ of the signal $r$ needs also to be adjusted to ensure a continuity in the signal.
|
When a new finger velocity $\dot{x}\,(t_j)$ is estimated at time $t_j$, the phase $\phi\,(t_j)$ of the signal $r$ needs also to be adjusted to ensure a continuity in the signal.
|
||||||
In other words, the sampling of the audio signal runs at \qty{48}{\kilo\hertz}, and its frequency and phase is updated at a far lower rate of \qty{60}{\hertz} when a new finger velocity is estimated.
|
In other words, the sampling of the audio signal runs at \qty{48}{\kilo\hertz}, and its frequency and phase is updated at a far lower rate of \qty{60}{\hertz} when a new finger velocity is estimated.
|
||||||
A sample $r(t_j, t_k)$ of the audio signal at sampling time $t_k$, with $t_k >= t_j$, is thus given by:
|
A sample $r(t_j, t_k)$ of the audio signal at sampling time $t_k$, with $t_k >= t_j$, is thus given by:
|
||||||
@@ -94,11 +91,6 @@ A sample $r(t_j, t_k)$ of the audio signal at sampling time $t_k$, with $t_k >=
\end{subequations}

This rendering preserves the sensation of a constant spatial frequency of the virtual texture while the finger moves at various speeds, which is crucial for the perception of roughness \cite{klatzky2003feeling,unger2011roughness}.
The phase matching avoids sudden changes in the actuator movement that would affect the texture perception in an uncontrolled way (\figref{phase_adjustment}) and, contrary to previous work \cite{asano2015vibrotactile,ujitoko2019modulating}, it enables free exploration of the texture by the user with no constraints on the finger speed.
A square wave is chosen to get a rendering closer to a real grating texture with the sensation of crossing edges \cite{ujitoko2019modulating}, and because sine wave textures have been shown not to reproduce the roughness perception of real grating textures \cite{unger2011roughness}.
A square wave also makes it possible to render the low signal frequencies that occur when the finger moves slowly or the texture period is large, as the actuator cannot render a pure sine wave signal below approximately \qty{20}{\Hz} with sufficient amplitude to be perceived.
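To make the procedure concrete, a minimal Python sketch of this rendering logic is given below: the signal is sampled at \qty{48}{\kilo\hertz}, its frequency follows the finger speed, and its phase is re-matched at each velocity update to keep the signal continuous. It is only an illustration of the described method, not the actual implementation (which streams the audio buffers through NAudio); the class and variable names are ours.
\begin{verbatim}
import numpy as np

AUDIO_RATE = 48_000  # audio sampling rate (Hz)

class GratingRenderer:
    """Phase-continuous square-wave grating (illustrative sketch)."""

    def __init__(self, amplitude, period):
        self.A = amplitude    # signal amplitude A
        self.period = period  # texture period lambda (m)
        self.freq = 0.0       # current signal frequency (Hz)
        self.phase = 0.0      # phase phi(t_j) at the last update
        self.t_j = 0.0        # time of the last velocity update (s)

    def on_velocity(self, speed, t):
        """Velocity update at ~60 Hz with the filtered finger speed (m/s)."""
        # Advance the phase up to t with the old frequency, so the
        # signal stays continuous across the frequency change.
        self.phase = (self.phase
                      + 2 * np.pi * self.freq * (t - self.t_j)) % (2 * np.pi)
        self.freq = abs(speed) / self.period
        self.t_j = t

    def samples(self, t_k, n):
        """Return n audio samples starting at sampling time t_k >= t_j."""
        t = t_k + np.arange(n) / AUDIO_RATE
        arg = self.phase + 2 * np.pi * self.freq * (t - self.t_j)
        return self.A * np.sign(np.sin(arg))  # square wave of amplitude A
\end{verbatim}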
@@ -130,5 +122,8 @@ The total visual latency can be considered slightly high, yet it is typical for

The two filters also introduce a constant lag between the finger movement and the estimated position and velocity, measured at \qty{160 \pm 30}{\ms}.
This lag causes a distance error in the displayed virtual hand position with respect to the real hand position, and thus a delay in the triggering of the vibrotactile signal.
The error is proportional to the speed of the finger, \eg a distance error of \qty{1.2 \pm 0.2}{\cm} when the finger moves at \qty{7.5}{\cm\per\second}.
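This error is simply the product of the finger speed and the lag; taking the nominal \qty{160}{\ms} lag value $\Delta t$:
\[
e = \dot{x} \, \Delta t = \qty{7.5}{\cm\per\second} \times \qty{0.16}{\s} = \qty{1.2}{\cm} .
\]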

The velocity estimation and delay effects should be examined in more detail, as these factors are critical for the perceptual quality of the system.
A \qty{36}{\ms} discrepancy between movement and feedback may not be negligible in all scenarios.
@@ -1,21 +1,12 @@
\section{Conclusion}
\label{conclusion}

In this chapter, we designed and implemented a system for rendering virtual visuo-haptic textures that augment a real surface.
When the surface is directly touched with the fingertip, its perceived roughness can be increased using a wearable vibrotactile voice-coil device mounted on the middle phalanx of the finger.
We adapted the 1D sinusoidal grating rendering method, common in the literature but not yet integrated in a direct touch context, for use with vision-based pose estimation of the finger and paired it with an \OST-\AR headset.

Our wearable visuo-haptic augmentation system enables any real surface to be augmented with a minimal setup.
It also allows free exploration of the textures, as if they were real (\secref[related_work]{ar_presence}), by letting the user view them from different poses and touch them with the bare finger without constraints on hand movements.
The visual latency we measured is typical of \AR systems, and the haptic latency is below the perceptual detection threshold for vibrotactile rendering.

This system forms the basis of the apparatus for the user studies presented in the next two chapters, which evaluate the user perception of these visuo-haptic texture augmentations.

@@ -2,29 +2,29 @@
\label{intro}

In the previous chapter, we investigated the role of the visual feedback of the virtual hand and the environment (\AR \vs \VR) on the perception of wearable haptic texture augmentation.
In this chapter, we explore the perception of wearable visuo-haptic texture augmentation of real surfaces touched directly with the finger.

When we look at the surface of an everyday object, we then touch it to confirm or contrast our initial visual impression and to estimate the properties of the object, particularly its texture (\secref[related_work]{visual_haptic_influence}).
Among the various haptic texture augmentations, data-driven methods make it possible to capture, model, and reproduce the roughness perception of real surfaces when touched by a hand-held stylus (\secref[related_work]{texture_rendering}).
Databases of visuo-haptic textures have been developed in this way \cite{culbertson2014one,balasubramanian2024sens3}, but they have not yet been explored in a direct touch context with \AR and wearable haptics.

In this chapter, we consider simultaneous and \textbf{co-localized visual and wearable haptic texture augmentation of real surfaces} with an \OST-\AR headset and vibrotactile feedback worn on the finger.
We investigate how these textures can be perceived in a coherent and realistic manner, and to what extent each sensory modality would contribute to the overall perception of the augmented texture.
We used nine pairs of \textbf{data-driven visuo-haptic textures} from the \HaTT database \cite{culbertson2014one}, which we rendered using the wearable visuo-haptic augmentation system presented in \chapref{vhar_system}.
In a \textbf{user study}, 20 participants freely explored, in direct touch, the combinations of the visuo-haptic texture pairs to rate their coherence, realism, and perceived roughness.
We aimed to assess \textbf{which haptic textures were matched with which visual textures}, how the roughness of the visual and haptic textures was perceived, and whether \textbf{the perceived roughness} could explain the matches made between them.

\noindentskip The contributions of this chapter are:
\begin{itemize}
\item Transposition of data-driven visuo-haptic textures to augment real objects in a direct touch context in \AR.
\item A user study with 20 participants evaluating the coherence, realism, and perceived roughness of nine pairs of these visuo-haptic texture augmentations.
\end{itemize}

\smallskip

\fig[0.55]{experiment/view}{First person view of the user study.}[
The visual texture overlays were statically displayed on the surfaces, allowing the user to move around to view them from different angles.
The haptic texture augmentations were generated based on \HaTT data-driven texture models and finger speed, and were rendered on the middle index phalanx.
]

\noindentskip In the next sections, we first describe the apparatus and experimental design of the user study, including the two tasks performed. We then present the results obtained and discuss them before concluding.
@@ -1,19 +1,10 @@
\section{User Study}
\label{experiment}

\subsection{The textures}
\label{textures}

The 100 visuo-haptic texture pairs of the \HaTT database \cite{culbertson2014one} were preliminarily tested and compared using the apparatus described in \secref{apparatus} to select the most representative textures for the user study.
These texture models were chosen as they are visuo-haptic representations of a wide range of real textures that are publicly available online.
Nine texture pairs were selected (\figref{experiment/textures}) to cover a range of perceived roughness, from rough to smooth, named as in the database: \level{Metal Mesh}, \level{Sandpaper~100}, \level{Brick~2}, \level{Cork}, \level{Sandpaper~320}, \level{Velcro Hooks}, \level{Plastic Mesh~1}, \level{Terra Cotta}, \level{Coffee Filter}.
All these visual and haptic textures are isotropic: their rendering (appearance or roughness) is the same regardless of the direction of movement on the surface, \ie there are no local deformations (holes, bumps, or breaks).
@@ -27,18 +18,14 @@ The user study was held in a quiet room with no windows, with one light source o
Nine \qty{5}{\cm} square cardboards with a smooth, white melamine surface, arranged in a \numproduct{3 x 3} grid, were used as real surfaces to augment.
Their poses were estimated with three \qty{2}{\cm} AprilTag fiducial markers glued on the grid of surfaces.
Similarly, a \qty{2}{\cm} fiducial marker was glued on top of the vibrotactile actuator to detect the finger pose.
Positioned \qty{20}{\cm} above the surfaces, a webcam (StreamCam, Logitech) filmed the markers to track finger movements relative to the surfaces, as described in \secref[vhar_system]{virtual_real_registration}.
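For illustration, marker poses can be obtained with an off-the-shelf AprilTag detector; the following Python sketch uses the open-source pupil-apriltags bindings as an example (not necessarily the detector used here), with placeholder camera intrinsics.
\begin{verbatim}
import cv2
from pupil_apriltags import Detector

# Placeholder intrinsics (fx, fy, cx, cy) from a prior camera calibration.
CAMERA_PARAMS = (1400.0, 1400.0, 960.0, 540.0)
TAG_SIZE = 0.02  # 2 cm markers

detector = Detector(families="tag36h11")

def detect_tag_poses(frame):
    """Return {tag_id: (R, t)}, the poses of the visible AprilTags."""
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    tags = detector.detect(gray, estimate_tag_pose=True,
                           camera_params=CAMERA_PARAMS, tag_size=TAG_SIZE)
    return {tag.tag_id: (tag.pose_R, tag.pose_t) for tag in tags}
\end{verbatim}
The finger pose relative to a surface is then obtained by composing the inverse of the surface pose with the finger pose.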
The visual textures were displayed on the real surfaces using the \OST-\AR headset Microsoft HoloLens~2 running a custom application at \qty{60}{FPS} made with Unity (v2021.1) and Mixed Reality Toolkit (v2.7).
A set of empirical tests enabled us to choose the best rendering characteristics in terms of transparency and brightness for the visual textures, which were used throughout the user study.

When a virtual haptic texture was touched, a \qty{48}{\kilo\hertz} audio signal was generated using the rendering procedure described in \cite{culbertson2014modeling} from the corresponding \HaTT haptic texture model and the measured tangential speed of the finger.
The normal force on the texture was assumed to be constant at \qty{1.2}{\N} to generate the audio signal from the model, following \textcite{culbertson2015should}, who found that the \HaTT textures can be rendered using only the speed as input without decreasing their perceived realism.
The rendering of the virtual texture is described in \secref[vhar_system]{texture_generation}.
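For illustration, the core of this data-driven synthesis can be sketched as an autoregressive (AR) filter driven by white noise, following the procedure of \textcite{culbertson2014modeling}; here \texttt{model} stands for a hypothetical helper that interpolates the AR coefficients and excitation variance of a \HaTT texture model at the current speed and force.
\begin{verbatim}
import numpy as np

def synthesize_block(model, speed, history, n, force=1.2):
    """One block of texture vibration samples (illustrative sketch).

    model(speed, force) is assumed to return the interpolated AR
    coefficients and excitation variance of a HaTT texture model;
    history holds the last len(coeffs) output samples, newest first.
    """
    coeffs, variance = model(speed, force)
    excitation = np.random.randn(n) * np.sqrt(variance)
    out = np.empty(n)
    for i in range(n):
        y = excitation[i] - np.dot(coeffs, history)  # AR recursion
        out[i] = y
        history = np.concatenate(([y], history[:-1]))
    return out, history
\end{verbatim}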
The vibrotactile voice-coil actuator (HapCoil-One, Actronika) was firmly attached to the middle index phalanx of the participant's dominant hand using a Velcro strap, similarly to previous studies \cite{asano2015vibrotactile,friesen2024perceived}.

\begin{subfigs}{setup}{Textures used and experimental setup of the user study.}[][
\item The nine visuo-haptic textures used in the user study, selected from the \HaTT database \cite{culbertson2014one}.
@@ -56,9 +43,8 @@ The vibrotactile voice-coil actuator (HapCoil-One, Actronika) was firmly attache

Participants were first given written instructions about the experimental setup, the tasks, and the procedure of the user study.
Then, after having signed an informed consent form, they were asked to sit in front of the table with the experimental setup and to wear the \AR headset.
As the haptic textures generated no audible noise, participants did not wear any noise reduction headphones.
A calibration of both the HoloLens~2 and the finger pose estimation was performed to ensure the correct registration of the visuo-haptic textures and the real finger with the real surfaces, as described in \secref[vhar_system]{virtual_real_registration}.
Finally, participants familiarized themselves with the augmented surface in a \qty{2}{min} training session with textures different from the ones used in the user study.

Participants started with the \level{Matching} task.
@@ -89,8 +75,6 @@ A total of 9 textures \x 3 repetitions = 18 matching trials were performed per p
In the \level{Ranking} task, participants had to rank the haptic textures, the visual textures, and the visuo-haptic texture pairs according to their perceived roughness.
It had one within-subjects factor, \factor{Modality}, with the following levels: \level{Visual}, \level{Haptic}, \level{Visuo-Haptic}.
Each modality level was ranked once per participant following the fixed order listed above (\secref{procedure}).

\subsection{Participants}
\label{participants}
@@ -110,7 +94,7 @@ For each trial of the \level{Matching} task, the chosen \response{Haptic Texture
The \response{Completion Time} was also measured as the time between the visual texture display and the haptic texture selection.
For each modality of the \level{Ranking} task, the \response{Rank} of each presented visual texture, haptic texture, or visuo-haptic texture pair was recorded.

After each of the two tasks, participants answered the following 7-item Likert scale questions (1=Not at all, 7=Extremely):
\begin{itemize}
\item \response{Haptic Difficulty}: How difficult was it to differentiate the tactile textures?
\item \response{Visual Difficulty}: How difficult was it to differentiate the visual textures?
@@ -120,4 +104,6 @@ For each modality of the \level{Ranking} task, the \response{Rank} of each of th
\item \response{Uncomfort}: How uncomfortable was it to use the haptic device?
\end{itemize}

In an open question, participants also commented on their strategy for completing the \level{Matching} task (\enquote{How did you associate the tactile textures with the visual textures?}) and the \level{Ranking} task (\enquote{How did you rank the textures?}).

The results were analyzed using R (v4.4) and the packages \textit{afex} (v1.4), \textit{ARTool} (v0.11), \textit{corrr} (v0.4), \textit{FactoMineR} (v2.11), \textit{lme4} (v1.1), and \textit{performance} (v0.13).
@@ -7,27 +7,27 @@
\paragraph{Confusion Matrix}
\label{results_matching_confusion_matrix}

\figref{results/matching_confusion_matrix} shows the confusion matrix of the \level{Matching} task with the visual textures and the proportion of haptic textures selected in response, \ie the proportion of times each \response{Haptic Texture} was selected in response to the presentation of the corresponding \factor{Visual Texture}.
To determine which haptic textures were selected most often, the repetitions of the trials were first aggregated by counting the number of selections per participant for each (\factor{Visual Texture}, \response{Haptic Texture}) pair.
An \ANOVA based on a Poisson regression (no overdispersion was detected) indicated a statistically significant effect of the interaction \factor{Visual Texture} \x \response{Haptic Texture} on the number of selections (\chisqr{64}{180}{414}, \pinf{0.001}).
Post-hoc pairwise comparisons using Tukey's \HSD test then indicated statistically significant differences for the following visual textures (a sketch of this analysis follows the list):
\begin{itemize}
\item With \level{Sandpaper~320}, \level{Coffee Filter} was selected more often than the other haptic textures (\ztest{3.4}, \pinf{0.05} each), except \level{Plastic Mesh~1} and \level{Terra Cotta}.
\item With \level{Terra Cotta}, \level{Coffee Filter} was selected more often than the others (\ztest{3.4}, \pinf{0.05} each), except \level{Plastic Mesh~1} and \level{Terra Cotta}.
\item With \level{Coffee Filter}, \level{Coffee Filter} was selected more often than the others (\ztest{4.0}, \pinf{0.01} each), except \level{Terra Cotta}.
\end{itemize}
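While the analysis was run in R, the interaction test can be sketched in Python with \textit{statsmodels}, comparing the full Poisson model with the additive one through a likelihood-ratio test; \texttt{counts} is a hypothetical data frame with one row per participant and texture pair.
\begin{verbatim}
import statsmodels.api as sm
import statsmodels.formula.api as smf
from scipy.stats import chi2

# counts: columns participant, visual, haptic, n_selected.
full = smf.glm("n_selected ~ C(visual) * C(haptic)", data=counts,
               family=sm.families.Poisson()).fit()
additive = smf.glm("n_selected ~ C(visual) + C(haptic)", data=counts,
                   family=sm.families.Poisson()).fit()

lr = 2 * (full.llf - additive.llf)      # likelihood-ratio statistic
df = full.df_model - additive.df_model  # 8 x 8 = 64 interaction terms
p_value = chi2.sf(lr, df)
\end{verbatim}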

\fig[0.85]{results/matching_confusion_matrix}{Confusion matrix of the \level{Matching} task results.}[
With the presented visual textures as columns and the selected haptic texture in proportion as rows.
The number in a cell is the proportion of times the corresponding haptic texture was selected in response to the presentation of the corresponding visual texture.
The diagonal represents the expected correct answers.
]

Many mistakes were made: the expected haptic texture was selected on average only \percent{20} of the time for five of the visual textures, and even around \percent{5} for (visual) \level{Sandpaper~100}, \level{Brick~2}, and \level{Sandpaper~320}.
Only haptic \level{Coffee Filter} was correctly selected \percent{57} of the time, and was also often matched with the visual \level{Sandpaper~320} and \level{Terra Cotta} (around \percent{44} each).
Similarly, the haptic textures \level{Sandpaper~320} and \level{Plastic Mesh~1} were also selected for four and three visual textures, respectively (around \percent{25} each).
Additionally, the Spearman correlations between the trials were computed for each participant, and only 21 out of 60 were statistically significant (\pinf{0.05}), with a mean \spearman{0.52} \ci{0.43}{0.59}.

These results indicate that the participants hesitated between several haptic textures for a given visual texture, as also reported in several comments: some haptic textures were favored while others were almost never selected.
Another explanation could be that the participants had difficulty estimating the roughness of the visual textures.
Indeed, many participants explained that they tried to identify or imagine the roughness of a given visual texture and then to select the most plausible haptic texture, in terms of frequency and/or amplitude of vibrations.
@@ -36,35 +36,49 @@ Indeed, many participants explained that they tried to identify or imagine the r

To verify that the difficulty was the same for all the visual textures in the \level{Matching} task, the \response{Completion Time} of a trial was analyzed.
As the \response{Completion Time} results were Gamma distributed, they were log-transformed to approximate a normal distribution.
An \ANOVA based on a \LMM on the log \response{Completion Time} with the \factor{Visual Texture} as fixed effect and the participant as random intercept was performed.
Normality was verified with a QQ-plot of the model residuals.
No statistically significant effect of \factor{Visual Texture} was found (\anova{8}{512}{1.9}, \p{0.06}) on \response{Completion Time} (\geomean{44}{\s} \ci{42}{46}), indicating an equal difficulty and participant behavior for all the visual textures.
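This analysis was run in R; an equivalent mixed model can be sketched in Python with \textit{statsmodels}, where \texttt{trials} is a hypothetical data frame of per-trial completion times.
\begin{verbatim}
import numpy as np
import statsmodels.formula.api as smf

# trials: columns participant, visual, completion_time (one row per trial).
trials["log_ct"] = np.log(trials["completion_time"])
lmm = smf.mixedlm("log_ct ~ C(visual)", data=trials,
                  groups=trials["participant"]).fit()
print(lmm.summary())
\end{verbatim}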

\subsection{Textures Ranking}
\label{results_ranking}

\figref{results/rankings_modality} presents the results of the three rankings of the haptic textures alone, the visual textures alone, and the visuo-haptic texture pairs.
For each ranking, a Friedman test was performed with post-hoc Wilcoxon signed-rank tests and Holm-Bonferroni adjustment.
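Although these tests were run in R, the same procedure can be sketched in Python with SciPy and \textit{statsmodels}; \texttt{ranks} is a hypothetical participants \x textures array of the ranks given for one modality.
\begin{verbatim}
from scipy.stats import friedmanchisquare, wilcoxon
from statsmodels.stats.multitest import multipletests

def ranking_tests(ranks):
    """Friedman omnibus test, then Holm-adjusted Wilcoxon post-hocs."""
    stat, p_omnibus = friedmanchisquare(*ranks.T)
    pairs, p_values = [], []
    n_textures = ranks.shape[1]
    for i in range(n_textures):
        for j in range(i + 1, n_textures):
            pairs.append((i, j))
            p_values.append(wilcoxon(ranks[:, i], ranks[:, j]).pvalue)
    _, p_adjusted, _, _ = multipletests(p_values, method="holm")
    return stat, p_omnibus, dict(zip(pairs, p_adjusted))
\end{verbatim}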

\fig[1]{results/rankings_modality}{Means with bootstrap \percent{95} \CI of the \level{Ranking} task results for each \factor{Modality}.}[
Shown for the haptic textures alone (left), the visual textures alone (center), and the visuo-haptic texture pairs (right).
The order of the textures on the x-axis differs between modalities.
A lower rank means that the texture was considered rougher, a higher rank means smoother.
Wilcoxon signed-rank tests and Holm-Bonferroni adjustment: all comparisons were statistically significantly different (\pinf{0.05}) except when marked with an \enquote{X}.
]
\paragraph{Haptic Textures Ranking}

Almost all the texture pairs in the haptic textures ranking results were statistically significantly different (\chisqr{8}{20}{146}, \pinf{0.001}; \pinf{0.05} for each comparison; see \figref{results/rankings_modality}, left).
However, no difference was found between the pairs (\level{Metal Mesh}, \level{Sandpaper~100}), (\level{Cork}, \level{Brick~2}), (\level{Cork}, \level{Sandpaper~320}), (\level{Plastic Mesh~1}, \level{Velcro Hooks}), and (\level{Plastic Mesh~1}, \level{Terra Cotta}).
Average Kendall's Tau correlations between the participants indicated a high consensus (\kendall{0.82} \ci{0.81}{0.84}), showing that participants perceived the roughness of the haptic textures similarly.

\paragraph{Visual Textures Ranking}

Most of the texture pairs in the visual textures ranking results were also statistically significantly different (\chisqr{8}{20}{119}, \pinf{0.001}; \pinf{0.05} for each comparison; see \figref{results/rankings_modality}, center), except for the following.
No difference was found between \level{Plastic Mesh~1} and \level{Metal Mesh}, \level{Brick~2}, \level{Sandpaper~100}, \level{Cork}, or \level{Velcro Hooks}; nor between \level{Velcro Hooks} and \level{Sandpaper~100}, \level{Cork}, or \level{Brick~2}.
No difference was found either between the pairs (\level{Metal Mesh}, \level{Cork}), (\level{Sandpaper~100}, \level{Brick~2}), (\level{Sandpaper~320}, \level{Terra Cotta}), and (\level{Sandpaper~320}, \level{Coffee Filter}).
Even though the consensus was high (\kendall{0.61} \ci{0.58}{0.64}), the roughness of the visual textures was more difficult to estimate, in particular for \level{Plastic Mesh~1} and \level{Velcro Hooks}.

\paragraph{Visuo-Haptic Textures Ranking}

Also, almost all the texture pairs in the visuo-haptic textures ranking results were statistically significantly different (\chisqr{8}{20}{140}, \pinf{0.001}; \pinf{0.05} for each comparison; see \figref{results/rankings_modality}, right).
However, no difference was found between the textures within each of the following groups: \{\level{Sandpaper~100}, \level{Cork}\}; \{\level{Cork}, \level{Brick~2}\}; and \{\level{Plastic Mesh~1}, \level{Velcro Hooks}, \level{Sandpaper~320}\}.
The consensus between the participants was also high (\kendall{0.77} \ci{0.74}{0.79}).
Finally, the similarity of the three rankings of each participant was calculated (\figref{results/rankings_texture}).
The \textit{Visuo-Haptic Textures Ranking} was on average highly similar to the \textit{Haptic Textures Ranking} (\kendall{0.79} \ci{0.72}{0.86}) and moderately similar to the \textit{Visual Textures Ranking} (\kendall{0.48} \ci{0.39}{0.56}).
A Wilcoxon signed-rank test indicated that this difference was statistically significant (\wilcoxon{190}, \p{0.002}).
These results indicate that the two haptic and visual modalities were integrated together, the resulting roughness ranking lying between the two single-modality rankings, but with haptics predominating.

\fig[1]{results/rankings_texture}{Means with bootstrap \percent{95} \CI of the \level{Ranking} task results for each \factor{Visual Texture}.}[
A lower rank means that the texture was considered rougher, a higher rank means smoother.
]

@@ -73,64 +87,74 @@ These results indicate that the two haptic and visual modalities were integrated

The high level of agreement between participants on the three haptic, visual and visuo-haptic rankings in the \level{Ranking} task (\secref{results_ranking}), as well as the similarity of the within-participant rankings, suggest that participants perceived the roughness of the textures similarly, but differed in their strategies for matching the haptic and visual textures in the \level{Matching} task (\secref{results_matching}).

To further investigate the perceived similarity of the haptic and visual textures, and to identify groups of textures that were perceived as similar on the \level{Matching} task, a correspondence analysis and a hierarchical clustering were performed on the matching task confusion matrix (\figref{results/matching_confusion_matrix}).

\paragraph{Correspondence Analysis}

The correspondence analysis captured \percent{60} and \percent{29} of the variance in the first and second dimensions, respectively, with the remaining dimensions accounting for less than \percent{5} each.
\figref{results/matching_correspondence_analysis} shows the first two dimensions with the 18 haptic and visual textures.
The first dimension was similar to the rankings (\figref{results/rankings_texture}), distributing the textures according to their perceived roughness.
It seems that the second dimension opposed textures that were perceived as hard to those perceived as softer, as also reported by participants.
Stiffness is indeed an important perceptual dimension of a material (\secref[related_work]{hardness}).
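The analysis was run in R; for illustration, the underlying computation of a correspondence analysis reduces to a singular value decomposition of the standardized residuals of the confusion matrix, as in this Python sketch:
\begin{verbatim}
import numpy as np

def correspondence_analysis(counts):
    """Row/column coordinates of a simple CA (illustrative sketch)."""
    P = counts / counts.sum()  # correspondence matrix
    r = P.sum(axis=1)          # row masses
    c = P.sum(axis=0)          # column masses
    S = (P - np.outer(r, c)) / np.sqrt(np.outer(r, c))
    U, sv, Vt = np.linalg.svd(S, full_matrices=False)
    rows = U * sv / np.sqrt(r)[:, None]     # principal row coordinates
    cols = Vt.T * sv / np.sqrt(c)[:, None]  # principal column coordinates
    inertia = sv**2 / (sv**2).sum()         # variance per dimension
    return rows, cols, inertia
\end{verbatim}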

\fig[1]{results/matching_correspondence_analysis}{Correspondence analysis of the confusion matrix of the \level{Matching} task.}[
The closer the haptic and visual textures are, the more similar they were judged.
The first dimension (horizontal axis) explains \percent{60} of the variance; the second dimension (vertical axis) explains \percent{29} of the variance.
The confusion matrix is shown in \figref{results/matching_confusion_matrix}.
]

\paragraph{Hierarchical Clustering}

\figref{results_clusters} shows the dendrograms of the two hierarchical clusterings of the haptic and visual textures, constructed using the Euclidean distance and Ward's method on squared distance.
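The dendrograms were computed in R; an equivalent clustering can be sketched with SciPy, where \texttt{profiles} and \texttt{texture\_names} are hypothetical placeholders for the confusion matrix rows and their labels.
\begin{verbatim}
from scipy.cluster.hierarchy import dendrogram, fcluster, linkage

# profiles: one selection profile per texture, from the confusion matrix.
# Ward's method operates on squared Euclidean distances internally.
Z = linkage(profiles, method="ward", metric="euclidean")
clusters = fcluster(Z, t=4, criterion="maxclust")  # e.g. 4 haptic clusters
dendrogram(Z, labels=texture_names)
\end{verbatim}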

The four identified haptic texture clusters were: \enquote{Roughest} \{\level{Metal Mesh}, \level{Sandpaper~100}, \level{Brick~2}, \level{Cork}\}; \enquote{Rougher} \{\level{Sandpaper~320}, \level{Velcro Hooks}\}; \enquote{Smoother} \{\level{Plastic Mesh~1}, \level{Terra Cotta}\}; \enquote{Smoothest} \{\level{Coffee Filter}\} (\figref{results/clusters_haptic}).
Similar to the haptic ranks (\figref{results/rankings_modality}, left), the clusters could have been named according to their perceived roughness.
It also shows that the participants compared and ranked the haptic textures during the \level{Matching} task to select the one that best matched the given visual texture.

The five identified visual texture clusters were: \enquote{Roughest} \{\level{Metal Mesh}\}; \enquote{Rougher} \{\level{Sandpaper~100}, \level{Brick~2}, \level{Velcro Hooks}\}; \enquote{Medium} \{\level{Cork}, \level{Plastic Mesh~1}\}; \enquote{Smoother} \{\level{Sandpaper~320}, \level{Terra Cotta}\}; \enquote{Smoothest} \{\level{Coffee Filter}\} (\figref{results/clusters_visual}).
They are also easily identifiable on the visual ranking results, which made it possible to name them.

\begin{subfigs}{results_clusters}{Dendrograms of the hierarchical clusterings of the confusion matrix of the \level{Matching} task.}[
Done with the Euclidean distance and Ward's method on squared distance.
The height of the dendrograms represents the distance between the clusters.
][
\item For the haptic textures.
\item For the visual textures.
]
\subfig[0.48]{results/clusters_haptic}
\subfig[0.48]{results/clusters_visual}
\end{subfigs}
|
|
||||||
\paragraph{Confusion Matrices of Clusters}
|
\paragraph{Confusion Matrices of Clusters}
|
||||||
|
|
||||||
Based on these results, two alternative confusion matrices were constructed.
|
Based on these results, two alternative confusion matrices were constructed.
|
||||||
|
As in \secref{results_matching}, an \ANOVA based on a Poisson regression was performed for each confusion matrix on the number of selections, followed by post-hoc pairwise comparisons using Tukey's \HSD test. No overdispersion was detected in the Poisson regressions.
|
||||||
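A minimal sketch of this analysis in R, assuming a long-format data frame df with one row per (visual, haptic) cluster pair and a count column of selections — all names are illustrative:

library(emmeans)      # post-hoc pairwise comparisons (Tukey-adjusted by default)
library(performance)  # overdispersion check

m <- glm(count ~ visual * haptic, family = poisson, data = df)
car::Anova(m)                     # likelihood-ratio chi-squared tests
check_overdispersion(m)           # residual deviance vs. degrees of freedom
emmeans(m, pairwise ~ haptic | visual, type = "response")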
|
|
||||||
\figref{results/haptic_visual_clusters_confusion_matrices} (left) shows the confusion matrix of the \level{Matching} task with visual texture clusters and the proportion of haptic texture clusters selected in response.
|
\figref{results/haptic_visual_clusters_confusion_matrices} (left) shows the confusion matrix of the \level{Matching} task with visual texture clusters and the proportion of haptic texture clusters selected in response.
|
||||||
A two-sample Pearson Chi-Squared test (\chisqr{16}{540}{353}, \pinf{0.001}) and Holm-Bonferroni adjusted binomial tests indicated that the following (Visual Cluster, Haptic Cluster) pairs have proportion selections statistically significantly higher than chance (\ie \percent{20} each): %
|
There was a statistically significant effect of the visual texture cluster \x haptic texture cluster interaction on the number of selections (\chisqr{12}{180}{324}, \pinf{0.001}), with statistically significant differences for the following visual clusters:
|
||||||
(Roughest, Roughest), (Rougher, Rougher), (Medium, Rougher), (Medium, Smoother), (Smoother, Smoother), (Smoother, Smoothest), and (Smoothest, Smoothest) (\pinf{0.005} each).
|
\begin{itemize}
|
||||||
|
\item With \enquote{Roughest}, the haptic cluster \enquote{Roughest} was the most selected (\ztest{4.6}, \pinf{0.001}).
|
||||||
|
\item With \enquote{Rougher}, \enquote{Smoothest} was the least selected (\ztest{-4.0}, \pinf{0.001}) and \enquote{Rougher} was selected more often than \enquote{Smoother} (\ztest{-3.4}, \pinf{0.001}).
|
||||||
|
\item With \enquote{Medium}, \enquote{Rougher} and \enquote{Smoother} were both selected more often than \enquote{Roughest} and \enquote{Smoothest} (\ztest{4.5}, \pinf{0.001}).
|
||||||
|
\item With \enquote{Smoother}, \enquote{Smoother} (\ztest{4.2}, \pinf{0.001}) and \enquote{Smoothest} (\ztest{4.7}, \pinf{0.001}) were both selected more often than \enquote{Roughest} and \enquote{Rougher}.
|
||||||
|
\item With \enquote{Smoothest}, \enquote{Smoother} (\ztest{2.6}, \p{0.05}) and \enquote{Smoothest} (\ztest{3.9}, \pinf{0.001}) were both selected more often than \enquote{Roughest} and \enquote{Rougher}.
|
||||||
|
\end{itemize}
|
||||||
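The Holm-Bonferroni adjusted binomial tests against the \percent{20} chance level used in the earlier version of this analysis can be sketched in R as follows; the counts are illustrative placeholders, not the study's data:

counts <- c(12, 25, 18, 5)          # selections of each haptic cluster (illustrative)
n <- sum(counts)
chance <- 0.20                      # chance level stated in the text
pvals <- sapply(counts, function(k) binom.test(k, n, p = chance)$p.value)
p.adjust(pvals, method = "holm")    # Holm-Bonferroni adjustment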
|
|
||||||
\figref{results/haptic_visual_clusters_confusion_matrices} (right) shows the confusion matrix of the \level{Matching} task with visual texture ranks and the proportion of haptic texture clusters selected in response.
|
\figref{results/haptic_visual_clusters_confusion_matrices} (right) shows the confusion matrix of the \level{Matching} task with visual texture ranks and the proportion of haptic texture clusters selected in response.
|
||||||
A two-sample Pearson Chi-Squared test (\chisqr{24}{540}{342}, \pinf{0.001}) and Holm-Bonferroni adjusted binomial tests indicated that the following (Visual Texture Rank, Haptic Cluster) pairs have proportion selections statistically significantly higher than chance: %
|
There was a statistically significant effect of the visual texture rank \x haptic texture cluster interaction on the number of selections (\chisqr{24}{180}{340}, \pinf{0.001}), with statistically significant differences for the following visual texture ranks:
|
||||||
(0, Roughest); (1, Rougher); (2, Rougher); (3, Rougher); (4, Rougher); (5, Smoother); (6, Smoother); (7, Smoothest); and (8, Smoothest) (\pinf{0.05} each).
|
\begin{itemize}
|
||||||
This shows that the participants consistently identified the roughness of each visual texture and selected the corresponding haptic texture cluster.
|
\item Rank 0: the haptic cluster \enquote{Roughest} was the most selected (\ztest{4.5}, \pinf{0.001}).
|
||||||
|
\item Ranks 1, 2 and 3: \enquote{Smoothest} was the least selected (\ztest{-3.0}, \p{0.04}).
|
||||||
|
\item Rank 4: \enquote{Rougher} was selected more often than \enquote{Roughest} and \enquote{Smoothest} (\ztest{3.0}, \p{0.03}).
|
||||||
|
\item Rank 5: \enquote{Rougher} and \enquote{Smoother} were both selected more often than \enquote{Roughest} and \enquote{Smoothest} (\ztest{4.5}, \p{0.01}).
|
||||||
|
\item Rank 6: \enquote{Smoother} was selected more often than \enquote{Roughest} (\ztest{3.2}, \p{0.006}).
|
||||||
|
\item Rank 7: \enquote{Smoother} and \enquote{Smoothest} were both selected more often than \enquote{Roughest} and \enquote{Rougher} (\ztest{3.4}, \p{0.04}).
|
||||||
|
\item Rank 8: \enquote{Smoother} and \enquote{Smoothest} were both selected more often than \enquote{Roughest} and \enquote{Rougher} (\ztest{3.2}, \p{0.04}).
|
||||||
|
\end{itemize}
|
||||||
|
|
||||||
\fig{results/haptic_visual_clusters_confusion_matrices}{
|
\fig{results/haptic_visual_clusters_confusion_matrices}{
|
||||||
Confusion matrices of the visual texture clusters (left) and visual texture ranks (right) with the proportion of haptic texture clusters selected in response.
|
Confusion matrices of the visual texture clusters (left) and visual texture ranks (right) with the proportion of haptic texture clusters selected in response.
|
||||||
}[
|
}[]
|
||||||
Holm-Bonferroni adjusted binomial test results are marked in bold when the proportion is higher than chance (\ie more than \percent{20}, \pinf{0.05}).
|
|
||||||
]
|
|
||||||
|
|
||||||
\subsection{Questionnaire}
|
\subsection{Questionnaire}
|
||||||
\label{results_questions}
|
\label{results_questions}
|
||||||
@@ -140,7 +164,7 @@ A non-parametric \ANOVA on \ART models were used for the \response{Difficulty} a
|
|||||||
The other question results were analyzed using Wilcoxon signed-rank tests, with Holm-Bonferroni adjustment.
|
The other question results were analyzed using Wilcoxon signed-rank tests, with Holm-Bonferroni adjustment.
|
||||||
The results are shown as mean $\pm$ standard deviation.
|
The results are shown as mean $\pm$ standard deviation.
|
||||||
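A minimal sketch of these questionnaire analyses in R with the ARTool package, assuming a data frame q with one rating per participant, task, and modality — all names are illustrative:

library(ARTool)

# Non-parametric ANOVA on Aligned Rank Transform (ART) models
m <- art(difficulty ~ task * modality + (1 | participant), data = q)
anova(m)

# Wilcoxon signed-rank test for one paired comparison
# (rows assumed aligned by participant; p.adjust() is then applied
# across the family of tests for the Holm-Bonferroni adjustment)
wilcox.test(q$rating[q$modality == "Haptic"],
            q$rating[q$modality == "Visual"], paired = TRUE)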
|
|
||||||
On \response{Difficulty}, there were statistically significant effects of \factor{Task} (\anova{1}{57}{13}, \pinf{0.001}) and of \factor{Modality} (\anova{1}{57}{8}, \p{0.007}), but no interaction effect. % \factor{Task} \x \factor{Modality} (\anova{1}{57}{2}, \ns).
|
On \response{Difficulty}, there were statistically significant effects of \factor{Task} (\anova{1}{57}{13}, \pinf{0.001}) and of \factor{Modality} (\anova{1}{57}{8}, \p{0.007}), but no \factor{Task} \x \factor{Modality} interaction effect (\anova{1}{57}{2}, \ns).
|
||||||
The \level{Ranking} task was found easier (\num{2.9 \pm 1.2}) than the \level{Matching} task (\num{3.9 \pm 1.5}), and the Haptic textures were found easier to discriminate (\num{3.0 \pm 1.3}) than the Visual ones (\num{3.8 \pm 1.5}).
|
The \level{Ranking} task was found easier (\num{2.9 \pm 1.2}) than the \level{Matching} task (\num{3.9 \pm 1.5}), and the Haptic textures were found easier to discriminate (\num{3.0 \pm 1.3}) than the Visual ones (\num{3.8 \pm 1.5}).
|
||||||
|
|
||||||
Both haptic and visual textures were judged moderately realistic for both tasks (\num{4.2 \pm 1.3}), with no statistically significant effect of \factor{Task}, \factor{Modality} or their interaction on \response{Realism}.
|
Both haptic and visual textures were judged moderately realistic for both tasks (\num{4.2 \pm 1.3}), with no statistically significant effect of \factor{Task}, \factor{Modality} or their interaction on \response{Realism}.
|
||||||
@@ -151,10 +175,10 @@ The coherence of the texture pairs was considered moderate (\num{4.6 \pm 1.2}) a
|
|||||||
Pairwise Wilcoxon signed-rank tests with Holm-Bonferroni adjustment: * is \pinf{0.05}, ** is \pinf{0.01} and *** is \pinf{0.001}.
|
Pairwise Wilcoxon signed-rank tests with Holm-Bonferroni adjustment: * is \pinf{0.05}, ** is \pinf{0.01} and *** is \pinf{0.001}.
|
||||||
Lower is better for Difficulty and Uncomfortable; higher is better for Realism and Textures Match.
|
Lower is better for Difficulty and Uncomfortable; higher is better for Realism and Textures Match.
|
||||||
][
|
][
|
||||||
\item By modality.
|
\item By \factor{Modality}.
|
||||||
\item By task.
|
\item By \factor{Task}.
|
||||||
]
|
]
|
||||||
\subfigsheight{70mm}
|
\subfigsheight{70mm}
|
||||||
\subfig{results/questions_modalities}%
|
\subfig{results/questions_modalities}
|
||||||
\subfig{results/questions_tasks}%
|
\subfig{results/questions_tasks}
|
||||||
\end{subfigs}
|
\end{subfigs}
|
||||||
|
|||||||
@@ -17,19 +17,19 @@ Several strategies were used, as some participants reported using vibration freq
|
|||||||
It should be noted that the task was rather difficult (\figref{results_questions}), as participants had no prior knowledge of the textures, there were no additional visual cues such as the shape of an object, and the term \enquote{roughness} had not been used by the experimenter prior to the \level{Ranking} task.
|
It should be noted that the task was rather difficult (\figref{results_questions}), as participants had no prior knowledge of the textures, there were no additional visual cues such as the shape of an object, and the term \enquote{roughness} had not been used by the experimenter prior to the \level{Ranking} task.
|
||||||
|
|
||||||
The correspondence analysis (\figref{results/matching_correspondence_analysis}) highlighted that participants did indeed match visual and haptic textures primarily on the basis of their perceived roughness (\percent{60} of variance), which is in line with previous perception studies on real textures \cite{baumgartner2013visual} and virtual textures \cite{culbertson2014modeling}.
|
The correspondence analysis (\figref{results/matching_correspondence_analysis}) highlighted that participants did indeed match visual and haptic textures primarily on the basis of their perceived roughness (\percent{60} of variance), which is in line with previous perception studies on real textures \cite{baumgartner2013visual} and virtual textures \cite{culbertson2014modeling}.
|
||||||
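Such a correspondence analysis can be sketched in R; FactoMineR is one common option (the package choice and object names are illustrative, not necessarily those used here):

library(FactoMineR)

res <- CA(confusion, graph = FALSE)  # confusion: visual x haptic selection counts
res$eig                              # variance per dimension (here ~60% and ~30%)
plot(res)                            # biplot of the visual and haptic textures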
The rankings (\figref{results/ranking_mean_ci}) confirmed that the participants all perceived the roughness of haptic textures very similarly, but that there was less consensus for visual textures, which is also in line with roughness rankings for real haptic and visual textures \cite{bergmanntiest2007haptic}.
|
The rankings (\figref{results/rankings_modality}) confirmed that the participants all perceived the roughness of haptic textures very similarly, but that there was less consensus for visual textures, which is also in line with roughness rankings for real haptic and visual textures \cite{bergmanntiest2007haptic}.
|
||||||
These results made it possible to identify and name groups of textures in the form of clusters (\figref{results_clusters}), and to construct confusion matrices between these clusters, and between visual texture ranks and haptic clusters (\figref{results/haptic_visual_clusters_confusion_matrices}), showing that participants consistently identified and matched haptic and visual textures.
|
These results made it possible to identify and name groups of textures in the form of clusters (\figref{results_clusters}), and to construct confusion matrices between these clusters, and between visual texture ranks and haptic clusters (\figref{results/haptic_visual_clusters_confusion_matrices}), showing that participants consistently identified and matched haptic and visual textures.
|
||||||
\percent{30} of the matching variance of the correspondence analysis was also captured by a second dimension, opposing the roughest textures (\level{Metal Mesh}, \level{Sandpaper~100}) and, to a lesser extent, the smoothest (\level{Coffee Filter}, \level{Sandpaper~320}) to all other textures (\figref{results/matching_correspondence_analysis}).
|
\percent{30} of the matching variance of the correspondence analysis was also captured by a second dimension, opposing the roughest textures (\level{Metal Mesh}, \level{Sandpaper~100}) and, to a lesser extent, the smoothest (\level{Coffee Filter}, \level{Sandpaper~320}) to all other textures (\figref{results/matching_correspondence_analysis}).
|
||||||
|
|
||||||
One hypothesis is that this dimension could be the perceived hardness (\secref[related_work]{hardness}) of the virtual materials, with \level{Metal Mesh} and smooth textures appearing harder than the other textures, whose granularity could have been perceived as bumps on the surface that could deform under finger pressure.
|
One hypothesis is that this dimension could be the perceived hardness (\secref[related_work]{hardness}) of the virtual materials, with \level{Metal Mesh} and smooth textures appearing harder than the other textures, whose granularity could have been perceived as bumps on the surface that could deform under finger pressure.
|
||||||
Hardness is, along with roughness, one of the main characteristics perceived through vision and touch of real materials \cite{baumgartner2013visual,vardar2019fingertip}, as well as of virtual haptic renderings \cite{culbertson2014modeling,degraen2019enhancing}.
|
Hardness is, along with roughness, one of the main characteristics perceived through vision and touch of real materials \cite{baumgartner2013visual,vardar2019fingertip}, as well as of virtual haptic renderings \cite{culbertson2014modeling,degraen2019enhancing}.
|
||||||
|
|
||||||
The last visuo-haptic roughness ranking (\figref{results/ranking_mean_ci}) showed that both haptic and visual sensory information were well integrated as the resulting roughness ranking was being in between the two individual haptic and visual rankings.
|
The last visuo-haptic roughness ranking (\figref{results/rankings_texture}) showed that both haptic and visual sensory information were well integrated, as the resulting roughness ranking fell between the two individual haptic and visual rankings.
|
||||||
Several strategies were reported: some participants first classified visually and then corrected with haptics, others classified haptically and then integrated visuals.
|
Several strategies were reported: some participants first classified visually and then corrected with haptics, others classified haptically and then integrated visuals.
|
||||||
While visual sensation did influence perception, as observed in previous haptic \AR studies \cite{punpongsanon2015softar,gaffary2017ar,fradin2023humans}, haptic sensation dominated here.
|
While visual sensation did influence perception, as observed in previous haptic \AR studies \cite{punpongsanon2015softar,gaffary2017ar,fradin2023humans}, haptic sensation dominated here.
|
||||||
This indicates that participants were more confident in, and relied more on, the haptic roughness perception than the visual one when integrating both into one coherent perception.
|
This indicates that participants were more confident in, and relied more on, the haptic roughness perception than the visual one when integrating both into one coherent perception.
|
||||||
|
|
||||||
Several participants also described attempting to identify visual and haptic textures using spatial breaks, edges or patterns, that were not reported when these textures were displayed in non-immersive \VEs with a screen \cite{culbertson2014modeling,culbertson2015should}.
|
Several participants also described attempting to identify visual and haptic textures using spatial breaks, edges, or patterns, which were not reported when these textures were displayed in \VEs using a screen \cite{culbertson2014modeling,culbertson2015should}.
|
||||||
A few participants even reported that they clearly sensed patterns on haptic textures.
|
A few participants even reported that they clearly sensed patterns on haptic textures.
|
||||||
However, the visual and haptic textures used were isotropic and homogeneous models of real texture captures, \ie their rendered roughness was constant and did not depend on the direction of movement but only on the speed of the finger (\secref[related_work]{texture_rendering}).
|
However, the visual and haptic textures used were isotropic and homogeneous models of real texture captures, \ie their rendered roughness was constant and did not depend on the direction of movement but only on the speed of the finger (\secref[related_work]{texture_rendering}).
|
||||||
Overall, the haptic device was judged to be comfortable, and the visual and haptic textures were judged to be fairly realistic and to work well together (\figref{results_questions}).
|
Overall, the haptic device was judged to be comfortable, and the visual and haptic textures were judged to be fairly realistic and to work well together (\figref{results_questions}).
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
\section{Conclusion}
|
\section{Conclusion}
|
||||||
\label{conclusion}
|
\label{conclusion}
|
||||||
|
|
||||||
In this chapter, we investigated how users perceived simultaneous and co-localized visuo-haptic texture augmentations of real surfaces seen in immersive \OST-\AR and touched directly with the index finger.
|
In this chapter, we investigated how users perceived simultaneous and co-localized visuo-haptic texture augmentations of real surfaces seen with an \OST-\AR headset and touched directly with the index finger.
|
||||||
Using the wearable visuo-haptic augmentation system presented in \chapref{vhar_system}, the haptic roughness texture was rendered based on the \HaTT data-driven models and finger speed.
|
Using the wearable visuo-haptic augmentation system presented in \chapref{vhar_system}, the haptic roughness texture was rendered based on the \HaTT data-driven models and finger speed.
|
||||||
In a user study, 20 participants rated the coherence, realism and perceived roughness of the combination of nine representative visuo-haptic texture pairs.
|
In a user study, 20 participants rated the coherence, realism and perceived roughness of the combination of nine representative visuo-haptic texture pairs.
|
||||||
|
|
||||||
@@ -11,18 +11,18 @@ Conversely, there was less consensus on the perceived roughness of visual textur
|
|||||||
Regarding the final roughness perception ranking of the original visuo-haptic pairs, the haptic roughness sensation dominated the perception.
|
Regarding the final roughness perception ranking of the original visuo-haptic pairs, the haptic roughness sensation dominated the perception.
|
||||||
This suggests that \AR visual textures that augment real surfaces can be enhanced with a set of data-driven vibrotactile haptic textures in a coherent and realistic manner.
|
This suggests that \AR visual textures that augment real surfaces can be enhanced with a set of data-driven vibrotactile haptic textures in a coherent and realistic manner.
|
||||||
|
|
||||||
This paves the way for new \AR applications capable of augmenting a real environment with virtual visuo-haptic textures, such as visuo-haptic painting in artistic or object design context, or viewing and touching virtual objects in a museum or a showroom.
|
This paves the way for new \AR applications capable of augmenting a \RE with virtual visuo-haptic textures, such as visuo-haptic painting in artistic or object design contexts, or viewing and touching virtual objects in a museum or a showroom.
|
||||||
The latter is illustrated in \figref{experiment/use_case}, where a user applies different visuo-haptic textures to a wall, in an interior design scenario, to compare them visually and by touch.
|
The latter is illustrated in \figref{experiment/use_case}, where a user applies different visuo-haptic textures to a wall, in an interior design scenario, to compare them visually and by touch.
|
||||||
|
|
||||||
We instinctively perceive the properties of everyday objects by touching and exploring them, but we essentially interact with them by grasping in order to manipulate them.
|
We instinctively perceive the properties of everyday objects by touching and exploring them, but we essentially interact with them by grasping in order to manipulate them.
|
||||||
In this first part, we focused on the perception of wearable and immersive virtual textures that augment real surfaces when touched with the fingertip.
|
In this first part, we focused on the perception of virtual visuo-haptic textures that augment real surfaces when touched with the fingertip.
|
||||||
In the next part, we will propose to improve the direct manipulation of virtual objects with the hand using wearable visuo-haptic interaction feedback.
|
In the next part, we will propose to improve the direct manipulation of virtual objects with the hand using wearable visuo-haptic interaction feedback.
|
||||||
|
|
||||||
\noindentskip The work described in \chapref{vhar_textures} was presented at the EuroHaptics 2024 conference:
|
\noindentskip The work described in \chapref{vhar_textures} was presented at the EuroHaptics 2024 conference:
|
||||||
|
|
||||||
Erwan Normand, Claudio Pacchierotti, Eric Marchand, and Maud Marchal. \enquote{Augmenting the Texture Perception of Tangible Surfaces in Augmented Reality using Vibrotactile Haptic Stimuli}. In: \textit{EuroHaptics}. Lille, France, July 2024. pp. 469--484.
|
Erwan Normand, Claudio Pacchierotti, Eric Marchand, and Maud Marchal. \enquote{Augmenting the Texture Perception of Tangible Surfaces in Augmented Reality using Vibrotactile Haptic Stimuli}. In: \textit{EuroHaptics}. Lille, France, July 2024. pp. 469--484.
|
||||||
|
|
||||||
\fig[0.5]{experiment/use_case}{
|
\fig[0.65]{experiment/use_case}{
|
||||||
Illustration of the texture augmentation in \AR through an interior design scenario.
|
Illustration of the texture augmentation in \AR through an interior design scenario.
|
||||||
}[
|
}[
|
||||||
A user wearing an \AR headset and a vibrotactile haptic device on their index finger applies different virtual visuo-haptic textures to a real wall to compare them visually and by touch.
|
A user wearing an \AR headset and a vibrotactile haptic device on their index finger applies different virtual visuo-haptic textures to a real wall to compare them visually and by touch.
|
||||||
|
|||||||
BIN
3-perception/vhar-textures/figures/results/rankings_modality.pdf
Normal file
BIN
3-perception/vhar-textures/figures/results/rankings_texture.pdf
Normal file
@@ -1,15 +1,15 @@
|
|||||||
\section{Introduction}
|
\section{Introduction}
|
||||||
\label{intro}
|
\label{intro}
|
||||||
|
|
||||||
In the previous chapter, we presented a system for augmenting the visuo-haptic texture perception of real surfaces directly touched with the finger, using wearable vibrotactile haptics and an immersive \AR headset.
|
In the previous chapter, we presented a system for augmenting the visuo-haptic texture perception of real surfaces directly touched with the finger, using wearable vibrotactile haptics and an \OST-\AR headset.
|
||||||
In this chapter and the next one, we evaluate the user's perception of such wearable haptic texture augmentation under different visual rendering conditions.
|
In this chapter and the next one, we evaluate the user's perception of such wearable haptic texture augmentation under different visual rendering conditions.
|
||||||
|
|
||||||
Most haptic augmentations of real surfaces using wearable haptic devices, including texture roughness (\secref[related_work]{texture_rendering}), have been studied without visual feedback, and none have considered the influence of the visual rendering on their perception or integrated them in \AR and \VR.
|
Most haptic augmentations of real surfaces using wearable haptic devices, including texture roughness (\secref[related_work]{texture_rendering}), have been studied without visual feedback, and none have considered the influence of the visual rendering on their perception or integrated them in \AR and \VR.
|
||||||
Still, it is known that the visual rendering of an object can influence the perception of its haptic properties (\secref[related_work]{visual_haptic_influence}), and that the perception of the same haptic force-feedback or vibrotactile rendering can differ between \AR and \VR, probably due to differences in the perceived simultaneity of visual and haptic stimuli (\secref[related_work]{ar_vr_haptic}).
|
Still, it is known that the visual rendering of an object can influence the perception of its haptic properties (\secref[related_work]{visual_haptic_influence}), and that the perception of the same haptic force-feedback or vibrotactile rendering can differ between \AR and \VR, probably due to differences in the perceived simultaneity of visual and haptic stimuli (\secref[related_work]{ar_vr_haptic}).
|
||||||
In \AR, the user can see their own hand touching the surface, the worn haptic device, and the \RE, while in \VR these are hidden by the \VE.
|
In \AR, the user can see their own hand touching the surface, the worn haptic device, and the \RE, while in \VR these are hidden by the \VE.
|
||||||
|
|
||||||
In this chapter, we investigate the \textbf{role of the visual feedback of the virtual hand and of the environment (real or virtual) on the perception of a real surface whose haptic roughness is augmented} with wearable vibrotactile haptics. %voice-coil device worn on the finger.
|
In this chapter, we investigate the \textbf{role of the visual feedback of the virtual hand and of the environment (real or virtual) on the perception of a real surface whose haptic roughness is augmented} with wearable vibrotactile haptics.
|
||||||
To do so, we used the visuo-haptic system presented in \chapref{vhar_system} to render virtual vibrotactile patterned textures (\secref[related_work]{texture_rendering}) to augment the real surface being touched. % touched by the finger.% that can be directly touched with the bare finger.
|
To do so, we used the visuo-haptic system presented in \chapref{vhar_system} to render virtual vibrotactile patterned textures (\secref[related_work]{texture_rendering}) to augment the real surface being touched.
|
||||||
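To illustrate the principle behind such speed-driven patterned textures — not the system's actual implementation, which is described in \chapref{vhar_system} — a sinusoidal grating of spatial period lambda touched at finger speed v produces a vibration of frequency v / lambda, and the roughness manipulation scales its amplitude. A minimal R sketch with illustrative names:

render_texture <- function(speed, duration = 0.1, fs = 44100,
                           lambda = 0.002, amp = 1.0) {
  t <- seq(0, duration, by = 1 / fs)  # time samples at the output rate
  f <- speed / lambda                 # vibration frequency from finger speed
  amp * sin(2 * pi * f * t)           # waveform for the voice-coil actuator
}
# Comparison textures would differ only in amp (e.g. +/- 12.5, 25.0, 37.5 %).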
We evaluated, in a \textbf{user study with psychophysical methods and an extensive questionnaire}, the perceived roughness augmentation in three visual rendering conditions: \textbf{(1) without visual augmentation}, in \textbf{(2) \OST-\AR with a realistic virtual hand} rendering, and in \textbf{(3) \VR with the same virtual hand}.
|
We evaluated, in a \textbf{user study with psychophysical methods and an extensive questionnaire}, the perceived roughness augmentation in three visual rendering conditions: \textbf{(1) without visual augmentation}, in \textbf{(2) \OST-\AR with a realistic virtual hand} rendering, and in \textbf{(3) \VR with the same virtual hand}.
|
||||||
To control for the influence of the visual rendering, the real surface was not visually augmented and stayed the same in all conditions.
|
To control for the influence of the visual rendering, the real surface was not visually augmented and stayed the same in all conditions.
|
||||||
|
|
||||||
@@ -26,6 +26,6 @@ We then present the results obtained, discuss them, and outline recommendations
|
|||||||
|
|
||||||
\fig[0.55]{teaser/teaser2}{
|
\fig[0.55]{teaser/teaser2}{
|
||||||
Vibrotactile textures were rendered in real time on a real surface using a wearable vibrotactile device worn on the finger.
|
Vibrotactile textures were rendered in real time on a real surface using a wearable vibrotactile device worn on the finger.
|
||||||
}[%
|
}[
|
||||||
Participants explored this haptic roughness augmentation with (\level{Real}) their real hand alone, (\level{Mixed}) a realistic virtual hand overlay in \AR, and (\level{Virtual}) the same virtual hand in \VR.
|
Participants explored this haptic roughness augmentation with (\level{Real}) their real hand alone, (\level{Mixed}) a realistic virtual hand overlay in \AR, and (\level{Virtual}) the same virtual hand in \VR.
|
||||||
]
|
]
|
||||||
|
|||||||
@@ -1,22 +1,19 @@
|
|||||||
\section{User Study}
|
\section{User Study}
|
||||||
\label{experiment}
|
\label{experiment}
|
||||||
|
|
||||||
%The visuo-haptic rendering system, described in \secref[vhar_system]{method}, allows free exploration of virtual vibrotactile textures on real surfaces directly touched with the bare finger to simulate roughness augmentation, while the visual rendering of the hand and environment can be controlled to be in \AR or \VR.
|
|
||||||
%
|
|
||||||
%The user study aimed to investigate the effect of visual hand rendering in \AR or \VR on the perception of roughness texture augmentation of a touched real surface.
|
|
||||||
In a \TIFC task (\secref[related_work]{sensations_perception}), participants compared the roughness of different tactile texture augmentations in three visual rendering conditions: without any visual augmentation (\level{Real}, \figref{experiment/real}), in \AR with a realistic virtual hand superimposed on the real hand (\level{Mixed}, \figref{experiment/mixed}), and in \VR with the same virtual hand as an avatar (\level{Virtual}, \figref{experiment/virtual}).
|
In a \TIFC task (\secref[related_work]{sensations_perception}), participants compared the roughness of different tactile texture augmentations in three visual rendering conditions: without any visual augmentation (\level{Real}, \figref{experiment/real}), in \AR with a realistic virtual hand superimposed on the real hand (\level{Mixed}, \figref{experiment/mixed}), and in \VR with the same virtual hand as an avatar (\level{Virtual}, \figref{experiment/virtual}).
|
||||||
In order not to influence the perception, as vision is an important source of information and influence for the perception of texture \cite{bergmanntiest2007haptic,yanagisawa2015effects,vardar2019fingertip}, the touched surface was visually a uniform white; thus, only the visual aspect of the hand and the surrounding environment was changed.
|
In order not to influence the perception, as vision is an important source of information and influence for the perception of texture \cite{bergmanntiest2007haptic,yanagisawa2015effects,vardar2019fingertip}, the touched surface was visually a uniform white; thus, only the visual aspect of the hand and the surrounding environment was changed.
|
||||||
|
|
||||||
\begin{subfigs}{renderings}{
|
\begin{subfigs}{renderings}{
|
||||||
The three visual rendering conditions and the experimental procedure of the \TIFC psychophysical study.
|
The three visual rendering conditions and the experimental procedure of the \TIFC psychophysical study.
|
||||||
}[%
|
}[
|
||||||
During a trial, two tactile textures were rendered on the augmented area of the paper sheet (black rectangle) for \qty{3}{\s} each, one after the other, then the participant chose which one was the roughest.
|
During a trial, two tactile textures were rendered on the augmented area of the paper sheet (black rectangle) for \qty{3}{\s} each, one after the other, then the participant chose which one was the roughest.
|
||||||
The visual rendering stayed the same during the trial.
|
The visual rendering stayed the same during the trial.
|
||||||
The pictures were captured directly from the Microsoft HoloLens 2 headset.
|
The pictures were captured directly from the Microsoft HoloLens 2 headset.
|
||||||
][
|
][
|
||||||
\item The real environment and real hand view without any visual augmentation.
|
\item The \RE and real hand view, without any visual augmentation.
|
||||||
\item The real environment and hand view with the virtual hand.
|
\item The \RE and real hand view, with the superimposed virtual hand.
|
||||||
\item Virtual environment with the virtual hand.
|
\item The \VE with the virtual hand.
|
||||||
]
|
]
|
||||||
\subfig[0.326]{experiment/real}
|
\subfig[0.326]{experiment/real}
|
||||||
\subfig[0.326]{experiment/mixed}
|
\subfig[0.326]{experiment/mixed}
|
||||||
@@ -27,17 +24,18 @@ In order not to influence the perception, as vision is an important source of in
|
|||||||
\label{apparatus}
|
\label{apparatus}
|
||||||
|
|
||||||
An experimental environment was created to ensure a similar visual rendering in \AR and \VR (\figref{renderings}).
|
An experimental environment was created to ensure a similar visual rendering in \AR and \VR (\figref{renderings}).
|
||||||
It consisted of a \qtyproduct{300 x 210 x 400}{\mm} medium-density fibreboard (MDF) box with a paper sheet glued inside and a \qtyproduct{50 x 15}{\mm} rectangle printed on the sheet to delimit the area where the tactile textures were rendered.
|
It consisted of a \qtyproduct{30 x 21 x 40}{\cm} medium-density fibreboard box with a paper sheet glued inside and a \qtyproduct{5 x 1.5}{\cm} rectangle printed on the sheet to delimit the area where the tactile textures were rendered.
|
||||||
A single light source of \qty{800}{\lumen} placed \qty{70}{\cm} above the table fully illuminated the inside of the box.
|
A single light source of \qty{800}{\lumen} placed \qty{70}{\cm} above the table fully illuminated the inside of the box.
|
||||||
Participants rated the roughness of the paper (without any texture augmentation) before the experiment on a 7-point Likert scale (1~=~Extremely smooth, 7~=~Extremely rough) as quite smooth (\mean{2.5}, \sd{1.3}).
|
Participants rated the roughness of the paper (without any texture augmentation) before the experiment on a 7-point Likert scale (1~=~Extremely smooth, 7~=~Extremely rough) as quite smooth (\mean{2.5}, \sd{1.3}).
|
||||||
|
|
||||||
The visual rendering of the virtual hand and environment was achieved using the \OST-\AR headset Microsoft HoloLens~2 (\secref[vhar_system]{virtual_real_alignment}) running at \qty{60}{FPS} a custom application made with Unity 2021.1 and Mixed Reality Toolkit (MRTK) 2.7.2.
|
The visual rendering of the virtual hand and the \VE was achieved using the \OST-\AR headset Microsoft HoloLens~2 running at \qty{60}{FPS} a custom application made with Unity (v2021.1) and Mixed Reality Toolkit (v2.7).
|
||||||
An \OST-\AR headset was chosen over a \VST-\AR headset because the former only adds virtual content to the \RE, while the latter streams a real-time video capture of the \RE; one of our objectives was to directly compare a \VE replicating a real one, not a video feed that introduces many other visual limitations (\secref[related_work]{ar_displays}).
|
An \OST-\AR headset was chosen over a \VST-\AR headset because the former only adds virtual content to the \RE, while the latter streams a real-time video capture of the \RE; one of our objectives was to directly compare a \VE replicating a real one, not a video feed that introduces many other visual limitations (\secref[related_work]{ar_displays}).
|
||||||
|
|
||||||
The \VE carefully reproduced the \RE, including the geometry of the box, textures, lighting, and shadows (\figref{renderings}, \level{Virtual}).
|
We carefully reproduced the \RE in the \VE, including the geometry of the box, textures, lighting, and shadows (\figref{renderings}, \level{Virtual}).
|
||||||
The virtual hand model was a gender-neutral human right hand with realistic skin texture, similar to that used by \textcite{schwind2017these}.
|
The virtual hand model was a gender-neutral human right hand with realistic skin texture, similar to that used by \textcite{schwind2017these}.
|
||||||
Its size was adjusted to match the real hand of the participants before the experiment.
|
Prior to the experiment, the virtual hand and the \VE were registered to the real hand of the participant and the \RE, respectively, as described in \secref[vhar_system]{virtual_real_registration}.
|
||||||
The visual rendering of the virtual hand and environment is described in \secref[vhar_system]{virtual_real_alignment}.
|
The size of the virtual hand was also manually adjusted to match the real hand of the participant.
|
||||||
|
A \qty{\pm .5}{\cm} spatial alignment error and a \qty{160 \pm 30}{\ms} lag between the real hand and the virtual hand were measured (\secref[vhar_system]{virtual_real_registration}).
|
||||||
|
|
||||||
To ensure the same \FoV in all \factor{Visual Rendering} conditions, a cardboard mask was attached to the \AR headset (\figref{experiment/headset}).
|
To ensure the same \FoV in all \factor{Visual Rendering} conditions, a cardboard mask was attached to the \AR headset (\figref{experiment/headset}).
|
||||||
In the \level{Virtual} rendering, the mask only had holes for sensors to block the view of the \RE and simulate a \VR headset.
|
In the \level{Virtual} rendering, the mask only had holes for sensors to block the view of the \RE and simulate a \VR headset.
|
||||||
@@ -51,7 +49,7 @@ They also wore headphones with a brown noise masking the sound of the voice-coil
|
|||||||
The user study was held in a quiet room with no windows.
|
The user study was held in a quiet room with no windows.
|
||||||
|
|
||||||
\begin{subfigs}{setup}{Visuo-haptic texture rendering setup.}[][
|
\begin{subfigs}{setup}{Visuo-haptic texture rendering setup.}[][
|
||||||
\item HoloLens~2 \OST-\AR headset, the two cardboard masks to switch the real or virtual environments with the same field of view, and the \ThreeD-printed piece for attaching the masks to the headset.
|
\item HoloLens~2 \OST-\AR headset, the two cardboard masks to switch the \RE and \VE with the same \FoV, and the \ThreeD-printed piece for attaching the masks to the headset.
|
||||||
\item User exploring a virtual vibrotactile texture on a real sheet of paper.
|
\item User exploring a virtual vibrotactile texture on a real sheet of paper.
|
||||||
]
|
]
|
||||||
\subfigsheight{48.5mm}
|
\subfigsheight{48.5mm}
|
||||||
@@ -63,7 +61,7 @@ The user study was held in a quiet room with no windows.
|
|||||||
\label{procedure}
|
\label{procedure}
|
||||||
|
|
||||||
Participants were first given written instructions about the experimental setup and procedure, the informed consent form to sign, and a demographic questionnaire.
|
Participants were first given written instructions about the experimental setup and procedure, the informed consent form to sign, and a demographic questionnaire.
|
||||||
A calibration was then performed to adjust the HoloLens~2 to the participant's interpupillary distance (IPD), the virtual hand to the real hand size, and the fiducial marker to the finger position.
|
The calibration was then performed to adjust the HoloLens~2 to the participant's interpupillary distance, the fiducial marker to the finger position, and the virtual hand size to the real hand.
|
||||||
They familiarized themselves with the task by completing four training trials with the most different pair of textures.
|
They familiarized themselves with the task by completing four training trials with the most different pair of textures.
|
||||||
The trials were divided into three blocks, one for each \factor{Visual Rendering} condition, with a break and questionnaire between each block.
|
The trials were divided into three blocks, one for each \factor{Visual Rendering} condition, with a break and questionnaire between each block.
|
||||||
Before each block, the experimenter ensured that the \VE and the virtual hand were correctly aligned with their real equivalents, that the haptic device was in place, and attached the cardboard mask corresponding to the next \factor{Visual Rendering} condition to the headset.
|
Before each block, the experimenter ensured that the \VE and the virtual hand were correctly aligned with their real equivalents, that the haptic device was in place, and attached the cardboard mask corresponding to the next \factor{Visual Rendering} condition to the headset.
|
||||||
@@ -88,7 +86,7 @@ The user study took on average one hour to complete.
|
|||||||
|
|
||||||
The user study was a within-subjects design with two factors:
|
The user study was a within-subjects design with two factors:
|
||||||
\begin{itemize}
|
\begin{itemize}
|
||||||
\item \factor{Visual Rendering} consists of the augmented or virtual view of the environment, the hand and the wearable haptic device, with 3 levels: real environment and hand view without any visual augmentation (\figref{renderings}, \level{Real}), real environment and hand view with the superimposed virtual hand (\figref{renderings}, \level{Mixed}) and virtual environment with the virtual hand (\figref{renderings}, \level{Virtual}).
|
\item \factor{Visual Rendering} consists of the augmented or virtual view of the environment, the hand and the wearable haptic device, with 3 levels: \RE and real hand view without any visual augmentation (\figref{renderings}, \level{Real}), \AE with real hand view and the superimposed virtual hand (\figref{renderings}, \level{Mixed}), and \VE with the virtual hand (\figref{renderings}, \level{Virtual}).
|
||||||
\item \factor{Amplitude Difference} consists of the difference in amplitude of the comparison texture with the reference texture (which is identical for all visual renderings), with 6 levels: \qtylist{\pm 12.5; \pm 25.0; \pm 37.5}{\%}.
|
\item \factor{Amplitude Difference} consists of the difference in amplitude of the comparison texture with the reference texture (which is identical for all visual renderings), with 6 levels: \qtylist{\pm 12.5; \pm 25.0; \pm 37.5}{\%}.
|
||||||
\end{itemize}
|
\end{itemize}
|
||||||
|
|
||||||
@@ -118,6 +116,8 @@ After each \factor{Visual Rendering} block of trials, participants rated their e
|
|||||||
They also assessed their workload with the NASA Task Load Index (\response{NASA-TLX}) questionnaire after each block of trials (\tabref{questions2}).
|
They also assessed their workload with the NASA Task Load Index (\response{NASA-TLX}) questionnaire after each block of trials (\tabref{questions2}).
|
||||||
For all questions, participants were shown only labels (\eg \enquote{Not at all} or \enquote{Extremely}) and not the actual scale values (\eg 1 or 5) \cite{muller2014survey}.
|
For all questions, participants were shown only labels (\eg \enquote{Not at all} or \enquote{Extremely}) and not the actual scale values (\eg 1 or 5) \cite{muller2014survey}.
|
||||||
|
|
||||||
|
The results were analyzed using R (v4.4) and the packages \textit{afex} (v1.4), \textit{ARTool} (v0.11), \textit{MixedPsy} (v1.2), \textit{lme4} (v1.1), and \textit{performance} (v0.13).
|
||||||
|
|
||||||
\newcommand{\scalegroup}[2]{\multirow{#1}{1\linewidth}{#2}}
|
\newcommand{\scalegroup}[2]{\multirow{#1}{1\linewidth}{#2}}
|
||||||
\afterpage{
|
\afterpage{
|
||||||
\begin{tabwide}{questions1}
|
\begin{tabwide}{questions1}
|
||||||
|
|||||||
@@ -27,8 +27,8 @@ All pairwise differences were statistically significant.
|
|||||||
]
|
]
|
||||||
|
|
||||||
\begin{subfigs}{discrimination_accuracy}{Results of the vibrotactile texture roughness discrimination task. }[][
|
\begin{subfigs}{discrimination_accuracy}{Results of the vibrotactile texture roughness discrimination task. }[][
|
||||||
\item Estimated \PSE of each visual rendering, defined as the amplitude difference at which both reference and comparison textures are perceived to be equivalent. %, \ie the accuracy in discriminating vibrotactile roughness.
|
\item Estimated \PSE of each visual rendering, defined as the amplitude difference at which both reference and comparison textures are perceived to be equivalent.
|
||||||
\item Estimated \JND of each visual rendering. %, defined as the minimum perceptual amplitude difference, \ie the sensitivity to vibrotactile roughness differences.
|
\item Estimated \JND of each visual rendering.
|
||||||
]
|
]
|
||||||
\subfig[0.35]{results/trial_pses}
|
\subfig[0.35]{results/trial_pses}
|
||||||
\subfig[0.35]{results/trial_jnds}
|
\subfig[0.35]{results/trial_jnds}
|
||||||
@@ -37,8 +37,8 @@ All pairwise differences were statistically significant.
|
|||||||
\subsubsection{Response Time}
|
\subsubsection{Response Time}
|
||||||
\label{response_time}
|
\label{response_time}
|
||||||
|
|
||||||
A \LMM \ANOVA with by-participant random slopes for \factor{Visual Rendering}, and a log transformation (as \response{Response Time} measures were gamma distributed) indicated a statistically significant effect on \response{Response Time} of \factor{Visual Rendering} (\anova{2}{18}{6.2}, \p{0.009}, \figref{results/trial_response_times}).
|
A \LMM \ANOVA with by-participant random slopes for \factor{Visual Rendering} and a log transformation (as \response{Response Time} measures were gamma distributed) indicated a statistically significant effect of \factor{Visual Rendering} on \response{Response Time} (\anova{2}{18}{6.2}, \p{0.009}, see \figref{results/trial_response_times}).
|
||||||
Reported response times are \GM.
|
Reported response times are geometric means (GM).
|
||||||
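A minimal sketch of this model in R with lme4 and lmerTest, assuming a trials data frame with rt, rendering, and participant columns — names are illustrative:

library(lme4)
library(lmerTest)  # F-tests (Satterthwaite) for lmer models

m <- lmer(log(rt) ~ rendering + (rendering | participant), data = trials)
anova(m)           # test of the Visual Rendering effect
exp(fixef(m)[1])   # back-transformed intercept = geometric mean of the reference level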
Participants took longer on average to respond with the \level{Virtual} rendering (\geomean{1.65}{\s} \ci{1.59}{1.72}) than with the \level{Real} rendering (\geomean{1.38}{\s} \ci{1.32}{1.43}), which is the only statistically significant difference (\ttest{19}{0.3}, \p{0.005}).
|
Participants took longer on average to respond with the \level{Virtual} rendering (\geomean{1.65}{\s} \ci{1.59}{1.72}) than with the \level{Real} rendering (\geomean{1.38}{\s} \ci{1.32}{1.43}), which is the only statistically significant difference (\ttest{19}{0.3}, \p{0.005}).
|
||||||
The \level{Mixed} rendering was in between (\geomean{1.56}{\s} \ci{1.49}{1.63}).
|
The \level{Mixed} rendering was in between (\geomean{1.56}{\s} \ci{1.49}{1.63}).
|
||||||
|
|
||||||
@@ -47,17 +47,17 @@ The \level{Mixed} rendering was in between (\geomean{1.56}{\s} \ci{1.49}{1.63}).
|
|||||||
|
|
||||||
The frames analyzed were those in which the participants actively touched the comparison textures with a finger speed greater than \SI{1}{\mm\per\second}.
|
The frames analyzed were those in which the participants actively touched the comparison textures with a finger speed greater than \SI{1}{\mm\per\second}.
|
||||||
|
|
||||||
A \LMM \ANOVA with by-participant random slopes for \factor{Visual Rendering} indicated only one statistically significant effect on the total distance traveled by the finger in a trial of \factor{Visual Rendering} (\anova{2}{18}{3.9}, \p{0.04}, \figref{results/trial_distances}).
|
A \LMM \ANOVA with by-participant random slopes for \factor{Visual Rendering} indicated only one statistically significant effect: that of \factor{Visual Rendering} on the total distance traveled by the finger in a trial (\anova{2}{18}{3.9}, \p{0.04}, see \figref{results/trial_distances}).
|
||||||
On average, participants explored a larger distance with the \level{Real} rendering (\geomean{20.0}{\cm} \ci{19.4}{20.7}) than with \level{Virtual} rendering (\geomean{16.5}{\cm} \ci{15.8}{17.1}), which is the only statistically significant difference (\ttest{19}{1.2}, \p{0.03}), with the \level{Mixed} rendering (\geomean{17.4}{\cm} \ci{16.8}{18.0}) in between.
|
On average, participants explored a larger distance with the \level{Real} rendering (\geomean{20.0}{\cm} \ci{19.4}{20.7}) than with \level{Virtual} rendering (\geomean{16.5}{\cm} \ci{15.8}{17.1}), which is the only statistically significant difference (\ttest{19}{1.2}, \p{0.03}), with the \level{Mixed} rendering (\geomean{17.4}{\cm} \ci{16.8}{18.0}) in between.
|
||||||
|
|
||||||
Another \LMM \ANOVA with by-trial and by-participant random intercepts but no random slopes indicated only one statistically significant effect on \response{Finger Speed} of \factor{Visual Rendering} (\anova{2}{2142}{2.0}, \pinf{0.001}, \figref{results/trial_speeds}).
|
Another \LMM \ANOVA with by-trial and by-participant random intercepts but no random slopes indicated only one statistically significant effect: that of \factor{Visual Rendering} on \response{Finger Speed} (\anova{2}{2142}{2.0}, \pinf{0.001}, see \figref{results/trial_speeds}).
|
||||||
On average, the textures were explored with the highest speed with the \level{Real} rendering (\geomean{5.12}{\cm\per\second} \ci{5.08}{5.17}), the lowest with the \level{Virtual} rendering (\geomean{4.40}{\cm\per\second} \ci{4.35}{4.45}), and the \level{Mixed} rendering (\geomean{4.67}{\cm\per\second} \ci{4.63}{4.71}) in between.
|
On average, the textures were explored with the highest speed with the \level{Real} rendering (\geomean{5.12}{\cm\per\second} \ci{5.08}{5.17}), the lowest with the \level{Virtual} rendering (\geomean{4.40}{\cm\per\second} \ci{4.35}{4.45}), and the \level{Mixed} rendering (\geomean{4.67}{\cm\per\second} \ci{4.63}{4.71}) in between.
|
||||||
All pairwise differences were statistically significant: \level{Real} \vs \level{Virtual} (\ttest{19}{1.17}, \pinf{0.001}), \level{Real} \vs \level{Mixed} (\ttest{19}{1.10}, \pinf{0.001}), and \level{Mixed} \vs \level{Virtual} (\ttest{19}{1.07}, \p{0.02}).
|
All pairwise differences were statistically significant: \level{Real} \vs \level{Virtual} (\ttest{19}{1.17}, \pinf{0.001}), \level{Real} \vs \level{Mixed} (\ttest{19}{1.10}, \pinf{0.001}), and \level{Mixed} \vs \level{Virtual} (\ttest{19}{1.07}, \p{0.02}).
|
||||||
|
|
||||||
This means that within the same time window on the same surface, participants explored the comparison texture on average at a greater distance and at a higher speed when in the \RE without visual representation of the hand (\level{Real} condition) than when in \VR (\level{Virtual} condition).
|
This means that within the same time window on the same surface, participants explored the comparison texture on average at a greater distance and at a higher speed when in the \RE without visual representation of the hand (\level{Real} condition) than when in \VR (\level{Virtual} condition).
|
||||||
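The geometric means with bootstrap \percent{95} \CI shown below can be computed with a plain percentile bootstrap; a minimal sketch with illustrative names:

geomean <- function(x) exp(mean(log(x)))   # geometric mean of positive values
boot_gm_ci <- function(x, n = 5000) {
  gms <- replicate(n, geomean(sample(x, replace = TRUE)))
  quantile(gms, c(0.025, 0.975))           # percentile 95% CI
}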
|
|
||||||
\begin{subfigs}{results_finger}{Results of the performance metrics for the rendering condition.}[
|
\begin{subfigs}{results_finger}{Results of the performance metrics for the rendering condition.}[
|
||||||
Boxplots and geometric means with bootstrap \percent{95} \CI, with Tukey's \HSD pairwise comparisons: * is \pinf{0.05}, ** is \pinf{0.01} and *** is \pinf{0.001}.
|
Boxplots and geometric means with bootstrap \percent{95} \CI and Tukey's \HSD pairwise comparisons: * is \pinf{0.05}, ** is \pinf{0.01} and *** is \pinf{0.001}.
|
||||||
][
|
][
|
||||||
\item Response time at the end of a trial.
|
\item Response time at the end of a trial.
|
||||||
\item Distance travelled by the finger in a trial.
|
\item Distance travelled by the finger in a trial.
|
||||||
@@ -71,8 +71,6 @@ This means that within the same time window on the same surface, participants ex
|
|||||||
\subsection{Questionnaires}
|
\subsection{Questionnaires}
|
||||||
\label{results_questions}
|
\label{results_questions}
|
||||||
|
|
||||||
%\figref{results/question_heatmaps} shows the median and interquartile range (IQR) ratings to the questions in \tabref{questions} and to the NASA-TLX questionnaire.
|
|
||||||
%
|
|
||||||
Friedman tests were employed to compare the ratings to the questions (\tabref{questions1} and \tabref{questions2}), with post-hoc Wilcoxon signed-rank tests and Holm-Bonferroni adjustment, except for the questions regarding the virtual hand that were directly compared with Wilcoxon signed-rank tests.
|
Friedman tests were employed to compare the ratings to the questions (\tabref{questions1} and \tabref{questions2}), with post-hoc Wilcoxon signed-rank tests and Holm-Bonferroni adjustment, except for the questions regarding the virtual hand that were directly compared with Wilcoxon signed-rank tests.
|
||||||
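A minimal sketch of these tests in R, assuming a data frame q with one rating per participant and rendering, ordered consistently by participant — names are illustrative:

friedman.test(rating ~ rendering | participant, data = q)
pairwise.wilcox.test(q$rating, q$rendering, paired = TRUE,
                     p.adjust.method = "holm")  # Holm-adjusted post-hoc tests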
|
|
||||||
\figref{results_questions} shows these ratings for questions where statistically significant differences were found (results are shown as mean $\pm$ standard deviation):
|
\figref{results_questions} shows these ratings for questions where statistically significant differences were found (results are shown as mean $\pm$ standard deviation):
|
||||||
@@ -103,13 +101,12 @@ The overall workload (mean NASA-TLX score) was low (\num{21 \pm 14}), with no st
|
|||||||
The textures were also overall found to be very much caused by the finger movements (\response{Texture Agency}, \num{4.5 \pm 1.0}) with a very low perceived latency (\response{Texture Latency}, \num{1.6 \pm 0.8}), and to be quite realistic (\response{Texture Realism}, \num{3.6 \pm 0.9}) and quite plausible (\response{Texture Plausibility}, \num{3.6 \pm 1.0}).
|
The textures were also overall found to be very much caused by the finger movements (\response{Texture Agency}, \num{4.5 \pm 1.0}) with a very low perceived latency (\response{Texture Latency}, \num{1.6 \pm 0.8}), and to be quite realistic (\response{Texture Realism}, \num{3.6 \pm 0.9}) and quite plausible (\response{Texture Plausibility}, \num{3.6 \pm 1.0}).
|
||||||
The vibrations were felt as slightly weak overall (\response{Vibration Strength}, \num{4.2 \pm 1.1}), and the vibrotactile device was perceived as neither distracting (\response{Device Distraction}, \num{1.2 \pm 0.4}) nor uncomfortable (\response{Device Discomfort}, \num{1.3 \pm 0.6}).
|
The vibrations were felt as slightly weak overall (\response{Vibration Strength}, \num{4.2 \pm 1.1}), and the vibrotactile device was perceived as neither distracting (\response{Device Distraction}, \num{1.2 \pm 0.4}) nor uncomfortable (\response{Device Discomfort}, \num{1.3 \pm 0.6}).
|
||||||
|
|
||||||
Participants were mixed between feeling the vibrations on the surface or on the top of their finger (\response{Vibration Location}, \num{3.9 \pm 1.7}); the distribution of scores was split between the two poles of the scale with \level{Real} and \level{Mixed} renderings (\percent{42.5} more on surface or on finger top, \percent{15} neutral), but there was a trend towards the top of the finger in VR renderings (\percent{65} \vs \percent{25} more on surface and \percent{10} neutral), but this difference was not statistically significant neither.
|
Participants were mixed between feeling the vibrations on the surface or on the top of their finger (\response{Vibration Location}, \num{3.9 \pm 1.7}): the distribution of scores was split between the two poles of the scale with the \level{Real} and \level{Mixed} renderings (\percent{42.5} more on the surface or on the top of the finger, \percent{15} neutral), whereas there was a trend towards the top of the finger in the \VR rendering (\percent{65} \vs \percent{25} more on the surface and \percent{10} neutral); this difference was not statistically significant either.
|
||||||
|
|
||||||
\begin{tab}{questions2}
|
\begin{tab}{questions2}
|
||||||
{NASA-TLX questions asked to participants after each \factor{Visual Rendering} block of trials.}
|
{NASA-TLX questions asked to participants after each \factor{Visual Rendering} block of trials.}
|
||||||
[
|
[
|
||||||
Questions were bipolar 100-point scales (0~=~Very Low and 100~=~Very High, except for Performance where 0~=~Perfect and 100~=~Failure), with increments of 5.
|
Questions were bipolar 100-point scales (0~=~Very Low and 100~=~Very High, except for Performance where 0~=~Perfect and 100~=~Failure), with increments of 5.
|
||||||
%Participants were shown only the labels for all questions.
|
|
||||||
]
|
]
|
||||||
\begin{tabularx}{\linewidth}{l X}
|
\begin{tabularx}{\linewidth}{l X}
|
||||||
\toprule
|
\toprule
|
||||||
@@ -124,15 +121,3 @@ Participants were mixed between feeling the vibrations on the surface or on the
|
|||||||
\bottomrule
\end{tabularx}
\end{tab}

@@ -4,9 +4,6 @@
The results showed a difference in vibrotactile roughness perception between the three visual rendering conditions.
Given the estimated \PSEs, the textures were on average perceived as \enquote{rougher} in the \level{Real} rendering than in the \level{Virtual} (\percent{-2.8}) and \level{Mixed} (\percent{-6.0}) renderings (\figref{results/trial_pses}).
A \PSE difference in the same range was found for perceived stiffness, with the haptic feedback perceived as \enquote{stiffer} in \VR and \enquote{softer} in \AR \cite{gaffary2017ar}.
Surprisingly, the \PSE of the \level{Real} rendering was shifted to the right (to be \enquote{rougher}, \percent{7.9}) compared to the reference texture, whereas the \PSEs of the \level{Virtual} (\percent{5.1}) and \level{Mixed} (\percent{1.9}) renderings were perceived as \enquote{smoother} and closer to the reference texture (\figref{results/trial_predictions}).
The sensitivity of participants to roughness differences also varied, with the \level{Real} rendering having the best \JND (\percent{26}), followed by the \level{Virtual} (\percent{30}) and \level{Mixed} (\percent{33}) renderings (\figref{results/trial_jnds}).
These \JND values are within, though at the upper end of, the range reported in previous studies \cite{choi2013vibrotactile}, which may be due to the location of the actuator on the top of the finger's middle phalanx, which is less sensitive to vibration than the fingertip.
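As a reminder of how these two measures relate: assuming a cumulative Gaussian psychometric function $\Psi(x) = \Phi\left((x - \text{PSE})/\sigma\right)$ fitted to the proportion of \enquote{rougher} responses (the exact fitting procedure used may differ), the \PSE is the \percent{50} point of $\Psi$ and the \JND corresponds to its semi-interquartile range:
\[
\text{JND} = \frac{\Psi^{-1}(0.75) - \Psi^{-1}(0.25)}{2} = 0.6745\,\sigma .
\]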
@@ -23,11 +20,9 @@ The \level{Mixed} rendering had the lowest \PSE and highest perceived latency, t

Our wearable visuo-haptic texture augmentation system, described in \chapref{vhar_system}, aimed to provide coherent visuo-haptic renderings registered with the \RE.
Yet, it involves different sensory interaction loops between the user's movements and the visuo-haptic feedback (\figref[vhar_system]{diagram} and \figref[introduction]{interaction-loop}), which may not feel synchronized with each other or with proprioception.
Therefore, we hypothesize that the differences in the perception of vibrotactile roughness are due less to the visual rendering of the hand or the environment, and their associated differences in exploration behaviour, than to the difference in the \emph{perceived} latency between one's own hand (visual and proprioceptive) and the virtual hand (visual and haptic).
The perceived delay was greatest in \AR, where the virtual hand visually lags significantly behind the real one, and less so in \VR, where only the proprioceptive sense can help detect the lag.
This delay was not perceived when touching the virtual haptic textures without visual augmentation, because only the finger velocity was used to render them: despite the varied finger movements and velocities while exploring the textures, the participants did not perceive any latency in the vibrotactile rendering (\secref{results_questions}).
\textcite{diluca2011effects} similarly demonstrated, in a \VST-\AR setup, how visual latency relative to proprioception increased the perceived stiffness of a virtual piston, while haptic latency decreased it (\secref[related_work]{ar_vr_haptic}).
Another complementary explanation could be a pseudo-haptic effect (\secref[related_work]{visual_haptic_influence}) of the displacement of the virtual hand, as already observed with this vibrotactile texture rendering, but seen on a screen \cite{ujitoko2019modulating}.
Such hypotheses could be tested by manipulating the latency and pose estimation accuracy of the virtual hand or the vibrotactile feedback.
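To make the velocity-driven rendering mentioned above concrete, a minimal sketch of one common scheme follows, in which the instantaneous vibration frequency is the finger speed divided by the spatial period of the texture pattern; the names and values are illustrative, not the exact rendering of \chapref{vhar_system}.
\begin{verbatim}
import numpy as np

SPATIAL_PERIOD = 0.002  # m, illustrative period of the texture pattern

def vibration_samples(velocities, dt, amplitude=1.0):
    """One vibration sample per finger velocity sample (m/s), dt in s.
    The signal depends only on the finger speed, not on a delayed
    position estimate, so no latency is introduced by tracking."""
    phase, samples = 0.0, []
    for v in velocities:
        frequency = abs(v) / SPATIAL_PERIOD  # Hz, grows with finger speed
        phase += 2.0 * np.pi * frequency * dt
        samples.append(amplitude * np.sin(phase))
    return np.array(samples)
\end{verbatim}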
@@ -3,23 +3,19 @@

In this chapter, we studied how the perception of wearable haptic texture augmentations is affected by the visual feedback of the virtual hand and the environment, being either real, augmented or virtual.
Using the wearable visuo-haptic augmentation system presented in \chapref{vhar_system}, we augmented the perceived roughness of real surfaces with virtual vibrotactile textures rendered on the finger.
With an \OST-\AR headset, which could be switched to a \VR-only view, we considered three visual rendering conditions: (1) without visual augmentation, (2) with a realistic virtual hand rendering in \AR, and (3) with the same virtual hand in \VR.
We then evaluated the perceived roughness augmentation in these three visual conditions with a psychophysical user study involving 20 participants and extensive questionnaires.

Our results showed that the visual virtuality of the hand (real or virtual) and the environment (\AR or \VR) had a significant effect on the perception of haptic textures and the exploration behaviour of the participants.
The textures were on average perceived as \enquote{rougher} when touched with the real hand alone than with a virtual hand in \AR, with \VR in between.
Similarly, the sensitivity to differences in roughness was best with the real hand, worst with \AR, and in between with \VR.
Exploration behaviour was also slower in \VR than with the real hand alone, although the subjective evaluation of the texture was not affected.
We hypothesized that this difference in perception was due to the \emph{perceived latency} between the finger movements and the different visual, haptic and proprioceptive feedbacks, which were the same in all visual renderings, but were more noticeable in \AR and \VR than without visual augmentation.

This study suggests that attention should be paid to the respective latencies of the visual and haptic sensory feedbacks inherent in such systems and, more importantly, to \emph{the perception of their possible asynchrony}.
Latencies should be measured \cite{friston2014measuring}, minimized to an acceptable level for users and kept synchronized with each other \cite{diluca2019perceptual}.
It also seems that the visual aspect of the hand or the environment in itself has little effect on the perception of haptic feedback, but that the degree of visual virtuality can affect the perceived asynchrony of the latencies, even when the latencies remain identical.
When designing for wearable haptics or integrating it into \AR/\VR, it seems important to test its perception in real (\RE), augmented (\AE) and virtual (\VE) environments.

In the next chapter, we present a second user study where we investigate the perception of simultaneous and co-localized visual and haptic texture augmentation.
We will use the same system presented in \chapref{vhar_system} and a visual rendering condition similar to the \level{Real} condition of this study, in \AR without the virtual hand overlay.
@@ -2,13 +2,12 @@
\label{intro}

Touching, grasping and manipulating virtual objects are fundamental interactions in \AR (\secref[related_work]{ve_tasks}) and essential for many of its applications (\secref[related_work]{ar_applications}).
Manipulation of virtual objects is achieved using a virtual hand interaction technique that represents the user's hand in the \VE and simulates interaction with virtual objects (\secref[related_work]{ar_virtual_hands}).
The visual feedback of the virtual hand is a key element for interacting and manipulating virtual objects in \VR \cite{prachyabrued2014visual,grubert2018effects}.
Some work has also investigated the visual feedback of the virtual hand in \AR, but either not in a context of virtual object manipulation \cite{al-kalbani2016analysis,yoon2020evaluating} or limited to a single visual hand augmentation \cite{piumsomboon2014graspshell,blaga2017usability,maisto2017evaluation}.
\Gls{OST}-\AR also has significant perceptual differences from \VR due to the lack of mutual occlusion between the hand and the virtual object in \OST-\AR (\secref[related_work]{ar_displays}), and the inherent delays between the user's hand and the result of the interaction simulation (\secref[related_work]{ar_virtual_hands}).

In this chapter, we investigate the \textbf{visual rendering of the virtual hand as augmentation of the real hand} for direct hand manipulation of virtual objects with an \OST-\AR headset.
To this end, we selected from the literature and compared the most popular visual hand augmentations used to interact with virtual objects in \AR.
The virtual hand is \textbf{displayed superimposed} on the user's hand with these visual renderings, providing \textbf{feedback on the tracking} of the real hand, as shown in \figref{hands}.
The movement of the virtual hand is also \textbf{constrained to the surface} of the virtual object, providing an additional \textbf{feedback on the interaction} with the virtual object.
@@ -56,7 +56,7 @@ Following the guidelines of \textcite{bergstrom2021how} for designing object man
\label{push-task}

The first manipulation task consists in pushing a virtual object along a real flat surface towards a target placed on the same plane (\figref{method/task-push}).
The virtual object to manipulate is a small \qty{5}{\cm} blue and opaque cube, while the target is a (slightly) bigger \qty{7}{\cm} blue and semi-transparent volume.
At every repetition of the task, the cube to manipulate always spawns at the same place, on top of a real table in front of the user.
On the other hand, the target volume can spawn in eight different locations on the same table, located on a \qty{20}{\cm} radius circle centered on the cube, at \qty{45}{\degree} from each other (again \figref{method/task-push}).
Users are asked to push the cube towards the target volume using their fingertips in any way they prefer.
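For illustration, the eight target locations can be generated as points on the \qty{20}{\cm} circle around the cube; this is only a sketch of the placement described above, not the actual experiment code.
\begin{verbatim}
import math

RADIUS = 0.20  # m, circle of target locations around the cube

# Eight target centres on the table plane, at 45-degree steps
targets = [
    (RADIUS * math.cos(math.radians(45 * k)),
     RADIUS * math.sin(math.radians(45 * k)))
    for k in range(8)
]
\end{verbatim}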
@@ -100,12 +100,11 @@ This design led to a total of 2 manipulation tasks \x 6 visual hand augmentation
\subsection{Apparatus}
\label{apparatus}

We used the \OST-\AR headset HoloLens~2, as described in \secref[vhar_system]{virtual_real_registration}.
It is also able to track the user's fingers.
We measured the latency of the hand tracking at \qty{15}{\ms}, independent of the hand movement speed.

The implementation of our experiment was done using Unity (v2022.1), PhysX (v4.1), and the Mixed Reality Toolkit (MRTK, v2.8).
The compiled application ran directly on the HoloLens~2 at \qty{60}{FPS}.

The default \ThreeD hand model from MRTK was used for all visual hand augmentations.
@@ -114,7 +113,7 @@ A calibration was performed for every participant, to best adapt the size of the
A set of empirical tests enabled us to choose the best rendering characteristics in terms of transparency and brightness for the virtual objects and hand renderings, which were applied throughout the experiment.

The hand tracking information provided by MRTK was used to construct a virtual articulated physics-enabled hand (\secref[related_work]{ar_virtual_hands}) using PhysX.
It featured 25 \DoFs, including the fingers' proximal, middle, and distal phalanges.
To allow effective (and stable) physical interactions between the hand and the virtual cube to manipulate, we implemented an approach similar to that of \textcite{borst2006spring}, where a series of virtual springs with high stiffness are used to couple the physics-enabled hand with the tracked hand.
As before, a set of empirical tests was used to select the most effective physical characteristics in terms of mass, elastic constant, friction, damping, colliders size, and shape for the (tracked) virtual hand interaction model.

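As an illustration of this spring coupling, the following minimal sketch computes, at each physics step, the force pulling one joint of the physics-enabled proxy hand towards its tracked pose; the gains are hypothetical, not the values tuned in the experiment.
\begin{verbatim}
import numpy as np

STIFFNESS = 800.0  # N/m, high stiffness so the proxy follows closely
DAMPING = 40.0     # N.s/m, damps oscillations of the coupled body

def coupling_force(tracked_pos, proxy_pos, proxy_vel):
    """Spring-damper force applied to the physics proxy of one joint."""
    error = np.asarray(tracked_pos) - np.asarray(proxy_pos)
    return STIFFNESS * error - DAMPING * np.asarray(proxy_vel)
\end{verbatim}
The physics engine then integrates this force on the proxy rigid bodies, which in turn push the virtual cube.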
@@ -159,9 +158,11 @@ Participants also rated each visual hand augmentation individually on six questi
\item \response{Difficulty}: How difficult were the tasks?
\item \response{Fatigue}: How fatiguing (mentally and physically) were the tasks?
\item \response{Precision}: How precise were you in performing the tasks?
\item \response{Performance}: How successful were you in performing the tasks?
\item \response{Efficiency}: How fast/efficient do you think you were in performing the tasks?
\item \response{Rating}: How much do you like each visual hand?
\end{itemize}

Finally, participants were encouraged to comment out loud on the conditions throughout the experiment, as well as in an open-ended question at its end, to gather additional qualitative information.

The results were analyzed using R (v4.4) and the packages \textit{afex} (v1.4), \textit{ARTool} (v0.11), and \textit{performance} (v0.13).
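The analysis itself was run in R; purely as an illustration of the models reported below, an analogous linear mixed model with by-participant random intercepts can be sketched in Python with \textit{statsmodels} (the column names are hypothetical).
\begin{verbatim}
import pandas as pd
import statsmodels.formula.api as smf

trials = pd.read_csv("trials.csv")  # one row per trial (hypothetical file)

model = smf.mixedlm(
    "completion_time ~ hand_rendering * task",  # fixed effects
    data=trials,
    groups=trials["participant"],  # by-participant random intercepts
)
print(model.fit().summary())
\end{verbatim}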
@@ -46,10 +46,7 @@ a \LMM \ANOVA with by-participant random intercepts and random slopes for \facto
and \factor{Target} (\anova{7}{3270}{4.1}, \pinf{0.001}).

It was smaller with \level{None} than with \level{Occlusion} (\pinf{0.001}), \level{Contour} (\pinf{0.001}), \level{Skeleton} (\pinf{0.001}) and \level{Mesh} (\pinf{0.001}).
This result is evidence of the lack of confidence of participants with no visual hand augmentation: they grasped the cube more firmly to secure it.

The \response{Grip Aperture} was larger on the right-front (\level{RF}) target volume, indicating a higher confidence, than on back and side targets (\level{R}, \level{RB}, \level{B}, \level{L}, \p{0.03}).

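For clarity, the \response{Grip Aperture} metric is commonly computed per frame as the distance between the tracked thumb and index fingertips; whether this study used exactly this definition is an assumption of the following minimal sketch.
\begin{verbatim}
import numpy as np

def grip_aperture(thumb_tip, index_tip):
    """Thumb-index fingertip distance, positions as 3-vectors in metres."""
    return float(np.linalg.norm(np.asarray(thumb_tip) - np.asarray(index_tip)))

# e.g. fingertips 5 cm apart, about the size of the virtual cube
print(grip_aperture([0.0, 0.0, 0.0], [0.05, 0.0, 0.0]))  # 0.05
\end{verbatim}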
@@ -4,7 +4,6 @@
We evaluated six visual hand augmentations, as described in \secref{hands}, displayed on top of the real hand, in two virtual object manipulation tasks in \AR.

During the \level{Push} task, the \level{Skeleton} hand rendering was the fastest (\figref{results/Push-CompletionTime}), as participants employed fewer and longer contacts to adjust the cube inside the target volume (\figref{results/Push-ContactsCount} and \figref{results/Push-MeanContactTime}).
However, during the \level{Grasp} task, despite no difference in \response{Completion Time}, providing no visible hand rendering (\level{None} and \level{Occlusion} renderings) led to more failed grasps or cube drops (\figref{results/Grasp-ContactsCount} and \figref{results/Grasp-MeanContactTime}).
Indeed, participants found the \level{None} and \level{Occlusion} renderings less effective (\figref{results/Ranks-Grasp}) and less precise (\figref{results_questions}).
To understand whether the participants' previous experience might have played a role, we also carried out an additional statistical analysis considering \VR experience as an additional between-subjects factor, \ie \VR novices \vs \VR experts (\enquote{I use it every week}, see \secref{participants}).
@@ -12,8 +11,8 @@ We found no statistically significant differences when comparing the considered

All visual hand augmentations showed \response{Grip Apertures} close to the size of the virtual cube, except for the \level{None} rendering (\figref{results/Grasp-GripAperture}), with which participants applied stronger grasps, \ie less distance between the fingertips.
Having no visual hand augmentation, but only the reaction of the cube to the interaction as feedback, made participants less confident in their grip.
This result contrasts with the wrongly estimated grip apertures observed by \textcite{al-kalbani2016analysis} in an exocentric \VST-\AR setup.
Also, while some participants found the absence of visual hand augmentation more natural, many of them commented on the importance of having feedback on the tracking of their hands, as observed by \textcite{xiao2018mrtouch} with an \OST-\AR headset.

Yet, participants' opinions of the visual hand augmentations were mixed on many questions, except for the \level{Occlusion} one, which was perceived as less effective than more \enquote{complete} visual hands such as the \level{Contour}, \level{Skeleton}, and \level{Mesh} hands (\figref{results_questions}).
However, due to the latency of the hand tracking and the visual hand reacting to the cube, almost all participants thought the \level{Occlusion} rendering to be a \enquote{shadow} of the real hand on the cube.
@@ -24,7 +23,7 @@ while others found that it gave them a better sense of the contact points and im
This result is consistent with \textcite{saito2021contact}, who found that displaying the points of contact was beneficial for grasping a virtual object over an opaque visual hand overlay.

To summarize, when employing visual feedback of the virtual hand overlaying the real hand, participants were more performant and confident in manipulating virtual objects with bare hands in \AR.
These results contrast with similar manipulation studies in on-screen \AR, where the presence of a visual hand augmentation was found by participants to improve the usability of the interaction, but not their performance \cite{blaga2017usability,maisto2017evaluation,meli2018combining}.
Our results show the most effective visual hand augmentation to be the \level{Skeleton} one.
Participants appreciated that it provided a detailed and precise view of the tracking of the real hand, without hiding or masking it.
Although the \level{Contour} and \level{Mesh} hand renderings were also highly rated, some participants felt that they were too visible and masked the real hand.
@@ -1,28 +1,21 @@
\section{Conclusion}
\label{conclusion}

In this chapter, we addressed the challenge of touching, grasping and manipulating virtual objects directly with the hand using an \OST-\AR headset.
To do so, we proposed to evaluate visual renderings of the virtual hand as augmentation of the real hand.
Superimposed on the user's hand, these visual renderings provide feedback from the virtual hand, which tracks the real hand and simulates the interaction with virtual objects as a proxy.
We first selected and compared the six most popular visual hand augmentations used to interact with virtual objects in \AR.
Then, in a user study with 24 participants, we evaluated the effect of these six visual hand augmentations on the user performance and experience in two representative manipulation tasks.

Our results showed that a visual hand augmentation improved the performance, perceived effectiveness and confidence of participants compared to no augmentation.
A skeleton rendering, which provided a detailed view of the tracked joints and phalanges while not hiding the real hand, was the most performant and effective.
The contour and mesh renderings were found to mask the real hand, while the tips rendering was controversial.
The occlusion rendering had too much tracking latency to be effective.
This is consistent with similar manipulation studies in \VR and in \VST-\AR setups.

This study suggests that a \ThreeD visual hand augmentation is important in \OST-\AR when interacting with a virtual hand technique, particularly when it involves precise finger movements in relation to virtual content, \eg \ThreeD windows, buttons and sliders, or more complex tasks, such as stacking or assembly.
A minimal but detailed rendering of the virtual hand that does not hide the real hand, such as the skeleton rendering we evaluated, seems to be the best compromise between the richness and effectiveness of the feedback.

In addition to visual augmentation of the hand, direct manipulation of virtual objects with the hand can also benefit from wearable haptic feedback.
In the next chapter, we explore in a user study two wearable vibrotactile contact feedback devices, located at four positionings on the hand so as not to cover the fingertips.
We evaluate their effect on user performance and experience in the same manipulation tasks as in this chapter, with the best visual hand augmentation found in this study.

@@ -10,10 +10,10 @@ Conjointly, a few studies have explored and compared the effects of visual and h
\textcite{sarac2022perceived} and \textcite{palmer2022haptic} studied the effects of providing haptic feedback about contacts at the fingertips using haptic devices worn at the wrist, testing different mappings.
Their results showed that moving the haptic feedback away from the point(s) of contact is possible and effective, and that its impact is more significant in the absence of the visual feedback of the virtual hand.
A final question is whether one or the other of these hand feedbacks (haptic or visual) should be preferred \cite{maisto2017evaluation,meli2018combining}, or whether a combined visuo-haptic feedback is beneficial for users.
However, these studies were conducted in setups with a screen displaying the \VE view and not using an \AR headset (\secref[related_work]{ar_displays}).
In fact, both hand feedbacks can provide sufficient sensory feedback for efficient direct hand manipulation of virtual objects in \AR, or, conversely, they can be shown to be complementary.

In this chapter, we investigate the role of \textbf{visuo-haptic feedback of the hand when manipulating virtual objects} using an \OST-\AR headset and wearable vibrotactile haptics.
We selected \textbf{four different delocalized positionings on the hand} that have been previously proposed in the literature for direct hand interaction in \AR using wearable haptic devices (\secref[related_work]{vhar_haptics}): on the nails, the proximal phalanges, the wrist, and the nails of the opposite hand.
We focused on vibrotactile feedback, as it is used in most wearable haptic devices and has the lowest encumbrance.
In a \textbf{user study}, using the \OST-\AR headset Microsoft HoloLens~2 and two \ERM vibrotactile motors, we evaluated the effect of the four positionings with \textbf{two contact vibration techniques} on the user performance and experience with the same two manipulation tasks as in \chapref{visual_hand}.
@@ -88,14 +88,14 @@ Apparatus and experimental procedure were similar to the \chapref{visual_hand},
We report here only the differences.

We employed the same vibrotactile device used by \textcite{devigne2020power}.
It is composed of two encapsulated \ERM (\secref[related_work]{vibrotactile_actuators}) vibration motors (Pico-Vibe 304-116, Precision Microdrive).
They are small and light (\qty{5}{\mm} \x \qty{20}{\mm}, \qty{1.2}{\g}) actuators capable of vibration frequencies from \qtyrange{120}{285}{\Hz} and amplitudes from \qtyrange{0.2}{1.15}{\g}.
They have a latency of \qty{20}{\ms} that we partially compensated for at the software level with slightly larger colliders, to trigger the vibrations close to the moment the finger touched the cube.
These two outputs (frequency and amplitude) vary linearly together, based on the voltage applied.
They were controlled by an Arduino Pro Mini (\qty{3.3}{\V}) and a custom board that delivered the voltage independently to each motor.
A small \qty{400}{mAh} Li-ion battery allowed for 4 hours of constant vibration at maximum intensity.
A Bluetooth module (RN42XV, Microchip Technology Inc.) mounted on the Arduino ensured wireless communication with the HoloLens~2.

To ensure minimal encumbrance, we used the same two motors throughout the experiment, moving them to the considered positioning before each new block.
Thin self-gripping straps were placed on the five positionings, with an elastic strap stitched on top to place the motor, as shown in \figref{method/locations}.
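Returning to the latency compensation mentioned above: the colliders were enlarged by a fixed margin, and one way to choose such a margin is from a typical finger speed and the measured actuator latency. A minimal sketch, with illustrative values:
\begin{verbatim}
ACTUATOR_LATENCY = 0.020  # s, measured latency of the ERM motors

def collider_margin(finger_speed):
    """Extra collider radius (m) so the vibration command is sent about
    one actuator latency before the visual contact occurs."""
    return finger_speed * ACTUATOR_LATENCY

# e.g. at 0.25 m/s the vibration is triggered ~5 mm before contact
print(collider_margin(0.25))  # 0.005
\end{verbatim}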
@@ -140,3 +140,5 @@ They then rated the ten combinations of \factor{Positioning} \x \factor{Vibratio

Finally, they rated the ten combinations of \factor{Positioning} \x \factor{Hand} on a 7-point Likert scale (1=Not at all, 7=Extremely):
\response{Positioning \x Hand Rating}: How much do you like each combination of vibrotactile location for each visual hand rendering?

The results were analyzed using R (v4.4) and the packages \textit{afex} (v1.4), \textit{ARTool} (v0.11), and \textit{performance} (v0.13).
@@ -5,7 +5,7 @@

On the time to complete a trial,
a \LMM \ANOVA with by-participant random intercepts indicated two statistically significant effects:
\factor{Positioning} (\anova{4}{2341}{3.6}, \p{0.007}, see \figref{results/Push-CompletionTime-Location-Overall-Means})
and \factor{Target} (\anova{1}{1990}{3.9}, \p{0.05}).
\level{Fingertips} was slower than \level{Proximal} (\percent{+11}, \p{0.01}) or \level{Opposite} (\percent{+12}, \p{0.03}).
There was no evidence of an advantage of \level{Proximal} or \level{Opposite} over \level{Nowhere}, nor of a disadvantage of \level{Fingertips} relative to \level{Nowhere}.
@@ -24,8 +24,8 @@ This could indicate more difficulties to adjust the virtual cube inside the targ

On the mean time spent on each contact,
a \LMM \ANOVA with by-participant random intercepts indicated two statistically significant effects of
\factor{Positioning} (\anova{4}{1990}{11.5}, \pinf{0.001}, see \figref{results/Push-TimePerContact-Location-Overall-Means})
and of \factor{Hand} (\anova{1}{1990}{16.1}, \pinf{0.001}, see \figref{results/Push-TimePerContact-Hand-Overall-Means})
but not of the \factor{Positioning} \x \factor{Hand} interaction.
It was shorter with \level{Fingertips} than with \level{Wrist} (\percent{-15}, \pinf{0.001}), \level{Opposite} (\percent{-11}, \p{0.01}), or \level{Nowhere} (\percent{-15}, \pinf{0.001});
and shorter with \level{Proximal} than with \level{Wrist} (\percent{-16}, \pinf{0.001}), \level{Opposite} (\percent{-12}, \p{0.005}), or \level{Nowhere} (\percent{-16}, \pinf{0.001}).
@@ -16,7 +16,7 @@ and \level{LF} was faster than \level{RB} (\p{0.03}).

On the number of contacts,
a \LMM \ANOVA with by-participant random intercepts indicated two statistically significant effects:
\factor{Positioning} (\anova{4}{3990}{15.1}, \pinf{0.001}, see \figref{results/Grasp-Contacts-Location-Overall-Means})
and \factor{Target} (\anova{3}{3990}{7.6}, \pinf{0.001}).
Fewer contacts were made with \level{Opposite} than with \level{Fingertips} (\percent{-26}, \pinf{0.001}), \level{Proximal} (\percent{-17}, \pinf{0.001}), or \level{Wrist} (\percent{-12}, \p{0.002});
but more with \level{Fingertips} than with \level{Wrist} (\percent{+13}, \p{0.002}) or \level{Nowhere} (\percent{+17}, \pinf{0.001}).
@@ -1,7 +1,7 @@
\section{Conclusion}
\label{conclusion}

In this chapter, we investigated the visuo-haptic feedback of the hand when manipulating virtual objects using an \OST-\AR headset and wearable vibrotactile haptics.
To do so, we provided vibrotactile feedback of the fingertip contacts with virtual objects by moving the haptic actuators away to positions that do not cover the inside of the hand: on the nails, the proximal phalanges, the wrist, and the nails of the opposite hand.
We selected these four different delocalized positions on the hand from the literature for direct hand interaction in \AR using wearable haptic devices.
In a user study, we compared twenty visuo-haptic feedbacks of the hand, as the combination of two vibrotactile contact techniques, provided at five different delocalized positions on the user's hand, with the two most representative visual hand augmentations established in \chapref{visual_hand}, \ie the skeleton hand rendering and no hand rendering.
@@ -12,8 +12,12 @@ The visual hand augmentation was perceived less necessary than the vibrotactile
This study provides evidence that moving the feedback away from the inside of the hand is a simple but promising approach for wearable haptics in \AR.

If integration with the hand tracking system allows it, and if the task requires it, a haptic ring worn on the middle or proximal phalanx seems preferable.
It can provide more realistic and appreciated feedback, being closer to the point of contact.
However, a calibration step seems important to adapt to the individual preferences and sensitivities of the user.
Yet, a wrist-mounted haptic device will be able to provide richer feedback by embedding more diverse haptic actuators with larger bandwidths and maximum amplitudes, while being less obtrusive than a ring.
It could thus provide more complex feedback of the contacts with the virtual objects.
Finally, we think that the visual hand augmentation complements the haptic contact rendering well by providing continuous feedback on hand tracking.
Such a visual augmentation can be disabled during the grasping phase to avoid redundancy with the haptic feedback of the contact with the virtual object.

\noindentskip The work described in \chapref{visual_hand} and \ref{visuo_haptic_hand} was published in IEEE Transactions on Haptics:

@@ -7,17 +7,17 @@ We conclude this thesis manuscript by summarizing our contributions and the main

\section{Summary}

In this manuscript, we showed how \OST-\AR headsets and wearable haptics can improve direct hand interaction with virtual and augmented objects.
Wearable haptics can provide rich tactile feedback on virtual objects and augment the perception of real objects, both directly touched by the hand, while preserving freedom of movement and interaction with the \RE.
However, their integration with \AR is still in its infancy and presents many design, technical, and human challenges.
We have structured this thesis around two research axes: \textbf{(I) modifying the visuo-haptic texture perception of real surfaces} and \textbf{(II) improving the manipulation of virtual objects}.

\noindentskip In \partref{perception}, we focused on the perception of wearable virtual textures that augment real surfaces.
Texture is a fundamental property of an object, perceived equally by sight and touch.
It is also one of the most studied haptic augmentations, but has not yet been integrated into \AR or \VR.
We \textbf{(1) proposed a wearable visuo-haptic texture augmentation system}, \textbf{(2)} evaluated how the perception of haptic texture augmentations is \textbf{affected by the visual feedback of the virtual hand} and the environment (real, augmented, or virtual), and \textbf{(3)} investigated the \textbf{perception of co-localized visuo-haptic texture augmentations}.

In \chapref{vhar_system}, we presented a system for \textbf{augmenting any real surface} with virtual \textbf{roughness textures with visuo-haptic feedback} using an \OST-\AR headset and a wearable vibrotactile device worn on the middle phalanx of the finger.
It allows \textbf{free visual and touch exploration} of the textures as if they were real: the user can view them from different angles and touch them with the bare finger without constraints on hand movement.
The user studies in the next two chapters were based on this system.

@@ -30,7 +30,7 @@ In \chapref{vhar_textures}, we investigated the perception of co-localized visua
We transposed the \textbf{data-driven visuo-haptic textures} from the \HaTT database to the system presented in \chapref{vhar_system} and conducted a user study with 20 participants to rate the coherence, realism, and perceived roughness of the combination of nine visuo-haptic texture pairs.
Participants integrated roughness sensations from both visual and haptic modalities well, with \textbf{haptics dominating perception}, and consistently identified and matched \textbf{clusters of visual and haptic textures with similar perceived roughness}.

\noindentskip In \partref{manipulation}, we focused on improving the manipulation of virtual objects directly with the hand using an \OST-\AR headset.
Our approach was to design visual augmentations of the hand and delocalized haptic feedback, based on the literature, and evaluate them in user studies.
We first considered \textbf{(1) the visual augmentation of the hand} and then \textbf{(2)} the combination of different \textbf{visuo-haptic feedback of the hand when manipulating virtual objects}.

@@ -60,12 +60,16 @@ In addition, combination with pseudo-haptic rendering techniques \cite{ujitoko20

\paragraph{Fully Integrated Tracking.}

In our system, we registered the \RE and the \VE using fiducial markers and a webcam external to the \AR headset.
This only allowed us to estimate the poses of the index finger and of the surface to be augmented with the haptic texture, but it was reliable and accurate enough for our needs.
In fact, preliminary tests we conducted showed that the built-in tracking capabilities of the HoloLens~2 were not able to track hands wearing a vibrotactile voice-coil device.
A more robust hand pose estimation system would support wearing haptic devices on the hand as well as holding real objects.
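For illustration, a minimal sketch of such marker-based pose estimation with OpenCV's ArUco module (assuming OpenCV >= 4.7 and a calibrated camera); the marker size, dictionary, and calibration values are hypothetical placeholders, not those of our actual setup.
\begin{verbatim}
# Minimal sketch: estimating the pose of fiducial markers seen by an
# external, calibrated webcam (OpenCV >= 4.7 ArUco API). Marker size,
# dictionary, and calibration values are hypothetical placeholders.
import cv2
import numpy as np

MARKER_SIZE = 0.03  # marker side length in meters (hypothetical)
camera_matrix = np.array([[800., 0., 320.],
                          [0., 800., 240.],
                          [0., 0., 1.]])
dist_coeffs = np.zeros(5)  # assume an undistorted camera

dictionary = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
detector = cv2.aruco.ArucoDetector(dictionary,
                                   cv2.aruco.DetectorParameters())

def marker_poses(image):
    """Return {marker_id: (rvec, tvec)} in the camera frame."""
    corners, ids, _ = detector.detectMarkers(image)
    poses = {}
    if ids is None:
        return poses
    # 3D corners of a square marker centered at its own origin.
    half = MARKER_SIZE / 2
    obj = np.array([[-half, half, 0], [half, half, 0],
                    [half, -half, 0], [-half, -half, 0]],
                   dtype=np.float32)
    for marker_id, c in zip(ids.flatten(), corners):
        ok, rvec, tvec = cv2.solvePnP(obj, c.reshape(4, 2),
                                      camera_matrix, dist_coeffs)
        if ok:
            poses[int(marker_id)] = (rvec, tvec)
    return poses
\end{verbatim}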

The spatial registration error \cite{grubert2018survey} and the temporal latency \cite{diluca2019perceptual} between the \RE and \VE should also be reduced to be imperceptible.
The effect of these spatial and temporal errors on the perception and manipulation of the virtual object should be systematically investigated.
Prediction of hand movements should also be considered to overcome such issues \cite{klein2020predicting,gamage2021predictable}.
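To illustrate the kind of latency compensation meant here, a minimal sketch of constant-velocity extrapolation of a tracked fingertip position; the latency value is a hypothetical placeholder, and the cited predictors are considerably more sophisticated.
\begin{verbatim}
# Minimal sketch: compensating end-to-end latency by extrapolating the
# tracked fingertip position under a constant-velocity assumption.
# The latency value is a hypothetical placeholder.
import numpy as np

LATENCY = 0.050  # seconds of tracking-to-display latency to compensate

class ConstantVelocityPredictor:
    def __init__(self):
        self.prev_pos = None
        self.prev_t = None

    def predict(self, pos, t):
        """pos: 3D position (meters) at timestamp t (seconds)."""
        pos = np.asarray(pos, dtype=float)
        if self.prev_pos is None or t <= self.prev_t:
            velocity = np.zeros(3)
        else:
            velocity = (pos - self.prev_pos) / (t - self.prev_t)
        self.prev_pos, self.prev_t = pos, t
        # Display the pose where the finger should be after LATENCY.
        return pos + velocity * LATENCY
\end{verbatim}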

A complementary solution would be to embed tracking sensors in the wearable haptic devices, such as an inertial measurement unit (IMU) or cameras \cite{preechayasomboon2021haplets}.
This would allow a fully portable and wearable visuo-haptic system to be used in practical applications.

\subsection*{Perception of Haptic Texture Augmentation in Augmented and Virtual Reality}
@@ -75,7 +79,7 @@ This would allow a complete portable and wearable visuo-haptic system to be used
In our user study, we assessed the effect of touching a vibrotactile texture augmentation with a real hand or a virtual hand, in \AR or \VR.
To control for the visual feedback, we decided not to display the virtual texture so that participants only saw and touched a uniform white real surface.
The visual information of a texture is as important as the haptic sensations for the perception of roughness, and the interaction between the two to form the overall texture perception is complex \cite{bergmanntiest2007haptic,yanagisawa2015effects,vardar2019fingertip}.
In particular, it remains to be investigated how the vibrotactile patterned textures we employed can be represented visually in a convincing way, as the visuo-haptic coupling of such virtual patterned textures is not trivial \cite{unger2011roughness}.

\paragraph{Broader Visuo-Haptic Conditions.}

@@ -96,13 +100,15 @@ The dynamic response of the finger should also be considered, and may vary betwe
As in the previous chapter, our aim was not to accurately reproduce real textures, but to alter the perception of a real surface being touched with simultaneous visual and haptic texture augmentations.
However, the results also have some limitations, as they addressed a small set of visuo-haptic textures that augmented the perception of smooth and white real surfaces.
Visuo-haptic texture augmentation might be difficult on surfaces that already have strong visual or haptic patterns \cite{asano2012vibrotactile}, or on objects with complex shapes.
A real surface could indeed be augmented not only to add visuo-haptic textures, but also to amplify, diminish, mask, or replace the existing real texture.
In addition, the visual textures used were simple color images not intended for use in a \ThreeD \VE, and enhancing their visual quality could improve the perception of visuo-haptic texture augmentation.
It would also be interesting to replicate the experiment in more controlled visuo-haptic environments, in \VR or with world-grounded haptic devices.
This would help to better understand how the rendering quality, spatial registration, and latency of virtual textures can affect their perception.
Finally, the role of visuo-haptic texture augmentation should also be evaluated in more complex tasks, such as object recognition and assembly, or in more concrete use cases, such as displaying and touching a museum object or a 3D-printed object before it is manufactured.

\paragraph{Specificities of Direct Touch.}

The haptic textures used were recordings and models of the vibrations of a hand-held probe sliding over real surfaces \cite{culbertson2014one}.
We generated the vibrotactile textures from the velocity magnitude of the finger, but the perceived roughness of real textures also depends on other factors such as the contact force, angle, posture, or contact surface \cite{schafer2017transfer}.
The respective importance of these factors for haptic texture perception is not yet fully understood \cite{richardson2022learning}.
It would be interesting to determine the importance of these factors for the perceived realism of virtual vibrotactile textures in the context of bare-finger touch.
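As an illustration of such velocity-driven rendering, a minimal sketch that derives the vibration frequency and amplitude of a virtual sinusoidal grating from the finger speed; the constants are hypothetical, and data-driven models fit recorded probe vibrations rather than a pure tone.
\begin{verbatim}
# Minimal sketch: synthesizing a vibrotactile texture signal from the
# finger speed over a virtual grating. Constants are hypothetical;
# data-driven models fit recorded vibrations instead of a pure tone.
import numpy as np

SAMPLE_RATE = 2000      # Hz, output rate of the actuator signal
GRATING_PERIOD = 0.002  # meters between texture ridges (hypothetical)
GAIN = 0.5              # maps speed to normalized amplitude

def texture_block(speed, duration, phase=0.0):
    """Vibration samples for a finger moving at `speed` (m/s).

    The output frequency is speed / GRATING_PERIOD, so faster strokes
    produce higher-frequency, stronger vibrations, as with real
    gratings.
    """
    n = int(duration * SAMPLE_RATE)
    t = np.arange(n) / SAMPLE_RATE
    freq = speed / GRATING_PERIOD  # Hz
    amplitude = np.clip(GAIN * speed, 0.0, 1.0)
    samples = amplitude * np.sin(2 * np.pi * freq * t + phase)
    new_phase = phase + 2 * np.pi * freq * duration
    return samples, new_phase
\end{verbatim}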
@@ -111,18 +117,24 @@ Finally, the virtual texture models should also be adaptable to individual sensi

\subsection*{Visual Augmentation of the Hand for Manipulating Virtual Objects in AR}

\paragraph{Other AR Displays.}

The visual hand augmentations we evaluated were displayed on the Microsoft HoloLens~2, which is a common \OST-\AR headset \cite{hertel2021taxonomy}.
We purposely chose this type of display because in \OST-\AR the lack of mutual occlusion between the hand and the virtual object is the most challenging to solve \cite{macedo2023occlusion}.
We therefore hypothesized that a visual hand augmentation would be more beneficial to users with this type of display.
However, the user's visual perception and experience are different with other types of displays, such as \VST-\AR, where the \RE is viewed through cameras and screens (\secref[related_work]{ar_displays}).
In particular, the mutual occlusion problem and the latency of hand pose estimation could be overcome with a \VST-\AR headset.
In this case, the occlusion rendering could be the most natural, realistic, and effective augmentation.
Yet, a visual hand augmentation could still be beneficial to users by providing depth cues and feedback on hand tracking, and should be evaluated as such.

\paragraph{More Practical Uses.}

We conducted the user study with two manipulation tasks that involved placing a virtual cube in a target volume, either by pushing it on a table or by grasping and lifting it.
These tasks are indeed fundamental building blocks for more complex manipulation tasks \cite[p.390]{laviolajr20173d}, such as stacking or assembling, which should be investigated as well.
Such tasks can require users to perform more complex finger movements and interactions with the virtual object.
Depending on the task, the importance of position, orientation, and depth information of the hand and the object may vary and affect the choice of visual hand augmentation.
More practical applications should also be considered, such as medical, educational, or industrial scenarios, which may have different needs and constraints (\eg, the most natural visual hand augmentation for a medical application, or the easiest to understand and use for an educational context).

Similarly, a broader experimental study might shed light on the role of gender and age, as our subject pool was not sufficiently diverse in this regard.
Finally, all visual hand augmentations received both low and high rankings from different participants, suggesting that users should be able to choose and personalize some aspects of the visual hand augmentation according to their preferences or needs, and this should also be evaluated.

@@ -145,7 +157,8 @@ It remains to be explored how to support rendering for different and larger area

\section{Perspectives}

Our goal was to improve direct hand interaction with virtual objects using wearable haptic devices and an \OST-\AR headset.
We aimed to provide more plausible and coherent perception and more natural and effective manipulation of the visuo-haptic augmentations.
Our contributions have enabled progress towards a seamless integration of the virtual into the real world.
They also allow us to outline longer-term research perspectives.

@@ -154,7 +167,7 @@ They also allow us to outline longer-term research perspectives.
We saw how complex the sense of touch is (\secref[related_work]{haptic_hand}).
Multiple sensory receptors all over the skin allow us to perceive different properties of objects, such as their texture, temperature, weight, or shape.
Particularly concentrated in the hands, cutaneous sensory feedback, together with feedback from the muscles, is crucial for grasping and manipulating objects.
In this manuscript, we showed how wearable haptic devices can provide virtual tactile sensations to support direct hand interaction with an \OST-\AR headset.
We investigated both the visuo-haptic perception of textures augmenting real surfaces (\partref{perception}) and the manipulation of virtual objects with visuo-haptic feedback of the hand's contacts with them (\partref{manipulation}).

However, unlike the visual sense, which can be fully immersed in the virtual using an \AR/\VR headset, there is no universal wearable haptic device that can reproduce all the haptic properties perceived by the hand (\secref[related_work]{wearable_haptics}).
@@ -169,20 +182,17 @@ It would therefore be interesting to determine which wearable haptic augmentatio
Similar user studies could then be conducted to reproduce as many haptic properties as possible in virtual object discrimination tasks.
These results would enable the design of more universal wearable haptic devices providing rich haptic feedback that best meets users' needs for interaction in \AR and \VR.

\subsection*{Responsive Visuo-Haptic Augmented Reality}

We reviewed the diversity of \AR and \VR displays and their respective characteristics in rendering (\secref[related_work]{ar_displays}) and in the manipulation of virtual content with the hand (\chapref{visual_hand}).
The diversity of wearable haptic devices and of the different sensations they can provide is even greater (\secref[related_work]{wearable_haptics}) and an active research topic \cite{pacchierotti2017wearable}.
Coupling wearable haptics with \AR headsets also requires the haptic actuators to be placed elsewhere on the body than at the contact points of the hand (\secref[related_work]{vhar_haptics}).
In particular, in this thesis we investigated the perception of haptic texture augmentation using a vibrotactile device on the median phalanx (\chapref{vhar_system}) and also compared different positions of the haptics on the hand for manipulating virtual objects (\chapref{visuo_haptic_hand}).

Haptic feedback should be provided close to the point of contact of the hand with the virtual content, to enhance the realism of texture augmentation (\chapref{vhar_textures}) and to render contact with virtual objects (\chapref{visuo_haptic_hand}), \eg rendering fingertip contact with a haptic ring worn on the middle or proximal phalanx.
However, the task at hand, the user's sensitivity and preferences, the limitations of the tracking system, or the ergonomics of the haptic device may require the use of other form factors and positions, such as the wrist or arm.
Similarly, collaborative and transitional experiences between \AR and \VR are becoming more common, and would involve different setups and modalities \cite{roo2017onea}.
Novel \AR/\VR displays are already capable of transitioning from \AE to \VE \cite{feld2024simple}, and haptic feedback should also adapt to these transitions (\chapref{xr_perception}).

Therefore, a visuo-haptic augmented reality system should be able to adapt to any \AR/\VR display, any wearable haptic device worn anywhere on the body, and support personalization of haptic feedback.
In other words, the visuo-haptic rendering system should be designed to be responsive to the context of use.
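As a sketch of what such responsiveness could look like in software, the following hypothetical device abstraction lets the rendering layer query the capabilities of the worn actuator and degrade the desired effect gracefully; none of these class or method names come from an existing system.
\begin{verbatim}
# Hypothetical sketch of a context-responsive haptic rendering layer:
# effects are described abstractly and adapted to whatever actuator
# capabilities the worn device reports. Names are illustrative only.
from dataclasses import dataclass

@dataclass
class Capabilities:
    frequency_control: bool  # e.g., voice-coil yes, ERM no
    max_amplitude: float     # normalized
    body_location: str       # "fingertip", "proximal", "wrist", ...

@dataclass
class ContactEffect:
    frequency: float         # Hz, desired rendering frequency
    amplitude: float         # normalized [0, 1]

def adapt(effect: ContactEffect, caps: Capabilities) -> ContactEffect:
    """Degrade a desired effect to what the current device can render."""
    amplitude = min(effect.amplitude, caps.max_amplitude)
    # Devices without frequency control can only render amplitude.
    frequency = effect.frequency if caps.frequency_control else 0.0
    # Feedback far from the contact point may need a stronger amplitude
    # to remain noticeable (a user-calibrated gain would refine this).
    if caps.body_location == "wrist":
        amplitude = min(1.0, amplitude * 1.5)
    return ContactEffect(frequency, amplitude)
\end{verbatim}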
@@ -192,11 +202,3 @@ Methods should also be developed to allow the user to easily adjust the haptic f
Finally, more practical use cases and applications of visuo-haptic \AR should be explored and evaluated.
For example, capturing visuo-haptic perceptions of objects and sharing them in a videoconference with different \AR or \VR setups per participant, or in a medical teleconsultation.
It could also be projecting and touching a visuo-haptic sample on a real wall for interior design, then switching to \VR to see and touch the complete final result; or manipulating a museum object with a real proxy object in visuo-haptic \AR, or even experiencing it as it was in the past, in its original state, in \VR.
@@ -6,17 +6,14 @@
\textbf{Erwan Normand}, Claudio Pacchierotti, Eric Marchand, and Maud Marchal.
\enquote{Visuo-Haptic Rendering of the Hand during 3D Manipulation in Augmented Reality}.
In: \textit{IEEE Transactions on Haptics (ToH)}. 27.4 (2024), pp. 2481--2487.

\section*{International Conferences}

\textbf{Erwan Normand}, Claudio Pacchierotti, Eric Marchand, and Maud Marchal.
\enquote{Augmenting the Texture Perception of Tangible Surfaces in Augmented Reality using Vibrotactile Haptic Stimuli}.
In: \textit{EuroHaptics}. Lille, France, July 2024, pp. 469--484.

\noindentskip
\textbf{Erwan Normand}, Claudio Pacchierotti, Eric Marchand, and Maud Marchal.
\enquote{How Different Is the Perception of Vibrotactile Texture Roughness in Augmented versus Virtual Reality?}.
In: \textit{ACM Symposium on Virtual Reality Software and Technology (VRST)}. Trier, Germany, October 2024, pp. 287--296.
@@ -3,13 +3,6 @@

\nochaptertoc

\selectlanguage{french}

In this thesis manuscript, we show how \emph{augmented reality (AR)}, which integrates virtual visual content into the perception of the real world, and \emph{wearable haptics}, which provide tactile sensations on the skin, can improve hand interactions with virtual and augmented objects.
@@ -33,16 +26,16 @@ Cette technologie est étroitement liée à la \emph{réalité virtuelle (RV)},
Virtuality and augmentation can be visual or haptic.
Thus, a haptic device provides so-called virtual sensations to a user.
A \textbf{haptic augmentation is the modification of the perception of a real object touched by a user through the addition of virtual haptic sensations} \cite{jeon2015haptic,bhatia2024augmenting}.
An important aspect of the AR (and VR) illusion is \emph{plausibility}, that is, the user's illusion that virtual events are really happening \cite{slater2022separate}.

In this context, we define an \emph{AR system} as the set of hardware (input devices, sensors, displays, and haptic devices) and software (tracking, simulation, and rendering) components that allow the user to interact with the augmented environment.
AR headsets are the most promising display technology, as they are wearable, provide the user with an \emph{immersive} augmented environment, and leave the hands free to interact \cite{hertel2021taxonomy}.
Haptic feedback is then essential to ensure plausible and coherent interaction with the virtual visual content.
This is why wearable haptics seem particularly well suited to AR headsets.

\subsectionstarbookmark{Challenges of Wearable Visuo-Haptic Augmented Reality}

However, integrating wearable haptics with an AR headset to create a visuo-haptic augmented environment is complex and presents many challenges.
We propose to represent the user's experience in such an environment as an interaction loop, illustrated in \figref{interaction-loop-fr} and based on interaction loops with 3D systems \cite[p.84]{laviolajr20173d}.
A user interacts with the visual and haptic virtual environments through a virtual hand that follows their movements and simulates interaction with virtual objects.
The virtual environments are rendered back to the user with an immersive AR headset and wearable haptics, and the user perceives them as co-located with the real environment.
@@ -80,8 +73,8 @@ Nous proposons donc pour ce premier axe de recherche de concevoir des augmentati
To this end, we: \textbf{(1) design a visuo-haptic texture augmentation system} with wearable vibrotactile haptics; \textbf{(2) evaluate how the perception of wearable haptic texture augmentations is affected by the visual feedback of the virtual hand and of the environment}; and \textbf{(3) study the perception of combined visual and wearable haptic texture augmentations}.
These contributions are detailed in \secref{perception}.

The rendering limitations of AR headsets and wearable haptics make it difficult to manipulate virtual objects directly with the hand.
Two types of sensory feedback can improve this manipulation but have not been studied in AR: visual feedback of the virtual hand \cite{prachyabrued2014visual} and haptic feedback relocated on the hand \cite{teng2021touch}.
For this second research axis, we propose to design visuo-haptic augmentations of the hand as sensory feedback for interactions with virtual objects.
To this end, we study the effect on user performance and experience of \textbf{(1) visual feedback of the virtual hand as an augmentation of the real hand} and of \textbf{(2) different relocations of the haptic feedback}, using wearable vibrotactile haptics as feedback of the hand's contacts with virtual objects, \textbf{in combination with visual augmentations of the hand}.
These contributions are detailed in \secref{manipulation}.
@@ -97,7 +90,7 @@ Une approche efficace pour créer une texture haptique consiste à générer un
The vibrations are generated by a \textit{voice-coil} vibrotactile device, which allows independent control of the signal's frequency and amplitude.
This device is placed in a hand-held tool or attached directly to the finger.
When played while touching a real surface, these vibrations augment the perceived roughness, that is, the micro-asperities of the surface \cite{culbertson2015should}.
However, this method has not yet been integrated with an AR headset.

\begin{subfigs}{vhar-system}{Our wearable visuo-haptic texture augmentation system. }[][
\item The HapCoil-One \textit{voice-coil} vibrotactile device, fitted with a tracking marker and attached to the middle phalanx of the user's index finger.
@@ -108,7 +101,7 @@ Cependant, cette méthode n'a pas encore été intégrée dans un contexte de RA
\subfig{vhar-system-apparatus-fr}
\end{subfigs}

We propose a \textbf{rendering system for visual and haptic virtual textures} that augment real surfaces.
To do so, we use the Microsoft HoloLens~2 immersive augmented reality headset and a wearable vibrotactile device worn on the middle phalanx (\figref{vhar-system-device-fr}).
The visuo-haptic augmentations can be \textbf{viewed from any angle} and \textbf{freely explored with the finger}, as if they were real textures.

@@ -124,7 +117,6 @@ Ce système constitue la base expérimentale pour les deux prochaines études d'
\subsectionstarbookmark{Effect of the Visual Feedback of the Virtual Hand on the Perception of Wearable Haptic Texture Augmentation}

Most haptic augmentations with wearable haptics, such as those in our texture augmentation system above, have been studied without visual feedback, and not in immersive AR or VR environments \cite{culbertson2017importance,friesen2024perceived}.
Yet, visual feedback can modify the perception of real and virtual haptic sensations \cite{schwind2018touch,choi2021augmenting}, and the perception of the same stimulus from a force-feedback haptic device can differ between AR and VR \cite{diluca2011effects,gaffary2017ar}.

\begin{subfigs}{xr-perception}{Experimental conditions of the user study. }[][
@@ -150,7 +142,6 @@ Nous faisons l'hypothèse que cette différence de perception a été causée à

This study suggests that it is necessary to pay attention to the latency differences of each sensory feedback loop, visual or haptic, inherent to these systems.
More importantly, \emph{the perception of their asynchrony} should be estimated.
It seems that the visual appearance of the hand or of the environment had little direct effect on the perception of the haptic feedback, but that the visual feedback of the virtual hand can affect the perception of visuo-haptic latencies, even when these remain identical.

We then study simultaneous and co-located visual and haptic texture augmentations.
@@ -192,7 +183,7 @@ Avec l'étude précédente, cela ouvre la voie à de nouvelles applications de R
\label{manipulation}

For this second research axis, we propose to design and evaluate visuo-haptic sensory feedback of the hand and of its interactions with virtual objects.
The goal is to facilitate the manipulation of virtual objects with AR headsets.

\subsectionstarbookmark{Visual Feedback of the Virtual Hand as an Augmentation of the Real Hand}

@@ -228,7 +219,7 @@ Nos résultats montrent qu'une augmentation visuelle de la main a amélioré les
The skeleton rendering, which provides a detailed view of the tracked joints and phalanges without hiding the real hand, proved to be the best performing and most effective.
The outline rendering and the 3D model rendering partially masked the real hand, while the fingertip rendering received mixed opinions.
The occlusion rendering had a perceived hand-tracking latency that was too high for it to be effective and understood by the participants.
These results are consistent with those of previous studies in AR and VR.

This study suggests that a 3D visual augmentation of the hand is important for interacting with a virtual hand technique in AR.
This seems particularly necessary when the user must perform precise finger movements on virtual content, such as with 3D windows, buttons, or sliders, or in more complex tasks, such as stacking or assembling virtual objects.
@@ -251,19 +242,18 @@ Cependant, il n'est pas clair quel placement de l'actionneur est le plus bénéf
\subfig[.45]{visuo-haptic-hand-task-grasp-fr}
\end{subfigs}

This is why we study the role of \textbf{visuo-haptic feedback of the hand when manipulating virtual objects} with an AR headset, using wearable vibrotactile haptics.
We first selected \textbf{four placements of the haptic device on the hand} that have been proposed in the literature: on the nails, the proximal phalanges, the wrist, and the nails of the opposite hand (\figref{visuo-haptic-hand-locations-fr}).
We focused on vibrotactile feedback, as it is present in most wearable haptic devices and is the least cumbersome.
In practice, we used two ERM vibrotactile motors, as they are the most compact and do not affect hand tracking \cite{pacchierotti2016hring}, but they only allow control of the signal's amplitude.
In a \textbf{user study} with 20 participants, we evaluated the effect of the four placements with \textbf{two contact vibration techniques} on user performance and experience.
Participants performed the same two manipulation tasks as in the study on the visual augmentation of the hand (\figref{visuo-haptic-hand-task-grasp-fr}).
Finally, we compared these vibrotactile renderings with the \textbf{visual augmentation of the hand's phalanges} (\figref{../4-manipulation/visual-hand/figures/method/hands-skeleton}).

The results showed that, when placed close to the point of contact, the relocated vibrotactile feedback of the hand improved the participants' sense of efficiency, realism, and usefulness.
However, the most distant placement, on the opposite hand, yielded the best performance, even though it was little appreciated: this unusual placement probably led participants to pay more attention to the haptic feedback and to concentrate more on the task.
The vibration-on-contact technique was sufficient compared to a more elaborate technique modulating the vibration intensity as a function of the contact force.
L'augmentation visuelle de la main a été perçue comme moins nécessaire que le retour haptique vibrotactile, mais a tout de même fourni un retour utile sur le suivi de la main.
|
L'augmentation visuelle de la main a été perçue comme moins nécessaire que le retour haptique vibrotactile, mais a tout de même fourni un retour utile sur le suivi de la main.
|
||||||
Cette étude confirme que la relocalisation du retour haptique est une approche simple, mais prometteuse pour l'haptique portable en RA immersive.
|
Cette étude confirme que la relocalisation du retour haptique est une approche simple, mais prometteuse pour l'haptique portable avec les visiocasques de RA.
|
||||||
|
|
||||||
Si l'intégration avec le système de suivi de la main le permet et si la tâche l'exige, un anneau haptique porté sur la phalange moyenne ou proximale semble préférable : c'est l'approche que nous avons par ailleurs utilisée dans notre axe de recherche sur les augmentations de textures (\secref{perception}).
|
Si l'intégration avec le système de suivi de la main le permet et si la tâche l'exige, un anneau haptique porté sur la phalange moyenne ou proximale semble préférable : c'est l'approche que nous avons par ailleurs utilisée dans notre axe de recherche sur les augmentations de textures (\secref{perception}).
|
||||||
Cependant, un dispositif haptique monté sur le poignet pourra fournir un retour d'information plus riche en intégrant différents dispositifs haptiques, tout en étant potentiellement moins gênant qu'une bague.
|
Cependant, un dispositif haptique monté sur le poignet pourra fournir un retour d'information plus riche en intégrant différents dispositifs haptiques, tout en étant potentiellement moins gênant qu'une bague.
|
||||||
@@ -273,12 +263,12 @@ It can then be deactivated during the grasping phase to avoid the re
\section{Conclusion}
\label{conclusion}

- In this thesis manuscript, we have shown how immersive AR and wearable haptics can improve hand interaction with virtual objects.
+ In this thesis manuscript, we have shown how an AR headset and wearable haptics can improve hand interactions with virtual and augmented objects.
Wearable haptic devices can provide tactile feedback for virtual objects and augment the perception of real objects touched with the finger, while preserving the hand's freedom of movement and interaction with the real environment.
However, their integration with AR is still recent and presents many conceptual, technical, and user-experience challenges.
We structured this thesis around two research axes: \textbf{(I) modifying the visuo-haptic perception of the texture of real surfaces} and \textbf{(II) improving the manipulation of virtual objects}.

- \noindentskip We first focused on the perception of wearable and immersive virtual visuo-haptic textures that augment real surfaces (\secref{perception}).
+ \noindentskip We first focused on the perception of wearable virtual visuo-haptic textures that augment real surfaces (\secref{perception}).
Texture is a fundamental property of an object, perceived by sight as well as by touch.
It is also one of the most studied haptic augmentations, but it had not yet been integrated into direct-touch contexts in AR or VR.

@@ -299,8 +289,7 @@ Participants systematically identified and matched \textbf{l

\noindentskip We also sought to improve the manipulation of virtual objects directly with the hand.
Manipulating virtual objects is a fundamental task in 3D systems, but it remains difficult to perform with the hand.
- We then explored two sensory feedbacks known to improve this type of interaction, but not yet studied in immersive AR: visual feedback of the virtual hand and haptic feedback relocated on the hand.
+ We then explored two sensory feedbacks known to improve this type of interaction, but not yet studied with AR headsets: visual feedback of the virtual hand and haptic feedback relocated on the hand.
- %Our approach consisted of designing visual hand augmentations and relocated wearable haptic feedback, based on the literature, and evaluating them in user studies.

We therefore first examined \textbf{(1) visual augmentation of the hand}.
These are visual renderings of the virtual hand that provide feedback on hand tracking and on the interaction with virtual objects.

@@ -35,9 +35,11 @@
% Style the acronyms
\renewcommand*{\glstextformat}[1]{\textcolor{black}{#1}}% Hyperlink in black

+ \let\AE\undefined % Command \AE is already defined

\acronym[TIFC]{2IFC}{two-interval forced choice}
- \acronym[TwoD]{2D}{two-dimensional}
\acronym[ThreeD]{3D}{three-dimensional}
+ \acronym{AE}{augmented environment}
\acronym{ANOVA}{analysis of variance}
\acronym{ART}{aligned rank transform}
\acronym{AR}{augmented reality}
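A note on the paired additions above, inferred from the diff's own comment (Command \AE is already defined): LaTeX reserves \AE for the Æ ligature, so if the custom \acronym helper defines a shorthand command named after each entry key, the new AE acronym would clash with it, and \let\AE\undefined clears the old meaning first. That per-entry shorthand is an assumption about \acronym, which is defined outside this hunk.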
@@ -45,13 +47,12 @@
\acronym{DoF}{degree of freedom}
\acronym{ERM}{eccentric rotating mass}
\acronym{FoV}{field of view}
- \acronym{GLMM}{generalized linear mixed models}
+ \acronym{GLMM}{generalized linear mixed model}
- \acronym{GM}{geometric mean}
\acronym{HaTT}{Penn Haptic Texture Toolkit}
\acronym{HSD}{honest significant difference}
\acronym{JND}{just noticeable difference}
\acronym{LRA}{linear resonant actuator}
- \acronym{LMM}{linear mixed models}
+ \acronym{LMM}{linear mixed model}
\acronym{MLE}{maximum-likelihood estimation}
\acronym{MR}{mixed reality}
\acronym{OST}{optical see-through}

@@ -1,5 +1,5 @@
% Images
- \usepackage{graphicx}
+ \usepackage{graphicx}% Include images
\usepackage{caption}% Point references to the figure not the caption
\usepackage[export]{adjustbox}% For valign in subfigs

@@ -20,15 +20,18 @@
\renewcommand{\floatpagefraction}{0.7}

% Formatting
- \usepackage[autostyle]{csquotes}% For quotes
+ \usepackage[autostyle]{csquotes}% Correctly quote text in different languages
\usepackage[dvipsnames]{xcolor}% More colors
\usepackage{tocloft}% Customise the table of contents
- \usepackage{tocbibind}% Add bibliography and lists to the table of contents
+ \usepackage[nottoc]{tocbibind}% Add bibliography and lists to the table of contents, and hide the table of contents itself

% Footnotes
- \usepackage[hang]{footmisc}
+ \usepackage[hang]{footmisc}% hang: no indentation for footnotes
\setlength{\footnotemargin}{3mm}% Margin between footnote number and text

+ \NewCommandCopy{\oldfootnote}{\footnote}
+ \renewcommand{\footnote}{\ifhmode\unskip\fi\oldfootnote}% Remove space before footnote

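A minimal sketch of what the two added footnote lines do, assuming a LaTeX kernel recent enough to provide \NewCommandCopy (2020 or later): \unskip removes the inter-word glue left by a space accidentally typed before \footnote, and \ifhmode guards it so nothing happens in vertical mode. Both calls below therefore set the footnote mark flush against the word:

\documentclass{article}
\NewCommandCopy{\oldfootnote}{\footnote}
\renewcommand{\footnote}{\ifhmode\unskip\fi\oldfootnote}
\begin{document}
A claim\footnote{No stray space was typed before the call.}
A claim \footnote{The stray space typed before the call is removed.}
\end{document}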
% Less hbox/vbox badness messages
\hfuzz=20pt
\vfuzz=20pt

BIN covers.odt (binary file not shown)
BIN covers.pdf (binary file not shown)
@@ -23,17 +23,16 @@
}

% Custom commands
+ \input{config/acronyms}
\input{utils/commands}
\input{config/thesis_commands}

- \input{config/acronyms}

% Document
\begin{document}

\frontmatter
\import{0-front}{cover}
- %\importchapter{0-front}{acknowledgement}
+ \importchapter{0-front}{acknowledgement}
\importchapter{0-front}{contents}

\mainmatter
@@ -54,7 +53,9 @@

\appendix
\importchapter{5-conclusion}{publications}
+ \begin{otherlanguage}{french}
\importchapter{5-conclusion}{résumé}
+ \end{otherlanguage}

\backmatter
\importchapter{6-back}{bibliography}
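A minimal sketch of what the added otherlanguage wrapper changes, assuming babel is loaded with english as the main language and french as a secondary one (the package setup is an assumption; only the environment appears in this hunk): inside it, French hyphenation patterns and babel's fixed strings apply, and English is restored afterwards.

\documentclass{book}
\usepackage[french,english]{babel}% the last-listed language, english, is the main one
\begin{document}
\chapter{Conclusion}% English hyphenation and heading strings
\begin{otherlanguage}{french}
\chapter{Résumé}% French hyphenation and heading strings
\end{otherlanguage}
\end{document}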

@@ -1,11 +1,18 @@
- @incollection{abdulali2016datadriven,
+ @inproceedings{abdulali2016datadriven,
title = {Data-{{Driven Modeling}} of {{Anisotropic Haptic Textures}}: {{Data Segmentation}} and {{Interpolation}}},
- booktitle = {Haptics: {{Perception}}, {{Devices}}, {{Control}}, and {{Applications}}},
+ booktitle = {{{EuroHaptics}}},
author = {Abdulali, Arsen and Jeon, Seokhee},
date = {2016},
- volume = {9775},
+ pages = {228--239}
- pages = {228--239},
+ }
- isbn = {978-3-319-42323-4 978-3-319-42324-1}
+ @inproceedings{abdulali2016datadrivena,
+ title = {Data-{{Driven Rendering}} of {{Anisotropic Haptic Textures}}},
+ booktitle = {{{AsiaHaptics}}},
+ author = {Abdulali, Arsen and Jeon, Seokhee},
+ date = {2016},
+ pages = {401--407},
+ isbn = {978-981-10-4156-3 978-981-10-4157-0}
}

@inproceedings{abdulali2017sample,
@@ -18,16 +25,6 @@
isbn = {978-1-5090-1425-5}
}

- @incollection{abdulali2018datadriven,
- title = {Data-{{Driven Rendering}} of {{Anisotropic Haptic Textures}}},
- booktitle = {Haptic {{Interaction}}},
- author = {Abdulali, Arsen and Jeon, Seokhee},
- date = {2018},
- volume = {432},
- pages = {401--407},
- isbn = {978-981-10-4156-3 978-981-10-4157-0}
- }

@inproceedings{achibet2017flexifingers,
title = {{{FlexiFingers}}: {{Multi-finger}} Interaction in {{VR}} Combining Passive Haptics and Pseudo-Haptics},
shorttitle = {{{FlexiFingers}}},
@@ -73,7 +70,7 @@
date = {2022},
pages = {792--801},
doi = {10/gtv3wn},
- isbn = {978-1-66549-617-9}
+ isbn = {978-1-6654-9617-9}
}

@article{aggravi2022decentralized,

@@ -490,7 +487,7 @@
date = {2015},
journaltitle = {Found. Trends Human–Computer Interact.},
volume = {8},
- number = {2-3},
+ number = {2--3},
pages = {73--272},
doi = {10/gc2hkv}
}
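Two normalizations recur in the bibliography hunks above and below: ISBNs are rewritten with their standard hyphen groups (here 978-1-6654-9617-9 instead of 978-1-66549-617-9), and issue ranges switch from a single hyphen to --, so BibLaTeX sets an en dash consistent with the pages fields that already use it.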
@@ -613,7 +610,7 @@
author = {Brooks, Jas and Amin, Noor and Lopes, Pedro},
date = {2023},
doi = {10/gt625w},
- isbn = {9798400701320}
+ isbn = {979-8-4007-0132-0}
}

@inproceedings{buchmann2004fingartips,
@@ -713,7 +710,7 @@
date = {2019},
pages = {1749--1754},
doi = {10/gpm2sx},
- isbn = {978-1-72811-377-7}
+ isbn = {978-1-7281-1377-7}
}

@article{chinello2017three,
@@ -812,7 +809,7 @@
date = {2021},
pages = {613--618},
doi = {10/gsvh3z},
- isbn = {978-1-66541-871-3}
+ isbn = {978-1-6654-1871-3}
}

@article{costes2019touchy,
@@ -978,7 +975,7 @@
date = {2013},
journaltitle = {Wear},
volume = {301},
- number = {1-2},
+ number = {1--2},
pages = {324--329},
doi = {10/f45z4n}
}
@@ -1011,7 +1008,7 @@
date = {2019},
pages = {321--330},
doi = {10/gnc6m8},
- isbn = {978-1-72811-377-7}
+ isbn = {978-1-7281-1377-7}
}

@article{detinguy2021capacitive,
@@ -1126,7 +1123,7 @@
date = {2007},
journaltitle = {Comput. Vis. Image Underst.},
volume = {108},
- number = {1-2},
+ number = {1--2},
pages = {52--73},
doi = {10/bzm33v}
}
@@ -1138,7 +1135,7 @@
author = {Feick, Martin and Zenner, André and Ariza, Oscar and Tang, Anthony and Biyikli, Cihan and Krüger, Antonio},
date = {2023},
doi = {10/gtfhf6},
- isbn = {9798400701320}
+ isbn = {979-8-4007-0132-0}
}

@article{feix2016grasp,
@@ -1888,7 +1885,7 @@
date = {2022-03},
pages = {121--129},
doi = {10/gt7pqr},
- isbn = {978-1-66549-617-9}
+ isbn = {978-1-6654-9617-9}
}

@online{kahl2023using,
@@ -1936,7 +1933,7 @@
date = {2020},
pages = {275--284},
doi = {10/gjb2c6},
- isbn = {978-1-72815-608-8}
+ isbn = {978-1-7281-5608-8}
}

@article{kappers2013haptic,
@@ -2656,7 +2653,7 @@
date = {2019},
pages = {519--528},
doi = {10/gjpxzw},
- isbn = {978-1-72811-377-7}
+ isbn = {978-1-7281-1377-7}
}

@article{ogawa2021effect,
@@ -2792,7 +2789,7 @@
date = {2022},
pages = {628--633},
doi = {10/grnbtc},
- isbn = {978-1-66547-927-1}
+ isbn = {978-1-6654-7927-1}
}

@inproceedings{park2011perceptual,
@@ -2830,7 +2827,7 @@
date = {2023},
pages = {148--155},
doi = {10/gsvpqg},
- isbn = {9798350399936}
+ isbn = {979-8-3503-9993-6}
}

@inproceedings{peillard2019studying,
@@ -3000,7 +2997,7 @@
date = {2007},
journaltitle = {Int. J. Robot. Res.},
volume = {26},
- number = {11-12},
+ number = {11--12},
pages = {1191--1203},
doi = {10/b33wgg}
}
@@ -3306,6 +3303,15 @@
doi = {10/fj8xvn}
}

+ @inproceedings{son2022effect,
+ title = {Effect of {{Contact Points Feedback}} on {{Two-Thumb Touch Typing}} in {{Virtual Reality}}},
+ booktitle = {{{CHI Conf}}. {{Hum}}. {{Factors Comput}}. {{Syst}}.},
+ author = {Son, Jeongmin and Ahn, Sunggeun and Kim, Sunbum and Lee, Geehyuk},
+ date = {2022},
+ doi = {10/g8234n},
+ isbn = {978-1-4503-9156-6}
+ }

@inproceedings{speicher2019what,
title = {What Is {{Mixed Reality}}?},
booktitle = {{{CHI Conf}}. {{Hum}}. {{Factors Comput}}. {{Syst}}.},
@@ -3458,7 +3464,7 @@
author = {Teng, Shan-Yuan and Gupta, Aryan and Lopes, Pedro},
date = {2024},
doi = {10/gwhbpm},
- isbn = {9798400703300}
+ isbn = {979-8-4007-0330-0}
}

@article{tong2023survey,
@@ -3478,7 +3484,7 @@
author = {Tran, Tanh Quang and Langlotz, Tobias and Regenbrecht, Holger},
date = {2024},
doi = {10/gt56cs},
- isbn = {9798400703300}
+ isbn = {979-8-4007-0330-0}
}

@article{turk2014multimodal,
@@ -3704,7 +3710,7 @@
date = {2021},
pages = {385--390},
doi = {10/gvm7sf},
- isbn = {978-1-66541-871-3}
+ isbn = {978-1-6654-1871-3}
}

@inproceedings{yoon2020evaluating,
@@ -3723,7 +3729,7 @@
date = {2007},
journaltitle = {Somatosens. Mot. Res.},
volume = {24},
- number = {1-2},
+ number = {1--2},
pages = {53--70},
doi = {10/cf9xmk}
}

1 utils/.gitignore vendored
@@ -146,7 +146,6 @@ acs-*.bib

# knitr
*-concordance.tex
- # TODO Uncomment the next line if you use knitr and want to ignore its generated tikz files
# *.tikz
*-tikzDictionary


@@ -6,7 +6,7 @@
[subrepo]
remote = git@git.nortelli.fr:whar-packages/latex-utils.git
branch = main
- commit = eb2bf7a751f36bcd487fa8d6d186c7e8249187dc
+ commit = 89f44bd6e060110cf233d2632c644067d91780bc
- parent = 1a588e02480c5d4ecf8aa8562be461a89cc311b5
+ parent = 58161d561f8d80f744871af41c31c16b3007a7d1
method = rebase
cmdver = 0.4.6
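For context, a plausible reading of this hunk based on git-subrepo's documented .gitrepo format: commit records the subproject commit last synced into utils/ and parent the superproject commit that sync was based on, so both fields are rewritten whenever the vendored copy is re-synced, typically by a command such as git subrepo pull utils. The exact command used is an assumption; only the updated hashes appear in the diff.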

@@ -93,6 +93,7 @@
}

%% Footnotes
+ \newcommand{\footnotemarkrepeat}{\footnotemark[\value{footnote}]}% Repeat the last footnote mark
\newcommand{\footnoteurl}[1]{\footnote{\ \url{#1}}}
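A short usage sketch for these two footnote helpers, assuming hyperref (or the url package) is loaded for \url; the sentence and URL are hypothetical. \footnotemarkrepeat re-emits the mark of the most recent footnote by reading the footnote counter, which is handy when two statements share one source:

The device was 3D-printed.\footnoteurl{https://example.org/model}% hypothetical URL
Its mount comes from the same file.\footnotemarkrepeat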

%% Lists

@@ -138,6 +139,8 @@

\newcommand{\wilcoxon}[1]{\measure{W}{#1}{}}

+ \newcommand{\ztest}[1]{$\mathrm{Z} = #1$}

% From https://tex.stackexchange.com/a/95842
\newenvironment{conditions}{%
\par% maybe you want the list to start with a new line
@@ -155,17 +158,32 @@
\newcommand{\figref}[1]{Fig.~\ref{fig:#1}}
\newcommand{\secref}[1]{Sec.~\ref{sec:#1}}
\newcommand{\tabref}[1]{Table~\ref{tab:#1}}
- \newcommand{\footnotemarkrepeat}{\footnotemark[\value{footnote}]} % Repeat the last footnote mark

%% Structure
- \newcommand{\chapterstartoc}[1]{\chapter*{#1}\addcontentsline{toc}{chapter}{#1}}
+ \newcommand{\chapternotoc}[1]{%
- \newcommand{\chapterstarbookmark}[1]{\pdfbookmark[chapter]{#1}{#1}\chapter*{#1}}
+ \begingroup%
+ \renewcommand{\addtocontents}[2]{}%
+ \chapter*{#1}%
+ \endgroup%
+ }

- \newcommand{\sectionstartoc}[1]{\section*{#1}\addcontentsline{toc}{section}{#1}}
+ \newcommand{\sectionstartoc}[1]{%
- \newcommand{\sectionstarbookmark}[1]{\pdfbookmark[section]{#1}{#1}\section*{#1}}
+ \section*{#1}%
+ \addcontentsline{toc}{section}{#1}%
+ }
+ \newcommand{\sectionstarbookmark}[1]{%
+ \pdfbookmark[section]{#1}{#1}%
+ \section*{#1}%
+ }

- \newcommand{\subsectionstartoc}[1]{\subsection*{#1}\addcontentsline{toc}{subsection}{#1}}
+ \newcommand{\subsectionstartoc}[1]{%
- \newcommand{\subsectionstarbookmark}[1]{\pdfbookmark[subsection]{#1}{#1}\subsection*{#1}}
+ \subsection*{#1}%
+ \addcontentsline{toc}{subsection}{#1}%
+ }
+ \newcommand{\subsectionstarbookmark}[1]{%
+ \pdfbookmark[subsection]{#1}{#1}%
+ \subsection*{#1}%
+ }
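A plausible reading of the rewritten structure commands, now laid out one step per line: \chapternotoc typesets \chapter*{#1} inside a group where \addtocontents is a no-op, so any table-of-contents line a class or package would write for the starred heading is discarded; the startoc variants keep adding their entry explicitly, and the starbookmark variants add a PDF bookmark instead. A usage sketch under that assumption:

\chapternotoc{Acknowledgements}% unnumbered chapter, no ToC entry
\sectionstarbookmark{Funding}% unnumbered section with a PDF bookmark, no ToC entry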

\newcommand{\subsubsubsection}[1]{\paragraph*{\textit{#1}}}