Compare commits

...

34 Commits

SHA1 Message Date
34db502525 Typo in interaction-loop-fr figure 2025-04-12 15:09:21 +02:00
defd40d0bd Cite icons in interaction-loop figure 2025-04-12 15:05:50 +02:00
a603f8e8a5 Improve registration description 2025-04-12 15:03:25 +02:00
b296d3ae98 Use babel french in résumé 2025-04-12 14:25:40 +02:00
885f002898 More \RE and \VE uses 2025-04-12 14:25:20 +02:00
d27dddc548 More visible comments 2025-04-12 14:20:57 +02:00
c681cbf43d Use \cm instead of \mm 2025-04-12 13:42:34 +02:00
97e35ca282 Add back augmented environment (AE) acronym 2025-04-11 23:06:10 +02:00
1eb5ed0e42 Amend 4cbd2b7: Add changes package 2025-04-11 23:02:23 +02:00
398583e819 Amend 42f8202: Fix vhar textures results 2025-04-11 23:01:57 +02:00
be9f9bb4be Typo 2025-04-11 23:01:34 +02:00
241c73a9c8 Fix abdulali2016datadrivena reference 2025-04-11 23:00:54 +02:00
f8ec931cd6 Replace "immersive AR" with "AR headset" 2025-04-11 23:00:02 +02:00
f1cf425e7c Remove .vscode/ltex* files 2025-04-11 15:37:44 +02:00
93a47df0f8 Remove comments 2025-04-10 16:14:23 +02:00
49c33a5a37 Fix in acronyms 2025-04-10 16:11:18 +02:00
f05fec76df Add registration error to future works 2025-04-09 16:27:56 +02:00
1062a6b13c Add visual texture quality to future works 2025-04-09 16:27:41 +02:00
ee93d2079a Fix interaction-techniques figure 2025-04-09 15:49:32 +02:00
121675b2bc Typo 2025-04-09 15:33:36 +02:00
03e96390a0 Typo 2025-04-09 15:30:53 +02:00
1164bd0da6 Add packages used for stats 2025-04-09 15:06:51 +02:00
85df9fe164 Better notation of software versions 2025-04-09 15:06:26 +02:00
7fe080351d Typo 2025-04-09 15:05:27 +02:00
f42733a7e7 Add \comans command 2025-04-08 18:41:39 +02:00
42f8202d43 Fix vhar textures results 2025-04-08 18:41:26 +02:00
4cbd2b7d0a Add changes package 2025-04-08 18:24:05 +02:00
5f9638c718 Fix vhar textures results 2025-04-08 17:07:04 +02:00
65ab354b62 Fix in references 2025-04-08 17:06:44 +02:00
530112fee9 Remove comments 2025-04-08 17:06:25 +02:00
e8dd43f4b8 Typo in GLMM and LMM acronyms 2025-04-08 17:06:13 +02:00
6c4891c255 Add \ztest 2025-04-08 17:05:53 +02:00
c8e9c8ef80 Style 2025-04-08 17:05:47 +02:00
74d850fc1b Typo 2025-04-06 12:24:29 +02:00
63 changed files with 361 additions and 472 deletions

View File

@@ -1,3 +0,0 @@
vhar
visuo-haptic
vibrotactile

View File

@@ -1,3 +0,0 @@
visuo
RV
vibrotactile

View File

@@ -1,4 +0,0 @@
COMMA_PARENTHESIS_WHITESPACE
UPPERCASE_SENTENCE_START
UPPERCASE_SENTENCE_START
DOUBLE_PUNCTUATION

View File

@@ -1,47 +0,0 @@
{"rule":"EN_A_VS_AN","sentence":"^\\QHowever, this method has not yet been integrated in an context, where the user should be able to freely touch and explore the visuo-haptic texture augmentations.\\E$"}
{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qdevice apparatus\\E$"}
{"rule":"ALLOW_TO","sentence":"^\\QAmong the various haptic texture augmentations, data-driven methods allow to capture, model and reproduce the roughness perception of real surfaces when touched touched by a hand-held stylus ([related_work]texture_rendering).\\E$"}
{"rule":"ALLOW_TO","sentence":"^\\QAmong the various haptic texture augmentations, data-driven methods allow to capture, model and reproduce the roughness perception of real surfaces when touched by a hand-held stylus ([related_work]texture_rendering).\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QThe contributions of this chapter are: Transposition of data-driven visuo-haptic textures to augment real objects in a direct touch context in immersive .\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q[0.65]experiment/view First person view of the user study.[ As seen through the immersive headset Microsoft HoloLens 2.\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q]\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q[0.7]results/trial_predictions Proportion of trials in which the comparison texture was perceived as rougher than the reference texture, as a function of the amplitude difference between the two textures and the visual rendering.[ Curves represent predictions from the model (probit link function), and points are estimated marginal means with non-parametric bootstrap 95 .\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QThe (results/trial_pses) and (results/trial_jnds) for each visual rendering and their respective differences were estimated from the model, along with their corresponding 95 , using a non-parametric bootstrap procedure (1000 samples).\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QReported response times are .\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QThe frames analyzed were those in which the participants actively touched the comparison textures with a finger speed greater than 1 .\\E$"}
{"rule":"MISSING_GENITIVE","sentence":"^\\QFriedman tests were employed to compare the ratings to the questions (questions1 and questions2), with post-hoc Wilcoxon signed-rank tests and Holm-Bonferroni adjustment, except for the questions regarding the virtual hand that were directly compared with Wilcoxon signed-rank tests.\\E$"}
{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults_questions shows these ratings for questions where statistically significant differences were found (results are shown as mean \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q standard deviation): Hand Ownership: participants slightly feel the virtual hand as their own with the Mixed rendering (2.3 1.0) but quite with the Virtual rendering (3.5 0.9, 0.001).\\E$"}
{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults_questions presents the questionnaire results of the Matching and Ranking tasks.\\E$"}
{"rule":"EN_A_VS_AN","sentence":"^\\QHowever, this method has not yet been integrated in an context, where the user should be able to freely touch and explore the visuo-haptic texture augmentations.\\E$"}
{"rule":"ALLOW_TO","sentence":"^\\QAmong the various haptic texture augmentations, data-driven methods allow to capture, model and reproduce the roughness perception of real surfaces when touched by a hand-held stylus ([related_work]texture_rendering).\\E$"}
{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults/matching_confusion_matrix shows the confusion matrix of the Matching task with the visual textures and the proportion of haptic texture selected in response, i.e. the proportion of times the corresponding haptic texture was selected in response to the presentation of the corresponding visual texture.\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q[0.82]results/matching_confusion_matrix Confusion matrix of the Matching task.[ With the presented visual textures as columns and the selected haptic texture in proportion as rows.\\E$"}
{"rule":"EN_A_VS_AN","sentence":"^\\QA on the log Completion Time with the Visual Texture as fixed effect and the participant as random intercept was performed.\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QNo statistical significant effect of Visual Texture was found (8 512 1.9, 0.06) on Completion Time (44 , 42 46), indicating an equal difficulty and participant behaviour for all the visual textures.\\E$"}
{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults/ranking_mean_ci presents the results of the three rankings of the haptic textures alone, the visual textures alone, and the visuo-haptic texture pairs.\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q[ A lower rank means that the texture was considered rougher, a higher rank means smoother.\\E$"}
{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults/matching_correspondence_analysis shows the first two dimensions with the 18 haptic and visual textures.\\E$"}
{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults_clusters shows the dendrograms of the two hierarchical clusterings of the haptic and visual textures, constructed using the Euclidean distance and the Ward's method on squared distance.\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q[ The closer the haptic and visual textures are, the more similar they were judged.\\E$"}
{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults/haptic_visual_clusters_confusion_matrices (left) shows the confusion matrix of the Matching task with visual texture clusters and the proportion of haptic texture clusters selected in response.\\E$"}
{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults/haptic_visual_clusters_confusion_matrices (right) shows the confusion matrix of the Matching task with visual texture ranks and the proportion of haptic texture clusters selected in response.\\E$"}
{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults/haptic_visual_clusters_confusion_matrices Confusion matrices of the visual texture (left) or rank (right) with the corresponding haptic texture clusters selected in proportion.\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q[ Holm-Bonferroni adjusted binomial test results are marked in bold when the proportion is higher than chance (i.e. more than 20, 0.05).\\E$"}
{"rule":"EN_A_VS_AN","sentence":"^\\QA non-parametric on an model was used on the Difficulty and Realism question results, while the other question results were analyzed using Wilcoxon signed-rank tests.\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QOn Difficulty, there were statistically significant effects of Task (1 57 13, 0.001) and of Modality (1 57 8, 0.007), but no interaction effect Task Modality (1 57 2, ).\\E$"}
{"rule":"UPPERCASE_SENTENCE_START","sentence":"^\\Qresults/questions_modalitiesresults/questions_tasks\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q[ Questions were bipolar 100-points scales (0 = Very Low and 100 = Very High, except for Performance where 0 = Perfect and 100 = Failure), with increments of 5.\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\Q] Code Question Mental Demand How mentally demanding was the task?\\E$"}
{"rule":"NON_STANDARD_WORD","sentence":"^\\QTouching, grasping and manipulating virtual objects are fundamental interactions in ([related_work]ve_tasks) and essential for many of its applications ([related_work]ar_applications).\\E$"}
{"rule":"AND_END","sentence":"^\\QManipulation of virtual objects is achieved using a virtual hand interaction technique that represents the user's hand in the and simulates interaction with virtual objects ([related_work]ar_virtual_hands).\\E$"}
{"rule":"THE_PUNCT","sentence":"^\\QHowever, direct hand manipulation is still challenging due to the intangibility of the , the lack of mutual occlusion between the hand and the virtual object in - ([related_work]ar_displays), and the inherent delays between the user's hand and the result of the interaction simulation ([related_work]ar_virtual_hands).\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QTo this end, we selected in the literature and compared the most popular visual hand renderings used to interact with virtual objects in .\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QThe main contributions of this chapter are: A comparison from the literature of six common visual hand renderings used to interact with virtual objects in .\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QWe present the results of the user study and discuss the implications of these results for the manipulation of virtual objects directly with the hand in .\\E$"}
{"rule":"COMMA_PARENTHESIS_WHITESPACE","sentence":"^\\QResults showed that 95 of the contacts between the fingertip and the virtual cube happened at speeds below 1.5 .\\E$"}
{"rule":"DOUBLE_PUNCTUATION","sentence":"^\\QParticipants rated their expertise (I use it more than once a year) with , , and haptics in a pre-experiment questionnaire.\\E$"}
{"rule":"THE_SENT_END","sentence":"^\\QIt is technically and conceptually closely related to , which completely replaces perception with a .\\E$"}
{"rule":"THE_PUNCT","sentence":"^\\QOn the original reality-virtuality continuum of \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, augmented virtuality is also considered, as the incorporation of real objects into a , and is placed between and .\\E$"}
{"rule":"THE_SENT_END","sentence":"^\\QIn this thesis we call / systems the computational set of hardware (input devices, sensors, displays and haptic devices) and software (tracking, simulation and rendering) that allows the user to interact with the .\\E$"}
{"rule":"THE_PUNCT","sentence":"^\\QBecause the visuo-haptic is displayed in real time and aligned with the , the user is given the illusion of directly perceiving and interacting with the virtual content as if it were part of the .\\E$"}
{"rule":"THE_SENT_END","sentence":"^\\QBecause the visuo-haptic is displayed in real time and aligned with the , the user is given the illusion of directly perceiving and interacting with the virtual content as if it were part of the .\\E$"}

View File

@@ -1,3 +0,0 @@
{"rule":"D_N","sentence":"^\\QL'intégration de l'haptique portable avec la RA immersive pour créer un environnement augmenté visuo-haptique est cependant complexe et présente de nombreux défis.\\E$"}
{"rule":"PAS_DE_TRAIT_UNION","sentence":"^\\QLes sensations haptiques virtuelles ne sont alors pas co-localisées avec le contact réel, ce qui peut affecter la perception globale.\\E$"}
{"rule":"J_N","sentence":"^\\QLes textures sont en effet l'une des principales propriétés perçues d'un objet \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, aussi bien par la vue que le toucher \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q, et une augmentation haptique bien étudiée \\E(?:Dummy|Ina|Jimmy-)[0-9]+\\Q.\\E$"}

View File

@@ -5,12 +5,9 @@
 \bigskip
+%This PhD manuscript shows how wearable haptics, worn on the outside of the hand, can improve direct hand interaction in immersive \AR by augmenting the perception of the virtual content and its manipulation. \comans{JG}{I was wondering what the difference between an immersive AR headset and a non-immersive AR headset should be. If there is a difference (e.g., derived through headset properties by FoV), it should be stated. If there is none, I would suggest not using the term immersive AR headset but simply AR headset. On this account, in Figure 1.5 another term (“Visual AR Headset”) is introduced (and later OST-AR systems, c.f. also section 2.3.1.3).}{The terms "immersive AR headset" and "visual AR headset" have been replaced by the more appropriate term "AR headset".}
-In this manuscript thesis, we show how immersive \AR, which integrates visual virtual content into the real world perception, and wearable haptics, which provide tactile sensations on the skin, can improve the free and direct interaction of virtual objects with the hand.
+In this manuscript thesis, we show how \AR headset, which integrates visual virtual content into the real world perception, and wearable haptics, which provide tactile sensations on the skin, can improve direct hand interaction with virtual and augmented objects.
 Our goal is to enable users to perceive and interact with wearable visuo-haptic augmentations in a more realistic and effective way, as if they were real.
-%interaction of the hand with the virtual content.%, moving towards a seamless integration of the virtual into the real world.
-%We are particularly interested in enabling direct contact of virtual and augmented objects with the bare hand.
-%The aim of this thesis is to understand how immersive visual and wearable haptic augmentations complement each other in the context of direct hand perception and manipulation with virtual and augmented objects.
 \section{Visual and Haptic Object Augmentations}
 \label{visuo_haptic_augmentations}
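The \comans macro used above (added by commit f42733a7e7) takes a reviewer tag, a comment and an answer; its definition is not part of this diff. A minimal sketch of what such a macro might look like, assuming the todonotes package (the repository may define it differently, e.g. on top of the changes package added in 4cbd2b7d0a):

    % Hypothetical definition of \comans{reviewer}{comment}{answer};
    % the actual macro is not shown in this diff.
    \usepackage[colorinlistoftodos]{todonotes}
    \newcommand{\comans}[3]{%
      \todo[inline,color=blue!15]{\textbf{#1:} #2\\ \textbf{Answer:} #3}%
    }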
@@ -73,7 +70,7 @@ However, the \textbf{integration of wearable haptics with \AR has been little ex
 \subsectionstarbookmark{Augmented Reality Is Not Only Visual}
-\textbf{\emph{Augmented Reality (\AR)} integrates virtual content into the real world perception, creating the illusion of a unique \emph{augmented environment}} \cite{azuma1997survey,skarbez2021revisiting}.
+\textbf{\emph{Augmented Reality (\AR)} integrates virtual content into the real world perception, creating the illusion of a unique \emph{\AE}} \cite{azuma1997survey,skarbez2021revisiting}.
 It thus promises natural and seamless interaction with physical and digital objects (and their combination) directly with our hands \cite{billinghurst2021grand}.
 It is technically and conceptually closely related to \emph{\VR}, which completely replaces \emph{\RE} perception with a \emph{\VE}.
@@ -100,17 +97,15 @@ For example, (visual) \AR using a real object as a proxy to manipulate a virtual
 In this thesis we call \AR/\VR \emph{systems} the computational set of hardware (input devices, sensors, displays and haptic devices) and software (tracking, simulation and rendering) that allows the user to interact with the \VE. % by implementing the interaction loop we proposed in \figref{interaction-loop}.
 Many \AR displays have been explored, from projection systems to hand-held displays.
-\textbf{\AR headsets are the most promising display technology as they are portable and provide the user with an immersive augmented environment} \cite{hertel2021taxonomy}.
+\textbf{\AR headsets are the most promising display technology because they create a portable experience that allows the user to navigate the \AE and interact with it directly using their hands} \cite{hertel2021taxonomy}.
+Commercial headsets also have integrated real-time self-location and mapping of the \RE and hand pose estimation of the user.
 While \AR and \VR systems can address any of the human senses, most focus only on visual augmentation \cite[p.144]{billinghurst2015survey} and \cite{kim2018revisiting}.
-%but the most \textbf{promising devices are \AR headsets}, which are \textbf{portable displays worn directly on the head}, providing the user with an \textbf{immersive visual augmented environment}.
 \emph{Presence} is the illusion of \enquote{being there} when in \VR, or the illusion of the virtual content to \enquote{feel here} when in \AR \cite{slater2022separate,skarbez2021revisiting}.
 One of the most important aspects of this illusion is the \emph{plausibility}, \ie the illusion that the virtual events are really happening. %, even if the user knows that they are not real.
-However, when an \AR/\VR system lacks haptic feedback, it may create a deceptive and incomplete user experience when the hand reaches the virtual content.
+However, when an \AR/\VR headset lacks haptic feedback, it may create a deceptive and incomplete user experience when the hand reaches the virtual content.
 All (visual) virtual objects are inherently intangible and cannot physically constrain a user's hand, making it difficult to perceive their properties and interact with them with confidence and efficiency.
 It is also necessary to provide a haptic feedback that is coherent with the virtual objects and ensures the best possible user experience, as we argue in the next section.
-The \textbf{integration of wearable haptics with immersive \AR appears to be one of the most promising solutions}, but it remains challenging due to their respective limitations and the additional constraints of combining them, as we will overview in the next section.
+The \textbf{integration of wearable haptics with \AR headsets appears to be one of the most promising solutions}, but it remains challenging due to their respective limitations and the additional constraints of combining them, as we will overview in the next section.
 \begin{subfigs}{visuo-haptic-environments}{Visuo-haptic environments with varying degrees of reality-virtuality. }[][
 \item \AR environment with a real haptic object used as a proxy to manipulate a virtual object \cite{kahl2023using}.
@@ -128,52 +123,49 @@ The \textbf{integration of wearable haptics with immersive \AR appears to be one
 \section{Research Challenges of Wearable Visuo-Haptic Augmented Reality}
 \label{research_challenges}
-The integration of wearable haptics with \AR to create a visuo-haptic augmented environment is complex and presents many perceptual and interaction challenges.
+The integration of wearable haptics with \AR headsets to create a visuo-haptic \AE is complex and presents many perceptual and interaction challenges.
-% \ie sensing the augmented environment and acting effectively upon it.
-In this thesis, we propose to \textbf{represent the user's experience with such a visuo-haptic augmented environment as an interaction loop}, shown in \figref{interaction-loop}.
+In this thesis, we propose to \textbf{represent the user's experience with such a visuo-haptic \AE as an interaction loop}, shown in \figref{interaction-loop}.
 It is based on the interaction loops of users with \ThreeD systems \cite[p.84]{laviolajr20173d}.
 The \RE and the user's hand are tracked in real time by sensors and reconstructed in visual and haptic \VEs.
-The interactions between the virtual hand and objects are then simulated, and rendered as feedback to the user using a \AR/\VR headset and wearable haptics.
+The interactions between the virtual hand and objects are then simulated, and rendered as feedback to the user using an \AR/\VR headset and wearable haptics.
 Because the visuo-haptic \VE is displayed in real time and aligned with the \RE, the user is given the illusion of directly perceiving and interacting with the virtual content as if it were part of the \RE.
-\fig{interaction-loop}{The interaction loop between a user and a visuo-haptic augmented environment as proposed in this thesis.}[
+\fig{interaction-loop}{The interaction loop between a user and a visuo-haptic \AE as proposed in this thesis.}[
 A user interacts with the visual (in blue) and haptic (in red) \VEs through a virtual hand (in purple) interaction technique that tracks real hand movements and simulates contact with virtual objects.
-The visual and haptic \VEs are rendered back using an immersive \AR headset and wearable haptics, and are perceived by the user to be registered and co-localized with the \RE (in gray).
+The visual and haptic \VEs are rendered back using an \AR headset and wearable haptics, and are perceived by the user to be registered and co-localized with the \RE (in gray).
-%\protect\footnotemark
+\protect\footnotemark
 ]
 In this context, we focus on two main research challenges:
 \textbf{(I) providing plausible and coherent visuo-haptic augmentations}, and
-\textbf{(II) enabling effective manipulation of the augmented environment}.
+\textbf{(II) enabling effective manipulation of the \AE}.
-Each of these challenges also raises numerous design, technical, perceptual and user experience issues specific to wearable haptics and immersive \AR.
+Each of these challenges also raises numerous design, technical, perceptual and user experience issues specific to wearable haptics and \AR headsets.
-%, as well as virtual rendering and user experience issues.% in integrating these two sensorimotor feedbacks into a coherent and seamless visuo-haptic augmented environment.
-%\footnotetext{%
-% The icons are \href{https://creativecommons.org/licenses/by/3.0/}{CC BY} licensed:
-% \enquote{\href{https://thenounproject.com/icon/finger-pointing-4230346/}{finger pointing}} by \href{https://thenounproject.com/creator/leremy/}{Gan Khoon Lay},
-% \enquote{\href{https://thenounproject.com/icon/hololens-1499195/}{HoloLens}} by \href{https://thenounproject.com/creator/daniel2021/}{Daniel Falk}, and
-% \enquote{\href{https://thenounproject.com/icon/vibration-6478365/}{vibrations}} by \href{https://thenounproject.com/creator/iconbunny/}{Iconbunny}.
-%}
+\footnotetext{%
+The icons are \href{https://creativecommons.org/licenses/by/3.0/}{CC BY} licensed:
+\enquote{\href{https://thenounproject.com/icon/finger-pointing-4230346/}{finger pointing}} by \href{https://thenounproject.com/creator/leremy/}{Gan Khoon Lay},
+\enquote{\href{https://thenounproject.com/icon/hololens-1499195/}{HoloLens}} by \href{https://thenounproject.com/creator/daniel2021/}{Daniel Falk}, and
+\enquote{\href{https://thenounproject.com/icon/vibration-6478365/}{vibrations}} by \href{https://thenounproject.com/creator/iconbunny/}{Iconbunny}.
+}
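The hunk above sketches the interaction loop in prose: sense the real hand and environment, simulate virtual contacts, and render visual and haptic feedback. A schematic sketch of such a loop, with illustrative names that are not from the thesis codebase:

    # Schematic sketch of the tracking-simulation-rendering loop described
    # in the diff above; all names are illustrative assumptions.
    def interaction_loop(sensors, simulation, headset, haptic_device):
        while True:
            # Track the real environment and the user's hand in real time.
            environment_pose = sensors.track_environment()
            hand_pose = sensors.track_hand()
            # Simulate virtual hand-object interactions.
            contacts = simulation.step(hand_pose, environment_pose)
            # Render feedback registered and co-localized with the real world.
            headset.render(simulation.visual_state())
            haptic_device.render(contacts)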
 \subsectionstarbookmark{Challenge I: Providing Plausible and Coherent Visuo-Haptic Augmentations}
 \textbf{Many haptic devices have been designed and evaluated specifically for use in \VR}, providing the user with rich kinesthetic and tactile feedback on virtual objects, increasing the realism and effectiveness of interaction with them \cite{culbertson2018haptics}.
-Although closely related, \AR and \VR have key differences in their respective renderings that can affect user perception.
+Although closely related, \AR and \VR headsets have key differences in their respective renderings that can affect user perception.
 %As such, in \VR, visual sensations are particularly dominant in perception, and conflicts with haptic sensations are also specifically created to influence the user's perception, for example to create pseudo-haptic \cite{ujitoko2021survey} or haptic retargeting \cite{azmandian2016haptic} effects.
-Many hand-held or wearable haptic devices take the form of controllers, gloves or exoskeletons, all of which cover the fingertips and are therefore not suitable for \AR.
+Many hand-held or wearable haptic devices take the form of controllers, gloves or exoskeletons, all of which cover the fingertips and are therefore not suitable for \AR headsets.
 The \textbf{user's hand must be free to touch and interact with the \RE while wearing a wearable haptic device}.
 Instead, it is possible to place the haptic actuator close to the point of contact with the \RE, \eg providing haptic feedback on the nail \cite{ando2007fingernailmounted,teng2021touch}, another phalanx \cite{asano2015vibrotactile,detinguy2018enhancing,salazar2020altering} or the wrist \cite{pezent2019tasbi,sarac2022perceived} for rendering fingertip contact with virtual content.
 Therefore, when touching a virtual or augmented object, \textbf{the real and virtual visual sensations are perceived as co-localized, but the virtual haptic feedback is not}.
-It remains to be investigated how such potential discrepancies affect the overall perception to design visuo-haptic augmentations adapted to \AR.
+It remains to be investigated how such potential discrepancies affect the overall perception to design visuo-haptic augmentations adapted to \AR headsets.
 %So far, most of the \AR studies and applications only add visual and haptic sensations to the user's overall perception of the environment, but conversely it is more difficult to remove sensations.
 %Visual and haptic augmentations of the \RE add sensations to the user's overall perception.
 The \textbf{added visual and haptic virtual sensations may also be perceived as incoherent} with the sensations of the real objects, for example with a lower rendering quality, a temporal latency, a spatial shift, or a combination of these.
-Moreover, in \AR the user can still see the real world environment, including their hands, augmented real objects and worn haptic devices, unlike \VR where there is total control over the visual rendering. % of the hand and \VE.
+Moreover, with an \AR headset the user can still see the real world environment, including their hands, augmented real objects and worn haptic devices, unlike \VR where there is total control over the visual rendering. % of the hand and \VE.
-It is therefore unclear to what extent the real and virtual visuo-haptic sensations will be perceived as a whole, and to what extent they will conflict or complement each other. % in the perception of the augmented environment.
+It is therefore unclear to what extent the real and virtual visuo-haptic sensations will be perceived as a whole, and to what extent they will conflict or complement each other.
-With a better understanding of \textbf{how visual factors can influence the perception of haptic augmentations}, the many wearable haptic systems that already exist but have not yet been fully explored with \AR can be better applied, and new visuo-haptic augmentations adapted to \AR can be designed.
+With a better understanding of \textbf{how visual factors can influence the perception of haptic augmentations}, the many wearable haptic devices that already exist but have not yet been fully explored with \AR can be better applied, and new visuo-haptic augmentations adapted to \AR can be designed.
 \subsectionstarbookmark{Challenge II: Enabling Effective Manipulation of the Augmented Environment}
@@ -185,7 +177,7 @@ When touching a visually augmenting a real object, the user's hand is physically
 However, \textbf{manipulating a purely virtual object with the bare hand can be challenging}, especially without good haptic feedback \cite{maisto2017evaluation,meli2018combining}. %, and one will rely on visual and haptic feedback to guide the interaction.
 In addition, wearable haptic devices are limited to cutaneous feedback, and cannot provide forces to constrain the hand contact with the virtual object \cite{pacchierotti2017wearable}.
-Current \AR systems have visual rendering limitations that also affect interaction with virtual objects. %, due to depth underestimation, a lack of mutual occlusions, and hand tracking latency.
+Current \AR headsets have visual rendering limitations that also affect interaction with virtual objects. %, due to depth underestimation, a lack of mutual occlusions, and hand tracking latency.
 \AR is the display of superimposed images of the virtual world, synchronized with the user's current view of the real world.
 However, the depth perception of virtual objects is often underestimated \cite{peillard2019studying,adams2022depth}.
 There is also often \textbf{a lack of mutual occlusions between the hand and a virtual object}, that is the hand can hide the object or be hidden by the object \cite{macedo2023occlusion}.
@@ -199,11 +191,10 @@ Yet, it is unclear which type of visual and wearable haptic feedback, or their c
 \section{Approach and Contributions}
 \label{contributions}
-%The aim of this thesis is to understand how immersive visual and wearable haptic augmentations complement each other in the context of direct hand perception and manipulation with virtual and augmented objects.
-As we described in \secref{research_challenges}, providing a coherent and effective visuo-haptic augmented environment to a user is complex and raises many issues.
+As we described in \secref{research_challenges}, providing a coherent and effective visuo-haptic \AE to a user is complex and raises many issues.
 Our approach is to:
 \begin{enumerate*}[label=(\arabic*)]
-\item design immersive and wearable visuo-haptic renderings that augment both the objects being interacted with and the hand interacting with them, and
+\item design wearable visuo-haptic renderings that augment both the objects being interacted with and the hand interacting with them, and
 \item evaluate in user studies how these visuo-haptic renderings affect the interaction of these objects with the hand using psychophysical, performance and user experience methods.
 \end{enumerate*}
@@ -227,13 +218,12 @@ Wearable haptic devices have proven effective in modifying the perception of a t
 %It enables rich haptic feedback as the combination of kinesthetic sensation from the real and cutaneous sensation from the actuator.
 However, wearable haptic augmentation with \AR has been little explored, as well as the visuo-haptic augmentation of texture.
 Texture is indeed one of the most fundamental perceived properties of a surface material \cite{hollins1993perceptual,okamoto2013psychophysical}, perceived equally well by sight and touch \cite{bergmanntiest2007haptic,baumgartner2013visual}, and one of the most studied haptic (only, without visual) augmentation \cite{unger2011roughness,culbertson2014modeling,asano2015vibrotactile,strohmeier2017generating,friesen2024perceived}.
-%Coherently substitute the visuo-haptic texture of a surface directly touched by a finger is an important step towards a \AR capable of visually and haptically augmenting the \RE of a user in a plausible way.
-For this first axis of research, we propose to \textbf{design and evaluate the perception of wearable virtual visuo-haptic textures augmenting real surfaces}. %, using an immersive \AR headset and a wearable vibrotactile device.
+For this first axis of research, we propose to \textbf{design and evaluate the perception of wearable virtual visuo-haptic textures augmenting real surfaces}.
 To this end, we (1) design a system for rendering wearable visuo-haptic texture augmentations, to (2) evaluate how the perception of haptic texture augmentations is affected by visual feedback of the virtual hand and the environment, and (3) investigate the perception of co-localized visuo-haptic texture augmentations.
 First, an effective approach to render haptic textures is to generate a vibrotactile signal that represents the finger-texture interaction \cite{culbertson2014modeling,asano2015vibrotactile}.
 Yet, to achieve natural interaction with the hand and coherent visuo-haptic feedback, it requires a real time rendering of the textures, no constraints on hand movements, and good synchronization between the visual and haptic feedback.
-Thus, our first objective is to \textbf{design an immersive, real time system} that allows free exploration of \textbf{wearable visuo-haptic texture augmentations} on real surfaces with the bare hand.
+Thus, our first objective is to \textbf{design a real time system} that allows free exploration of \textbf{wearable visuo-haptic texture augmentations} on real surfaces with the bare hand.
 This will form the basis of the next two chapters in this section.
 Second, many works have investigated the haptic augmentations of texture, but none have integrated them with \AR and \VR, or considered the influence of visual feedback on their perception.
@@ -241,22 +231,22 @@ Still, it is known that visual feedback can alter the perception of real and vir
 Hence, our second objective is to \textbf{evaluate how the perception of wearable haptic texture augmentation is affected by the visual feedback of the virtual hand and the environment} (real, augmented or virtual).
 Finally, visuo-haptic texture databases have been created from real texture captures \cite{culbertson2014penn,balasubramanian2024sens3} to be rendered as virtual textures with hand-held haptic devices that are perceived as similar to real textures \cite{culbertson2015should,friesen2024perceived}.
-However, the rendering of these textures in an immersive and natural visuo-haptic \AR using wearable haptics remains to be investigated.
+However, the rendering of these textures with and \AR headset and wearable haptics remains to be investigated.
 Our third objective is to \textbf{evaluate the perception of simultaneous and co-localized wearable visuo-haptic texture augmentations} of real surfaces in \AR. %, and to understand to what extent each sensory modality contributes to the overall perception of the augmented texture.
 \subsectionstarbookmark{Axis II: Improving the Manipulation of Virtual Objects}
-In immersive and wearable visuo-haptic \AR, the hand is free to touch and interact seamlessly with real, augmented and virtual objects.
+With wearable visuo-haptic \AR, the hand is free to touch and interact seamlessly with real, augmented and virtual objects.
 Hence, a user can expect natural and direct contact and manipulation of virtual objects with the bare hand.
-However, the intangibility of the visual \VE, the display limitations of current visual \OST-\AR systems and the inherent spatial and temporal discrepancies between the user's hand actions and the visual feedback in the \VE can make interaction with virtual objects particularly challenging.
+However, the intangibility of the visual \VE, the display limitations of current \AR headsets and the inherent spatial and temporal discrepancies between the user's hand actions and the visual feedback in the \VE can make interaction with virtual objects particularly challenging.
 %However, the intangibility of the virtual visual environment, the lack of kinesthetic feedback of wearable haptics, the visual rendering limitations of current \AR systems, as well as the spatial and temporal discrepancies between the \RE, the visual feedback, and the haptic feedback, can make the interaction with virtual objects with bare hands particularly challenging.
-Two particular sensory feedbacks are known to improve such direct virtual object manipulation, but have not been properly investigated in immersive \AR: visual feedback of the virtual hand \cite{piumsomboon2014graspshell,prachyabrued2014visual} and delocalized haptic feedback \cite{lopes2018adding,teng2021touch}.
+Two particular sensory feedbacks are known to improve such direct virtual object manipulation, but have not been properly investigated with \AR headsets: visual feedback of the virtual hand \cite{piumsomboon2014graspshell,prachyabrued2014visual} and delocalized haptic feedback \cite{lopes2018adding,teng2021touch}.
-For this second axis of research, we propose to \textbf{design and evaluate visuo-haptic augmentations of the hand as interaction feedback with virtual objects} in immersive \OST-\AR.
+For this second axis of research, we propose to \textbf{design and evaluate visuo-haptic augmentations of the hand as interaction feedback with virtual objects} in \AR.
 We consider the effect on user performance and experience of (1) the visual feedback of the virtual hand as augmentation of the real hand and (2) different delocalized haptic feedback of virtual object manipulation with the hand in combination with visual hand augmentations.
 First, the visual feedback of the virtual hand is a key element for interacting and manipulating virtual objects in \VR \cite{prachyabrued2014visual,grubert2018effects}.
-Some work has also investigated the visual feedback of the virtual hand in \AR, but not in an immersive context of virtual object manipulation \cite{blaga2017usability,yoon2020evaluating} or was limited to a single visual hand augmentation \cite{piumsomboon2014graspshell,maisto2017evaluation}. % with the bare hand.% from simulating mutual occlusions between the hand and virtual objects \cite{piumsomboon2014graspshell,al-kalbani2016analysis} to displaying the virtual hand as an avatar overlay \cite{blaga2017usability,yoon2020evaluating}, augmenting the real hand.
+Some work has also investigated the visual feedback of the virtual hand in \AR, but not in a context of virtual object manipulation with a headset \cite{blaga2017usability,yoon2020evaluating} or was limited to a single visual hand augmentation \cite{piumsomboon2014graspshell,maisto2017evaluation}. % with the bare hand.% from simulating mutual occlusions between the hand and virtual objects \cite{piumsomboon2014graspshell,al-kalbani2016analysis} to displaying the virtual hand as an avatar overlay \cite{blaga2017usability,yoon2020evaluating}, augmenting the real hand.
-\OST-\AR also has significant perceptual differences from \VR due to the visibility of the real hand and environment, which can affect user experience and performance \cite{yoon2020evaluating}.
+\AR headsets also has significant perceptual differences from \VR due to the visibility of the real hand and environment, which can affect user experience and performance \cite{yoon2020evaluating}.
 %, and these visual hand augmentations have not been evaluated .
 Thus, our fourth objective is to \textbf{investigate the visual feedback of the virtual hand as augmentation of the real hand} for direct hand manipulation of virtual objects.
@@ -284,10 +274,10 @@ We then address each of our two research axes in a dedicated part.
 In \textbf{\partref{perception}}, we present our contributions to the first axis of research: modifying the visuo-haptic texture perception of real surfaces.
 We evaluate how the visual feedback of the hand (real or virtual), the environment (\AR or \VR) and the textures (coherent, different or not shown) affect the perception of virtual vibrotactile textures rendered on real surfaces and touched directly with the index finger.
-In \textbf{\chapref{vhar_system}}, we design and implement a system for rendering visuo-haptic virtual textures that augment real surfaces. %, using an immersive \OST-\AR headset and a wearable vibrotactile device.
+In \textbf{\chapref{vhar_system}}, we design and implement a system for rendering visuo-haptic virtual textures that augment real surfaces.
 The haptic textures represent a periodical patterned texture rendered by a wearable vibrotactile actuator worn on the middle phalanx of the finger touching the surface.
 The pose estimation of the real hand and the environment is achieved using a vision-based technique.
-The visual rendering is done using the immersive \OST-\AR headset Microsoft HoloLens~2.
+The visual rendering is done using the \OST-\AR headset Microsoft HoloLens~2.
 The system allows free visual and haptic exploration of the textures, as if they were real, and forms the basis of the next two chapters.
 In \textbf{\chapref{xr_perception}}, we investigate in a psychophysical user study how different the perception of haptic texture augmentations is in \AR \vs \VR and when touched by a virtual hand \vs one's own hand.
@@ -298,7 +288,7 @@ The virtual textures are paired visual and haptic captures of real surfaces \cit
 Our objective is to assess the perceived realism, coherence and roughness of the combination of nine representative visuo-haptic texture pairs.
 \noindentskip
-In \textbf{\partref{manipulation}}, we describe our contributions to the second axis of research: improving direct hand manipulation of virtual objects using visuo-haptic augmentations of the hand as interaction feedback with virtual objects in immersive \OST-\AR.
+In \textbf{\partref{manipulation}}, we describe our contributions to the second axis of research: improving direct hand manipulation of virtual objects using visuo-haptic augmentations of the hand as interaction feedback with virtual objects in \AR.
 In \textbf{\chapref{visual_hand}}, we investigate in a user study six visual feedback as hand augmentations, as a set of the most popular hand augmentation in the \AR literature.
 Using the \OST-\AR headset Microsoft HoloLens~2, we evaluate their effect on user performance and experience in two representative manipulation tasks: push-and-slide and grasp-and-place a virtual object directly with the hand.

View File

@@ -91,13 +91,13 @@ As illustrated in \figref{sensorimotor_continuum}, \textcite{jones2006human} del
 \item \emph{Gestures}, or non-prehensible skilled movements, are motor activities without constant contact with an object. Examples include pointing at a target, typing on a keyboard, accompanying speech with gestures, or signing in sign language \cite{yoon2020evaluating}.
 \end{itemize}
 \fig[0.65]{sensorimotor_continuum}{ The sensorimotor continuum of the hand function proposed by and adapted from \textcite{jones2006human}.}[%
 Functions of the hand are classified into four categories based on the relative importance of sensory and motor components.
 \protect\footnotemark
 ]
 This classification has been further refined by \textcite{bullock2013handcentric} into 15 categories of possible hand interactions with an object.
-In this thesis, we are interested in exploring visuo-haptic texture augmentations (\partref{perception}) and grasping of virtual objects (\partref{manipulation}) using immersive \AR and wearable haptics.
+In this thesis, we are interested in exploring visuo-haptic texture augmentations (\partref{perception}) and grasping of virtual objects (\partref{manipulation}) using an \AR headset and wearable haptics.
 \subsubsection{Hand Anatomy and Motion}
 \label{hand_anatomy}
@@ -115,8 +115,8 @@ Each finger is formed by a chain of 3 phalanges, proximal, middle and distal, ex
The joints at the base of each phalanx allow flexion and extension, \ie folding and unfolding movements relative to the preceding bone. The joints at the base of each phalanx allow flexion and extension, \ie folding and unfolding movements relative to the preceding bone.
The proximal phalanges can also adduct and abduct, \ie move the fingers towards and away from each other. The proximal phalanges can also adduct and abduct, \ie move the fingers towards and away from each other.
Finally, the metacarpal of the thumb is capable of flexion/extension and adduction/abduction, which allows the thumb to oppose the other fingers. Finally, the metacarpal of the thumb is capable of flexion/extension and adduction/abduction, which allows the thumb to oppose the other fingers.
These axes of movement are called \DoFs and can be represented by a \emph{kinematic model} of the hand with 27 \DoFs as shown in \figref{blausen2014medical_hand}.
Thus, the thumb has 5 \DoFs, each of the other four fingers has 4 \DoFs and the wrist has 6 \DoFs and can take any position (3 \DoFs) or orientation (3 \DoFs) in space \cite{erol2007visionbased}.
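As a quick sanity check of this count (breakdown ours, consistent with the numbers above), these contributions sum to
\begin{equation*}
    \underbrace{5}_{\text{thumb}} + \underbrace{4 \times 4}_{\text{other fingers}} + \underbrace{6}_{\text{wrist}} = 27
\end{equation*}
\DoFs for the kinematic model of the hand.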
This complex structure enables the hand to perform a wide range of movements and gestures. However, the way we explore and grasp objects follows simpler patterns, depending on the object being touched and the aim of the interaction.


@@ -239,7 +239,7 @@ More complex models were then created to more systematically capture everyday te
This led to the release of the \HaTT database, a public set of stylus recordings and models of 100 haptic textures \cite{culbertson2014one}.
A similar database, but captured from a direct touch context with the fingertip, has also recently been released \cite{balasubramanian2024sens3}.
A common limitation of these data-driven models is that they can only render \emph{isotropic} textures: the recording does not depend on the position of the measurement, and the rendering is the same regardless of the direction of the movement.
This was eventually addressed by including the user's velocity direction in the capture, modelling and rendering of the textures \cite{abdulali2016datadriven,abdulali2016datadrivena}.
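To make these data-driven models concrete, a minimal sketch (notation ours; the cited models differ in structure and estimation details) treats the output vibration as an autoregressive process whose coefficients are interpolated from the recordings according to the user's scanning velocity $v$ and normal force $f$:
\begin{equation*}
    a_t = \sum_{i=1}^{p} c_i(v, f) \, a_{t-i} + \varepsilon_t,
\end{equation*}
where $a_t$ is the acceleration sample driving the vibrotactile actuator at time step $t$ and $\varepsilon_t$ is a noise excitation matched to the residual power of the recordings. An isotropic model conditions only on the speed magnitude, whereas a model such as that of \textcite{abdulali2016datadriven} also conditions on the direction of $v$.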
%A third approach is to model
%Alternative models have been proposed to both render both isotropic and patterned textures \cite{chan2021hasti}., or to simulate the vibrations from the (visual) texture maps used to visually render a \ThreeD object \cite{chan2021hasti}.


@@ -1,18 +1,9 @@
\section{Manipulating Objects with the Hands in AR}
\label{augmented_reality}
\AR devices generate and integrate virtual content into the user's perception of their real environment (\RE), creating the illusion of the \emph{presence} of the virtual \cite{azuma1997survey,skarbez2021revisiting}.
Among the different types of devices, \AR headsets leave the hands free to interact with virtual objects.
This promises natural and intuitive interactions similar to those with everyday real objects \cite{billinghurst2021grand,hertel2021taxonomy}.
\subsection{What is Augmented Reality?}
\label{what_is_ar}
@@ -23,15 +14,12 @@ Fixed to the ceiling, the headset displayed a stereoscopic (one image per eye) p
\subsubsection{A Definition of AR}
\label{ar_definition}
The first formal definition of \AR was proposed by \textcite{azuma1997survey}: an \AR system must (1) combine real and virtual, (2) be interactive in real time, and (3) register real and virtual\footnotemark.
Each of these characteristics is essential: the real-virtual combination distinguishes \AR from \VR, a movie with integrated digital content is not interactive, and a 2D overlay like an image filter is not registered.
There are also two key aspects of this definition: it does not focus on technology or method, but on the user's perspective of the system experience, and it does not specify a particular human sense, \ie it can be auditory \cite{yang2022audio}, haptic \cite{bhatia2024augmenting}, or even olfactory \cite{brooks2021stereosmell} or gustatory \cite{brooks2023taste}.
Yet, most research has focused on visual augmentation, and the term \AR (without a prefix) is almost always understood as visual \AR.
\footnotetext{This third characteristic has been slightly adapted to use the version of \textcite{marchand2016pose}; the original definition was \enquote{registered in \ThreeD}.}
\begin{subfigs}{ar_applications}{Examples of \AR applications. }[][
\item Visuo-haptic surgery training with cutting into virtual soft tissues \cite{harders2009calibration}.
@@ -59,14 +47,13 @@ However, the user experience in \AR is still highly dependent on the display use
\label{ar_displays}
To experience virtual content combined and registered with the \RE, an output device that displays the \VE to the user is necessary.
There is a large variety of \AR displays with different methods of combining the real and virtual content, and different locations on the \RE or the user \cite[p.126]{billinghurst2015survey}.
In \emph{\VST-\AR}, the virtual images are superimposed on images of the \RE captured by a camera \cite{marchand2016pose}, and the combined real-virtual image is displayed on a screen to the user, as illustrated in \figref{itoh2022indistinguishable_vst}. An example is shown in \figref{hartl2013mobile}.
This augmented view through the camera has the advantage of complete control over the real-virtual combination, such as mutual occlusion between real and virtual objects \cite{macedo2023occlusion}, coherent lighting, and no delay between the real and virtual images \cite{kruijff2010perceptual}.
However, due to the camera and the screen, the user's view is degraded with a lower resolution, frame rate, field of view, and an overall visual latency compared to proprioception \cite{kruijff2010perceptual}.
An \emph{\OST-\AR} display directly combines the virtual images with the real world view using a transparent optical system \cite{itoh2022indistinguishable}, like augmented glasses, as illustrated in \figref{itoh2022indistinguishable_ost}. An example is shown in \figref{lee2013spacetop}.
These displays feature a direct, preserved view of the \RE at the cost of more difficult registration (spatial misalignment or temporal latency between the real and virtual content) \cite{grubert2018survey} and mutual real-virtual occlusion \cite{macedo2023occlusion}.
Finally, \emph{projection-based \AR} overlays the virtual images on the real world using a projector, as illustrated in \figref{roo2017one_2}, \eg \figref{roo2017inner}.
@@ -86,16 +73,13 @@ It doesn't require the user to wear the display, but requires a real surface to
Regardless of the \AR display, it can be placed at different locations \cite{bimber2005spatial}, as shown in \figref{roo2017one_1}.
\emph{Spatial \AR} usually refers to projection-based displays placed at a fixed location (\figref{roo2017inner}), but it can also be \OST or \VST \emph{fixed windows} (\figref{lee2013spacetop}).
Alternatively, \AR displays can be \emph{hand-held}, like a \VST smartphone (\figref{hartl2013mobile}), or body-attached, like a micro-projector used as a flashlight \cite[p.141]{billinghurst2015survey}.
Finally, \AR displays can be head-worn like \VR \emph{headsets} or glasses, providing a portable experience.
\fig[0.75]{roo2017one_1}{Locations of \AR displays from eye-worn to spatially projected. Adapted by \textcite{roo2017one} from \textcite{bimber2005spatial}.}
\subsubsection{Presence and Embodiment in AR}
\label{ar_presence_embodiment}
Presence and embodiment are two key concepts that characterize the user experience in \AR and \VR.
While there is a large literature on these topics in \VR, they are less defined and studied for \AR \cite{genay2022being,tran2024survey}.
These concepts will be useful for the design, evaluation, and discussion of our contributions:
@@ -104,7 +88,8 @@ In particular, we will investigate the effect of the visual feedback of the virt
\paragraph{Presence}
\label{ar_presence}
\AR and \VR are both essentially illusions.
The virtual content does not actually exist physically, but is only digitally simulated and rendered to the user's senses through display devices.
This suspension of disbelief in \VR is what is called \emph{presence}, and it can be decomposed into two dimensions: place illusion and plausibility \cite{slater2009place,slater2022separate}.
\emph{Place illusion} is the user's sense of \enquote{being there} in the \VE (\figref{presence-vr}).
It emerges from the real-time rendering of the \VE from the user's perspective: being able to move around inside the \VE and look from different points of view.
@@ -112,15 +97,12 @@ It emerges from the real time rendering of the \VE from the user's perspective:
It doesn't mean that the virtual events are realistic, \ie that they reproduce the real world with high fidelity \cite{skarbez2017survey}, but that they are believable and coherent with the user's expectations.
In the same way, a film can be plausible even if it is not realistic, such as a cartoon or a science-fiction movie.
For \AR, \textcite{slater2022separate} proposed to invert place illusion to what we can call \enquote{object illusion}, \ie the sense that the virtual object \enquote{feels here} in the \RE (\figref{presence-ar}).
As with \VR, virtual objects must be able to be seen from different angles by moving the head, but also, which is more difficult, appear to be coherent enough with the \RE \cite{skarbez2021revisiting}, \eg occlude or be occluded by real objects \cite{macedo2023occlusion}, cast shadows, or reflect light.
Plausibility can be applied to \AR as is, but the virtual objects must additionally have knowledge of the \RE and react to it accordingly to be, again, perceived as behaving coherently with the real world \cite{skarbez2021revisiting}.
\begin{subfigs}{presence}{
The sense of immersion in virtual and \AEs. Adapted from \textcite{stevens2002putting}.
}[][
\item Place illusion is the user's sense of \enquote{being there} in the \VE.
\item Object illusion is the sense that the virtual object \enquote{feels here} in the \RE.
@@ -142,7 +124,7 @@ In \AR, it could take the form of body accessorization, \eg wearing virtual clot
\subsection{Direct Hand Manipulation in AR}
\label{ar_interaction}
A user in \AR must be able to interact with the virtual content to fulfil the second point of \textcite{azuma1997survey}'s definition (\secref{ar_definition}) and complete our proposed visuo-haptic interaction loop (\figref[introduction]{interaction-loop}).
In all examples of \AR applications shown in \secref{ar_applications}, the user interacts with the \VE using their hands, either directly or through a physical interface.
\subsubsection{User Inputs and Interaction Techniques}
@@ -150,25 +132,23 @@ In all examples of \AR applications shown in \secref{ar_applications}, the user
For a user to interact with a computer system (desktop, mobile, \AR, etc.), they first perceive the state of the system and then act upon it through an input device \cite[p.145]{laviolajr20173d}.
Such input devices form an input \emph{\UI} that captures and translates the user's actions to the computer.
Similarly, an output \UI renders and displays the state of the system to the user (such as an \AR/\VR display, \secref{ar_displays}, or a haptic actuator, \secref{wearable_haptic_devices}).
Input \UIs can be either \emph{active sensing}, \ie a held or worn device, such as a mouse, a touch screen, or a hand-held controller, or \emph{passive sensing}, which does not require contact, such as eye trackers, voice recognition, or hand tracking \cite[p.294]{laviolajr20173d}.
The captured information from the sensors is then translated into actions within the computer system by an \emph{interaction technique}.
For example, a cursor on a screen can be moved either with a mouse or with the arrow keys on a keyboard, or a two-finger swipe on a touchscreen can be used to scroll or zoom an image.
Choosing useful and efficient \UIs and interaction techniques is crucial for the user experience and the tasks that can be performed within the system.
\subsubsection{Tasks with Virtual Environments}
\label{ve_tasks}
\textcite{laviolajr20173d} (p.385) classify interaction techniques into three categories based on the tasks they enable users to perform: manipulation, navigation, and system control.
\textcite{hertel2021taxonomy} proposed a similar taxonomy of interaction techniques specifically for \AR headsets.
The \emph{manipulation tasks} are the most fundamental tasks in \AR and \VR systems, and the building blocks for more complex interactions.
\emph{Selection} is the identification or acquisition of a specific virtual object, \eg pointing at a target as in \figref{grubert2015multifi}, touching a button with a finger, or grasping an object with a hand.
\emph{Positioning} and \emph{rotation} of a selected object are changes to its position and orientation in \ThreeD space, respectively.
It is also common to \emph{resize} a virtual object (\figref{piumsomboon2013userdefined_2}).
These three operations are geometric (rigid) manipulations of the object: they do not change its shape.
The \emph{navigation tasks} are the movements of the user within the \VE.
@@ -177,33 +157,33 @@ Wayfinding is the cognitive planning of the movement, such as path finding or ro
The \emph{system control tasks} are changes to the system state through commands or menus, such as creating, deleting, or modifying virtual objects, \eg as in \figref{roo2017onea}. They also include the input of text, numbers, or symbols.
\comans{JG}{In, Figure 2.24 I suggest removing d. or presenting it as separate figure as it shows no interaction technique (The caption is “Interaction techniques in AR” but a visualization of a spatial registration technique).}{It has been removed and replaced by an example of resizing a virtual object.}
In this thesis, we focus on manipulation tasks of virtual content directly with the hands, more specifically on touching visuo-haptic textures with a finger (\partref{perception}) and positioning and rotating virtual objects pushed and grasped by the hand (\partref{manipulation}).
\begin{subfigs}{interaction-techniques}{Interaction techniques in \AR. }[][
\item Spatial selection of a virtual item of an extended display using a hand-held smartphone \cite{grubert2015multifi}.
\item Resizing a virtual object with a bimanual gesture \cite{piumsomboon2013userdefined}.
\item Displaying the route to follow as an overlay registered on the \RE \cite{grubert2017pervasive}.
\item Virtual drawing on a real object with a hand-held pen \cite{roo2017onea}.
]
\subfigsheight{35.5mm}
\subfigbox{grubert2015multifi}
\subfigbox{piumsomboon2013userdefined_2}
\subfigbox{grubert2017pervasive}
\subfigbox{roo2017onea}
\end{subfigs}
\subsubsection{The Gap between Real and Virtual}
\label{real_virtual_gap}
In \AR and \VR, the state of the system is displayed to the user as a \ThreeD spatial \VE.
With an \AR headset, the \VE can be experienced at a 1:1 scale and as an integral part of the \RE.
The rendering gap between the real and virtual elements, as described in our interaction loop in \figref[introduction]{interaction-loop}, is thus experienced as narrow or even not consciously perceived by the user.
This manifests as a sense of presence of the virtual, as described in \secref{ar_presence}.
As the gap between real and virtual rendering is reduced, one could expect a similar and seamless interaction with the \VE as with the \RE, which \textcite{jacob2008realitybased} called \emph{reality-based interactions}.
As of today, an \AR system tracks itself with the user in \ThreeD, using tracking sensors and pose estimation algorithms \cite{marchand2016pose}.
It enables the \VE to be registered with the \RE and the user simply moves to navigate within the virtual content.
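As a minimal formalization of this registration (notation following \textcite{marchand2016pose}), the system estimates at each frame the pose ${}^{c}\mathbf{T}_{w} \in SE(3)$ of the world frame $w$ expressed in the camera (display) frame $c$; a virtual point ${}^{w}\mathbf{X}$ anchored in the \RE is then rendered at the image position
\begin{equation*}
    \mathbf{x} = \pi \left( {}^{c}\mathbf{T}_{w} \, {}^{w}\mathbf{X} \right),
\end{equation*}
where $\pi$ is the projection of the (real or virtual) camera. Errors in this estimated pose appear to the user as the spatial misalignment between real and virtual content mentioned in \secref{ar_displays}.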
However, direct hand manipulation of virtual content is a challenge that requires specific interaction techniques \cite{billinghurst2021grand}.
It is often achieved using two interaction techniques: \emph{tangible objects} and \emph{virtual hands} \cite[p.165]{billinghurst2015survey}.
@@ -211,10 +191,8 @@ It is often achieved using two interaction techniques: \emph{tangible objects} a
\label{ar_tangibles}
As \AR integrates visual virtual content into \RE perception, it can involve real surrounding objects as \UI: to either visually augment them (\figref{roo2017inner}), or to use them as physical proxies to support interaction with virtual objects \cite{ishii1997tangible}.
Each virtual object is coupled to a real object and physically manipulated through it, providing a direct, efficient and seamless interaction with both the real and virtual content \cite{billinghurst2005designing}.
The real objects are called \emph{tangibles} in this usage context.
Methods have been developed to automatically pair and adapt the virtual objects for rendering with available tangibles of similar shape and size \cite{hettiarachchi2016annexing,jain2023ubitouch} (\figref{jain2023ubitouch}).
The issue with these \emph{space-multiplexed} interfaces is the large number and variety of tangibles required.
@@ -245,12 +223,11 @@ Similarly, in \secref{tactile_rendering} we described how a material property (\
\subsubsection{Manipulating with Virtual Hands}
\label{ar_virtual_hands}
Initially tracked by active sensing devices such as gloves or controllers, hands can now be tracked in real time using passive sensing (\secref{interaction_techniques}) and computer vision algorithms natively integrated into \AR/\VR headsets \cite{tong2023survey}.
Our hands allow us to manipulate real everyday objects (\secref{grasp_types}), hence virtual hand interaction techniques seem to be the most natural way to manipulate virtual objects \cite[p.400]{laviolajr20173d}.
The tracked hand of the user is reconstructed as a \emph{virtual hand} model in the \VE \cite[p.405]{laviolajr20173d}.
The simplest models represent the hand as a rigid \ThreeD object that follows the movements of the real hand with 6 \DoF (position and orientation in space) \cite{talvas2012novel}.
An alternative is to model only the fingertips (\figref{lee2007handy}) or the whole hand (\figref{hilliges2012holodesk_1}) as points.
The most common technique is to reconstruct all the phalanges of the hand in an articulated kinematic model (\secref{hand_anatomy}) \cite{borst2006spring}.
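In such physics-based approaches, the simulated hand is typically not rigidly snapped to the tracked pose but elastically coupled to it. A minimal sketch (following the spring-damper coupling of \textcite{borst2006spring}; gains and exact formulation vary between implementations) applies to each simulated segment a force
\begin{equation*}
    \mathbf{F} = k \left( \mathbf{x}_{\text{tracked}} - \mathbf{x}_{\text{sim}} \right) + b \left( \dot{\mathbf{x}}_{\text{tracked}} - \dot{\mathbf{x}}_{\text{sim}} \right),
\end{equation*}
with stiffness $k$ and damping $b$, so that the visual hand follows the real hand when free but stays constrained at the surface of virtual objects on contact.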
@@ -284,7 +261,6 @@ While a visual feedback of the virtual hand in \VR can compensate for these issu
\subsection{Visual Feedback of Virtual Hands in AR}
\label{ar_visual_hands}
When interacting with a physics-based virtual hand method (\secref{ar_virtual_hands}) in \VR, the visual feedback of the virtual hand has an influence on perception, interaction performance, and preference of users \cite{prachyabrued2014visual,argelaguet2016role,grubert2018effects,schwind2018touch}.
In a pick-and-place manipulation task in \VR, \textcite{prachyabrued2014visual} and \textcite{canales2019virtual} found that the visual hand feedback whose motion was constrained to the surface of the virtual objects, similarly to \textcite{borst2006spring} (\enquote{Outer Hand} in \figref{prachyabrued2014visual}), performed the worst, while the visual hand feedback following the tracked human hand (thus penetrating the virtual objects, \enquote{Inner Hand} in \figref{prachyabrued2014visual}) performed the best, though it was rather disliked.
\textcite{prachyabrued2014visual} also found that the best compromise was a double feedback, showing both the virtual hand and the tracked hand (\enquote{2-Hand} in \figref{prachyabrued2014visual}).
@@ -294,13 +270,7 @@ A visual hand feedback while in \VE also seems to affect how one grasps an objec
\fig{prachyabrued2014visual}{Visual hand feedback affects user experience in \VR \cite{prachyabrued2014visual}.}
Conversely, a user sees their own hands in \AR, and the mutual occlusion between the hands and the virtual objects is a common issue (\secref{ar_displays}), \ie hiding the virtual object when the real hand is in front of it, and hiding the real hand when it is behind the virtual object (\figref{hilliges2012holodesk_2}).
While in \VST-\AR this could be solved as a masking problem by combining the real and virtual images \cite{battisti2018seamless}, \eg in \figref{suzuki2014grasping}, in \OST-\AR this is much more difficult because the \VE is displayed as a transparent \TwoD image on top of the \ThreeD \RE, which cannot be easily masked \cite{macedo2023occlusion}.
Since the \VE is intangible, adding a visual feedback of the virtual hand in \AR that is physically constrained to the virtual objects would achieve a similar result to the double-hand feedback of \textcite{prachyabrued2014visual}.
A virtual object overlaying a real object in \OST-\AR can vary in size and shape without degrading user experience or manipulation performance \cite{kahl2021investigation,kahl2023using}.
@@ -308,16 +278,11 @@ This suggests that a visual hand feedback superimposed on the real hand as a par
Few works have compared different visual feedback of the virtual hand in \AR, or in combination with wearable haptic feedback.
Rendering the real hand as a semi-transparent hand in \VST-\AR is perceived as less natural but seems to be preferred to a mutual visual occlusion for interaction with real and virtual objects \cite{buchmann2005interaction,piumsomboon2014graspshell}.
Similarly, \textcite{blaga2017usability} evaluated direct hand manipulation in \VST-\AR with a skeleton-like rendering \vs no visual hand feedback: while user performance did not improve, participants felt more confident with the virtual hand (\figref{blaga2017usability}).
In a collaborative task in \OST-\AR \vs \VR headsets, \textcite{yoon2020evaluating} showed that a realistic human hand rendering was preferred over a low-polygon hand and a skeleton-like hand for the remote partner.
\textcite{genay2021virtual} found that the sense of embodiment with a robotic hand overlay in \OST-\AR was stronger when the environment contained both real and virtual objects (\figref{genay2021virtual}).
Finally, \textcite{maisto2017evaluation} and \textcite{meli2018combining} compared the visual and haptic feedback of the hand in \VST-\AR, as detailed in the next section (\secref{vhar_rings}).
Taken together, these results suggest that a visual augmentation of the hand in \AR could improve usability and performance in direct hand manipulation tasks, but the best rendering has yet to be determined.
\begin{subfigs}{visual-hands}{Visual feedback of the virtual hand in \AR. }[][
\item Grasping a virtual object in \OST-\AR with no visual hand feedback \cite{hilliges2012holodesk}.
@@ -332,7 +297,6 @@ Taken together, these results suggest that a visual augmentation of the hand in
\subfig{buchmann2005interaction}
\subfig{blaga2017usability}
\subfig{genay2021virtual}
\end{subfigs}
\subsection{Conclusion}
@@ -340,8 +304,8 @@ Taken together, these results suggest that a visual augmentation of the hand in
\AR systems integrate virtual content into the user's perception as if it were part of the \RE.
\AR headsets now enable real-time pose estimation of the head and hands, and high-quality display of virtual content, while being portable and mobile.
They create \AEs that users can explore with a strong sense of the presence of the virtual content.
However, without direct and seamless interaction with the virtual objects using the hands, the coherence of the \AE experience is compromised.
In particular, when manipulating virtual objects in \OST-\AR, there is a lack of mutual occlusion and interaction cues between the hands and the virtual content, which could be mitigated by a visual augmentation of the hand.
A common alternative approach is to use real objects as proxies for interaction with virtual objects, but this raises concerns about their coherence with visual augmentations.
In this context, the use of wearable haptic systems worn on the hand seems to be a promising solution both for improving direct hand manipulation of virtual objects and for coherent visuo-haptic augmentation of touched real objects.


@@ -5,10 +5,7 @@ Perception and manipulation of objects with the hand typically involves both the
Each sense has unique capabilities for perceiving certain object properties, such as color for vision or temperature for touch, but they are equally capable of perceiving many properties, such as roughness, hardness, or geometry \cite{baumgartner2013visual}.
Both \AR and wearable haptic systems integrate virtual content into the user's perception as sensory illusions.
It is essential to understand how a visuo-haptic rendering of a virtual object is perceived as a coherent object property, and how wearable haptics have been integrated with \AR headsets.
\subsection{Visuo-Haptic Perception of Virtual and Augmented Objects}
\label{vh_perception}
@@ -36,32 +33,23 @@ and the integrated variance $\sigma^2$ is the inverse of the sum of the individu
\end{equation}
This was demonstrated by \textcite{ernst2002humans} in a user study where participants estimated the height of a virtual bar using a fixed-window \OST-\AR display (\secref{ar_displays}) and force-feedback devices worn on the thumb and index finger (\secref{wearability_level}), as shown in \figref{ernst2002humans_setup}.
On each trial, participants compared the visuo-haptic reference bar (of a fixed height) to a visuo-haptic comparison bar (of a variable height) in a \TIFC task (one bar is presented first, then a pause, then the other) and indicated which was taller.
The reference bar had different conflicting visual $s_v$ and haptic $s_h$ heights, and different noise levels were added to the visual feedback to increase its variance.
The objective was to determine a \PSE between the comparison and reference bars, where the participant was equally likely to choose one or the other (\percent{50} of the trials).
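In practice, the \PSE is read off a psychometric function fitted to the responses. A standard sketch (a cumulative Gaussian, one common choice among sigmoid models) expresses the probability of judging a comparison bar of height $s$ taller than the reference as
\begin{equation*}
    P(\text{taller} \mid s) = \Phi \left( \frac{s - \mathrm{PSE}}{\sigma} \right),
\end{equation*}
where $\Phi$ is the cumulative distribution function of the standard normal distribution and $\sigma$ captures the discrimination variability; the \PSE is the height at which $P = 0.5$.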
\figref{ernst2004merging_results} shows that when the visual noise was low, the visual feedback had more weight, but as visual noise increased, haptic feedback gained more weight, as predicted by the \MLE model.
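As a worked illustration of these weights (numbers ours, for intuition only): writing $w_v$ and $w_h$ for the visual and haptic weights of the \MLE estimate, equally reliable cues $\sigma_v = \sigma_h$ give $w_v = w_h = 0.5$, whereas doubling the visual standard deviation, $\sigma_v = 2 \sigma_h$, gives
\begin{equation*}
    w_v = \frac{1/\sigma_v^2}{1/\sigma_v^2 + 1/\sigma_h^2} = \frac{\sigma_h^2}{\sigma_v^2 + \sigma_h^2} = \frac{1}{5},
\end{equation*}
so the haptic estimate dominates with $w_h = 4/5$, consistent with the shift seen in \figref{ernst2004merging_results}.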
\begin{subfigs}{ernst2002humans}{ \begin{subfigs}{ernst2002humans}{
Visuo-haptic perception of height of a virtual bar \cite{ernst2002humans}. Visuo-haptic perception of height of a virtual bar \cite{ernst2002humans}.
}[][ }[][
\item Experimental setup.%: Participants estimated height visually with an \OST-\AR display and haptically with force-feedback devices worn on the thumb and index fingers. \item Experimental setup.
%\item with only haptic feedback (red) or only visual feedback (blue, with different added noise),
%\item combined visuo-haptic feedback (purple, with different visual noises).
\item Proportion of trials (vertical axis) where the comparison bar (horizontal axis) was perceived taller than the reference bar as function of increase variance (inverse of reliability) of the visual feedback (colors). \item Proportion of trials (vertical axis) where the comparison bar (horizontal axis) was perceived taller than the reference bar as function of increase variance (inverse of reliability) of the visual feedback (colors).
The reference had different conflicting visual $s_v$ and haptic $s_h$ heights. The reference had different conflicting visual $s_v$ and haptic $s_h$ heights.
] ]
\subfig[.34]{ernst2002humans_setup} \subfig[.34]{ernst2002humans_setup}
\subfig[.64]{ernst2004merging_results} \subfig[.64]{ernst2004merging_results}
%\subfig{ernst2002humans_within}
%\subfig{ernst2002humans_visuo-haptic}
\end{subfigs} \end{subfigs}
%Hence, the \MLE model explains how a (visual) virtual object in \AR can be perceived as coherent when combined with real haptic sensations of a tangible or a wearable haptic feedback.
The \MLE model implies that when seeing and touching a virtual object in \AR, the combination of visual and haptic stimuli, real or virtual, presented to the user can be perceived as a coherent single object property. The \MLE model implies that when seeing and touching a virtual object in \AR, the combination of visual and haptic stimuli, real or virtual, presented to the user can be perceived as a coherent single object property.
\subsubsection{Influence of Visual Rendering on Haptic Perception}
\label{visual_haptic_influence}
More precisely, when surfaces are evaluated by vision or touch alone, both senses provide their own estimate of the same surface property.
The overall perception can then be modified by changing one of the sensory modalities.
\textcite{yanagisawa2015effects} altered the perceived roughness, stiffness, and friction of real tactile materials touched by the finger by superimposing different real visual textures using a half-mirror.
In a similar setup, but in \VST-\AR, \textcite{kitahara2010sensory} overlaid visual textures on real textured surfaces touched through a glove: many visual textures were found to match the real haptic textures.
\textcite{degraen2019enhancing} and \textcite{gunther2022smooth} also combined multiple virtual objects in \VR with \ThreeD-printed hair structures or with everyday real surfaces, respectively.
They found that the visual perception of roughness and hardness influenced the haptic perception, and that only a few real objects seemed to be sufficient to match all the visual virtual objects (\figref{gunther2022smooth}).
\fig{gunther2022smooth}{In a passive touch context in \VR, only a smooth and a rough real surface were found to match all the visual virtual objects \cite{gunther2022smooth}.}
For example, in a fixed \VST-\AR screen (\secref{ar_displays}), visually deforming a real object as it is touched can alter its perceived haptic properties (\figref{ban2014displaying}).
\fig{ban2014displaying}{Visual deformation of a touched real object in a fixed \VST-\AR screen \cite{ban2014displaying}.}
\subsubsection{Perception of Visuo-Haptic Rendering in AR and VR}
\label{ar_vr_haptic}
Some studies have investigated the visuo-haptic perception of virtual objects rendered with force-feedback and vibrotactile feedback in \AR and \VR.
In \VST-\AR, \textcite{knorlein2009influence} rendered a virtual piston using force-feedback haptics that participants pressed directly with their hand (\figref{visuo-haptic-stiffness}).
In a \TIFC task (\secref{sensations_perception}), participants pressed two pistons and indicated which was stiffer.
One had a reference stiffness plus an additional visual or haptic delay, while the other had a varying comparison stiffness but no delay.\footnote{Participants were not told about the delays and stiffnesses tested, nor which piston was the reference or comparison. The order of the pistons (which one was pressed first) was also randomized.}
Adding a visual delay increased the perceived stiffness of the reference piston, while adding a haptic delay decreased it, and adding both delays cancelled each other out (\figref{knorlein2009influence_2}).
\begin{subfigs}{visuo-haptic-stiffness}{Perception of the stiffness of a virtual piston with visual and haptic delays in \VST-\AR \cite{knorlein2009influence}. }[][
\item Experimental setup.
\item Perceived stiffness of the reference piston as a function of the added visual or haptic delays.
]
\subfig[.44]{knorlein2009influence_1}
\subfig[.55]{knorlein2009influence_2}
\end{subfigs}
The stiffness $\tilde{k}(t)$ of the piston is indeed estimated at time $t$ by both sight and proprioception as the ratio of the exerted force $F(t)$ and the displacement $\Delta L(t)$ of the piston, following \eqref{stiffness}, but with potential visual $\Delta t_v$ or haptic $\Delta t_h$ delays.
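That is, denoting by $F(t - \Delta t_h)$ the force felt with a haptic delay $\Delta t_h$, and by $\Delta L(t - \Delta t_v)$ the displacement seen with a visual delay $\Delta t_v$, the estimated stiffness while pressing becomes:
\begin{equation}{stiffness_delay}
    \tilde{k}(t) = \frac{F(t - \Delta t_h)}{\Delta L(t - \Delta t_v)}
\end{equation}
As both the force and the displacement increase while pressing, a haptic delay lowers the felt force paired with a given displacement, while a visual delay lowers the seen displacement paired with a given force.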
Therefore, the perceived stiffness $\tilde{k}(t)$ increases with a visual delay in the displacement and decreases with a haptic delay in the force \cite{diluca2011effects}.
\textcite{gaffary2017ar} compared the perceived stiffness of virtual pistons in \OST-\AR and \VR.
However, the force-feedback device and the participant's hand were not visible (\figref{gaffary2017ar}).
The reference piston was judged to be stiffer when seen in \VR than in \AR, without participants noticing this difference, and more force was exerted on the piston overall in \VR.
This suggests that the haptic stiffness of virtual objects feels \enquote{softer} in an \AE than in a full \VE.
Finally, \textcite{diluca2019perceptual} investigated the perceived simultaneity of visuo-haptic contact with a virtual object in \VR.
The contact was rendered both by a vibrotactile piezoelectric device on the fingertip and by a visual change in the color of the virtual object.
The visuo-haptic simultaneity was varied by adding a visual delay or by triggering the haptic feedback before the visual contact.
No participant (out of 19) was able to detect a \qty{50}{\ms} visual lag and a \qty{15}{\ms} haptic lead, and only half of them detected a \qty{100}{\ms} visual lag and a \qty{70}{\ms} haptic lead.
These studies have shown how the latency of the visual rendering of a virtual object or the type of environment (\VE or \RE) can affect the perceived haptic stiffness of the object, rendered with a grounded force-feedback device.
We describe in the next section how wearable haptics have been integrated with \AR.
\begin{subfigs}{gaffary2017ar}{Perception of haptic stiffness in \OST-\AR \vs \VR \cite{gaffary2017ar}. }[][
\item Experimental setup: a virtual piston was pressed with a force-feedback device placed to the side of the participant.
]
\subfig{gaffary2017ar}
\end{subfigs}
\subsection{Wearable Haptics for Direct Hand Interaction in AR}
\label{vhar_haptics}
A few wearable haptic devices have been specifically designed or experimentally tested for direct hand interaction in \AR.
Since virtual or augmented objects are naturally touched, grasped, and manipulated directly with the fingertips (\secref{exploratory_procedures} and \secref{grasp_types}), the main challenge of wearable haptics for \AR is to provide haptic sensations of these interactions while keeping the fingertips free to interact with the \RE.
Several approaches have been proposed to move the haptic actuator to a different location, on the outside of the finger or the hand, \eg the nail, the top of a phalanx, or the wrist.
Yet, they differ greatly in the actuators used (\secref{wearable_haptic_devices}), and thus in the haptic feedback they provide (\secref{tactile_rendering}) and in the placement of the haptic rendering.
Other wearable haptic actuators have been proposed for \AR, but are not discussed here.
A first reason is that they permanently cover the fingertip and affect the interaction with the \RE, such as thin-skin tactile interfaces \cite{withana2018tacttoo,teng2024haptic} or fluid-based interfaces \cite{han2018hydroring}.
Another category of actuators relies on systems that cannot be considered portable, such as REVEL \cite{bau2012revel}, which provides friction sensations with reverse electrovibration but requires modifying the real objects to augment, or electrical muscle stimulation (EMS) devices \cite{lopes2018adding}, which provide kinesthetic feedback by contracting the muscles.
\subsubsection{Nail-Mounted Devices}
\label{vhar_nails}
\textcite{ando2007fingernailmounted} were the first to move the actuator away from the fingertip.
They proposed to relocate it to the nail instead, as described in \secref{texture_rendering}.
This approach was later extended by \textcite{teng2021touch} with Touch\&Fold, a haptic device able to unfold its end-effector on demand to make contact with the fingertip when touching virtual objects (\figref{teng2021touch_1}).
This moving platform also contains a \LRA (\secref{moving_platforms}) and provides contact pressure and texture sensations.
When touching virtual objects in \OST-\AR with the index finger, this device was found to be more realistic overall (5/7) than vibrations with a \LRA at \qty{170}{\Hz} on the nail (3/7).
Still, there is a high (\qty{92}{\ms}) latency for the folding mechanism and this design is not suitable for augmenting real objects.
With Fingeret, \textcite{maeda2022fingeret} adapted the belt actuators (\secref{belt_actuators}) as a \enquote{finger-side actuator} that leaves the fingertip free (\figref{maeda2022fingeret}).
Two rollers, one on each side, can deform the skin: when rotated inward, they pull the skin, simulating a contact sensation, and when rotated outward, they push the skin, simulating a release sensation.
They can also simulate a texture sensation by rapidly rotating in and out.
In a user study not in \AR, but directly touching images on a tablet, Fingeret was found to be more realistic (4/7) than a \LRA at \qty{100}{\Hz} on the nail (3/7) for rendering buttons and a patterned texture (\secref{texture_rendering}), but not different from vibrations for rendering high-frequency textures (3.5/7 for both).
However, as with \textcite{teng2021touch}, finger speed was not taken into account when rendering vibrations, which may have been detrimental to texture perception, as described in \secref{texture_rendering}.
\subsubsection{Ring Devices}
\label{vhar_rings}
Recall that these devices have also been used to modify the perceived stiffness of touched objects, among other haptic properties (\secref{belt_actuators}).
In a \VST-\AR setup, \textcite{scheggi2010shape} explored the effect of rendering the weight of a virtual cube placed on a real surface held with the thumb, index, and middle fingers (\figref{scheggi2010shape}).
The middle phalanx of each of these fingers was equipped with the haptic ring of \textcite{minamizawa2007gravity}.
\textcite{scheggi2010shape} reported that 12 out of 15 participants found the weight haptic feedback essential to feeling the presence of the virtual cube.
In a pick-and-place task in \VST-\AR involving direct hand manipulation of both virtual and real objects (\figref{maisto2017evaluation}), \textcite{maisto2017evaluation} and \textcite{meli2018combining} compared the effects of providing haptic or visual feedback about fingertip-object contacts.
They compared the haptic ring of \textcite{pacchierotti2016hring} on the proximal phalanx, the moving platform of \textcite{chinello2020modular} on the fingertip, and a visual feedback of the tracked fingertips as virtual points.
They showed that the haptic feedback improved the completion time and reduced the force exerted on the cubes compared to the visual feedback (\figref{visual-hands}).
The haptic ring was also perceived as more effective than the moving platform.
However, the measured difference in performance could be due to either the device or the device position (proximal phalanx \vs fingertip), or both.
These two studies were also conducted in static setups, where users viewed a screen displaying the visual interactions, and only compared the haptic and visual feedback of the hand-object contacts, but did not examine them together.
\begin{subfigs}{ar_rings}{Wearable haptic ring devices for \AR. }[][
\item Rendering weight of a virtual cube placed on a real surface \cite{scheggi2010shape}.
\item Pick-and-place task with haptic or visual feedback of the fingertip-object contacts \cite{maisto2017evaluation}.
]
\subfig[.48][m]{scheggi2010shape}
\subfig[.48][m]{maisto2017evaluation}
\end{subfigs}
With their \enquote{Tactile And Squeeze Bracelet Interface} (Tasbi), already mentioned in \secref{belt_actuators}, \textcite{pezent2019tasbi} and \textcite{pezent2022design} explored the use of a wrist-worn bracelet actuator.
It is capable of providing a uniform pressure sensation (up to \qty{15}{\N} and \qty{10}{\Hz}) and vibration with six \LRAs (\qtyrange{150}{200}{\Hz} bandwidth).
A user study was conducted in \VR to compare the perception of visuo-haptic stiffness rendering \cite{pezent2019tasbi}, and showed that the haptic pressure feedback was more important than the visual displacement.
\subsection{Conclusion}
\label{visuo_haptic_conclusion}
Providing coherent visuo-haptic feedback to enhance direct hand perception and manipulation with virtual objects in \AR is challenging.
While many wearable haptic devices have been developed and are capable of providing varied tactile feedback, few have been integrated or experimentally evaluated for direct hand interaction in \AR.
Their haptic end-effector must be moved away from the inside of the hand so as not to interfere with the user's interaction with the \RE.
Different relocation strategies have been proposed for different parts of the hand, such as the nail, the index phalanges, or the wrist, but it remains unclear whether any of them is best suited for direct hand interaction in \AR.
Only a few haptic systems can be considered wearable due to their compactness and light weight.
If their haptic rendering is synchronized in time with the user's touch actions on a real object, the perceived haptic properties of the object, such as its roughness and hardness, can be modified.
Wearable haptic augmentation of roughness and hardness is mostly achieved with vibrotactile feedback (\secref{tactile_rendering}).
\AR headsets integrate virtual content into the user's perception as if it were part of the \RE, with real-time pose estimation of the head and hands (\secref{what_is_ar}).
Direct hand interaction with virtual content is often implemented using a virtual hand interaction technique, which reconstructs the user's hand in the \VE and simulates its interactions with the virtual.
However, the perception and manipulation of the virtual is difficult due to the lack of haptic feedback and the mutual occlusion of the hand with the virtual content (\secref{ar_interaction}).
Real surrounding objects can also be used as proxies to interact with the virtual, but they may be incoherent with their visual augmentation because they are haptically passive (\secref{ar_interaction}).
Wearable haptics on the hand seems to be a promising solution to enable coherent and effective visuo-haptic augmentation of both virtual and real objects.
\noindentskip In this thesis, we will use wearable haptic feedback with an \AR headset to create visuo-haptic texture augmentation when touching real objects (\partref{perception}) and to improve manipulation of virtual objects (\partref{manipulation}), both directly with the bare hand.
First, it is challenging to provide coherent visuo-haptic feedback when augmenting real objects.
By integrating different sensory feedback, haptic and visual, real and virtual, into a single object property, perception is somewhat robust to variations in reliability and to spatial and temporal differences.
\bigskip
This chapter reviews previous work on the perception and manipulation of virtual and augmented objects directly with the hand, using either wearable haptics, \AR, or their combination.
First, we review how the hand senses and interacts with its environment to perceive and manipulate the haptic properties of real everyday objects.
Second, we present how wearable haptic devices and renderings have been used to augment the haptic perception of roughness and hardness of real objects.
Third, we introduce the principles and user experience of \AR and review the main interaction techniques used to manipulate virtual objects directly with the hand.
One approach to render virtual haptic textures consists in simulating the roughness of a periodic grating surface as a vibrotactile sinusoidal signal (\secref[related_work]{texture_rendering}).
The vibrations are rendered by a voice-coil actuator embedded in a hand-held tool or worn on the finger, but to create the illusion of touching a pattern with a fixed spatial period, the frequency of the signal must be modulated according to the finger movement.
Previous work either used mechanical systems to track the movement at high frequency \cite{strohmeier2017generating,friesen2024perceived}, or required the user to move at a constant speed to keep the signal frequency constant \cite{asano2015vibrotactile,ujitoko2019modulating}.
However, this method has not yet been integrated in an \AR headset context, where the user should be able to freely touch and explore the visuo-haptic texture augmentations.
%which either constrained hand to a constant speed to keep the signal frequency constant \cite{asano2015vibrotactile,friesen2024perceived}, or used mechanical sensors attached to the hand \cite{friesen2024perceived,strohmeier2017generating}
In this chapter, we propose a \textbf{system for rendering visual and haptic virtual textures that augment real surfaces}.
It is implemented with the \OST-\AR headset Microsoft HoloLens~2 and a wearable vibrotactile (voice-coil) device worn on the outside of the finger (not covering the fingertip, \secref[related_work]{vhar_haptics}).
The visuo-haptic augmentations can be \textbf{viewed from any angle} and \textbf{explored freely with the bare finger}, as if they were real textures.
To ensure both real-time and reliable renderings, the hand and the real surfaces are tracked using a webcam and marker-based pose estimation.
The haptic textures are rendered as a vibrotactile signal representing a patterned grating texture that is synchronized with the finger movement on the augmented surface.
\noindentskip The contributions of this chapter are:
\begin{itemize}
\item The rendering of virtual vibrotactile roughness textures representing a patterned grating texture in real time from free finger movements and using vision-based finger pose estimation.
\item A system to provide coherent visuo-haptic texture augmentations of the \RE in a direct touch context using an \OST-\AR headset and wearable haptics.
\end{itemize}
\noindentskip In the remainder of this chapter, we describe the principles of the system, how the \RE and \VE are registered, the generation of the vibrotactile textures, and measures of visual and haptic rendering latencies.
\bigskip
\section{Concept}
\label{principle}
The visuo-haptic texture rendering system is based on:
\begin{enumerate}[label=(\arabic*)]
\item a real-time interaction loop between the finger movements and a coherent visuo-haptic feedback simulating the sensation of a touched texture,
\item a precise registration of the \VE with its real counterpart, and
\item a modulation of the signal frequency by the estimated finger speed with phase matching.
\end{enumerate}
The velocity (without angular velocity) of the finger marker, denoted as $\pose{c}{\dot{X}}{f}$, is estimated by differentiating the successive filtered pose estimates.
It is then filtered with another 1€ filter with the same parameters, and denoted as $\pose{c}{\hat{\dot{X}}}{f}$.
Finally, this filtered finger velocity is transformed into the augmented surface frame $\poseFrame{s}$ to be used in the vibrotactile signal generation, such that $\pose{s}{\hat{\dot{X}}}{f} = \pose{s}{T}{c} \, \pose{c}{\hat{\dot{X}}}{f}$.
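As an illustration, here is a minimal sketch of this estimation pipeline in Python; the 1€ filter is shown for a single scalar coordinate, and the parameter values and helper names are illustrative rather than those of our implementation:
\begin{verbatim}
import math
import numpy as np

class OneEuroFilter:
    """1 euro filter for one scalar coordinate."""
    def __init__(self, min_cutoff=1.0, beta=0.01, d_cutoff=1.0):
        self.min_cutoff, self.beta, self.d_cutoff = min_cutoff, beta, d_cutoff
        self.t_prev = self.x_prev = None
        self.dx_prev = 0.0

    @staticmethod
    def alpha(dt, cutoff):
        r = 2.0 * math.pi * cutoff * dt
        return r / (r + 1.0)

    def filter(self, t, x):
        if self.t_prev is None:
            self.t_prev, self.x_prev = t, x
            return x
        dt = t - self.t_prev
        # Filter the derivative, then adapt the cutoff to the speed:
        # low cutoff (smooth) when slow, high cutoff (reactive) when fast.
        dx = (x - self.x_prev) / dt
        a_d = self.alpha(dt, self.d_cutoff)
        self.dx_prev = a_d * dx + (1.0 - a_d) * self.dx_prev
        cutoff = self.min_cutoff + self.beta * abs(self.dx_prev)
        a = self.alpha(dt, cutoff)
        x_hat = a * x + (1.0 - a) * self.x_prev
        self.t_prev, self.x_prev = t, x_hat
        return x_hat

pos_filters = [OneEuroFilter() for _ in range(3)]  # x, y, z position
vel_filters = [OneEuroFilter() for _ in range(3)]  # x, y, z velocity

def update(t, dt, marker_pos_cam, prev_pos_cam, R_surface_cam):
    # Filter the marker position, differentiate it, filter the
    # resulting velocity, then rotate it into the surface frame.
    pos = np.array([f.filter(t, p)
                    for f, p in zip(pos_filters, marker_pos_cam)])
    vel = (pos - prev_pos_cam) / dt
    vel = np.array([f.filter(t, v) for f, v in zip(vel_filters, vel)])
    return pos, R_surface_cam @ vel  # velocity in the surface frame
\end{verbatim}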
\subsection{Virtual Environment Registration}
\label{virtual_real_registration}
\comans{JG}{The registration process between the external camera, the finger, surface and HoloLens could have been described in more detail. Specifically, it could have been described clearer how the HoloLens coordinate system was aligned (e.g., by also tracking the fiducials on the surface and or finger).}{This has been better described.}
Before a user interacts with the system, it is necessary to design a \VE that will be registered with the \RE during the experiment.
Each real element tracked by a marker is modelled virtually, \eg the hand and the augmented surface (\figref{device}).
In addition, the pose and size of the virtual textures are defined on the virtual replicas.
Prior to any usage, it is necessary to register the \VE with the \RE.
First, the coordinate system of the headset is manually aligned with that of the external camera by the experimenter using a raycast from the headset to the origin point of the camera (the center of the black rectangle in the box in \figref{apparatus}).
This resulted in a \qty{\pm .5}{\cm} spatial alignment error between the \RE and the \VE.
While this was sufficient for our use cases, other methods can achieve better accuracy if needed \cite{grubert2018survey}.
Registering the camera and headset coordinate systems thus allows the marker poses estimated with the camera to be used to display, in the headset, the virtual models aligned with their real-world counterparts.
\comans{JG}{A description if and how the offset between the lower side of the fingertip touching the surface and the fiducial mounted on the top of the finger was calibrated / compensated is missing}{This has been better described.}
An additional calibration is performed to compensate for the offset between the finger contact point and the estimated marker pose \cite{son2022effect}.
The current user places the index finger on the origin point, the poses of both being known from the attached fiducial markers.
The transformation between the finger marker pose and the finger contact point can then be estimated and compensated for with an inverse transformation.
This makes it possible to detect whether the calibrated real finger touches a virtual texture, using a collision detection algorithm (Nvidia PhysX).
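A minimal sketch of this calibration, assuming 4-by-4 homogeneous pose matrices estimated by the camera (the function names are illustrative):
\begin{verbatim}
import numpy as np

def calibrate_contact_offset(T_cam_marker, origin_cam):
    """One-time calibration: the fingertip rests on the known origin
    point. T_cam_marker: 4x4 pose of the finger marker in the camera
    frame; origin_cam: 3D position of the origin point in the camera
    frame. Returns the constant offset of the contact point,
    expressed in the marker frame (homogeneous 4-vector)."""
    origin_h = np.append(origin_cam, 1.0)
    return np.linalg.inv(T_cam_marker) @ origin_h

def contact_point_cam(T_cam_marker, offset_marker):
    """At runtime, the fingertip contact point follows the marker:
    apply the current marker pose to the calibrated offset."""
    return (T_cam_marker @ offset_marker)[:3]
\end{verbatim}
The returned contact point is what is then tested against the virtual textures by the collision detection.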
In our implementation, the \VE is designed with Unity (v2021.1) and the Mixed Reality Toolkit (v2.7)\footnoteurl{https://learn.microsoft.com/windows/mixed-reality/mrtk-unity}.
The visual rendering is achieved using the Microsoft HoloLens~2, an \OST-\AR headset with a \qtyproduct{43 x 29}{\degree} \FoV, a \qty{60}{\Hz} refresh rate, and self-localisation capabilities.
A \VST-\AR or a \VR headset could have been used as well.
A voice-coil actuator (HapCoil-One, Actronika) is used to display the vibrotactile signal, as it allows the frequency and amplitude of the signal to be controlled independently over time, covers a wide frequency range (\qtyrange{10}{1000}{\Hz}), and outputs the signal accurately with relatively low acceleration distortion\footnote{HapCoil-One specific characteristics are described in its data sheet: \url{https://tactilelabs.com/wp-content/uploads/2023/11/HapCoil_One_datasheet.pdf}}.
The voice-coil actuator is encased in a \ThreeD printed plastic shell and firmly attached to the middle phalanx of the user's index finger with a Velcro strap, to enable the fingertip to directly touch the environment (\figref{device}).
The actuator is driven by a class D audio amplifier (XY-502 / TPA3116D2, Texas Instruments).
The amplifier is connected to the audio output of a computer that generates the signal using the WASAPI driver in exclusive mode and the NAudio library\footnoteurl{https://github.com/naudio/NAudio}.
The represented haptic texture is a 1D series of parallel virtual grooves and ridges, similar to the real linear grating textures manufactured for psychophysical roughness perception studies (\secref[related_work]{roughness}).
It is generated as a square wave audio signal $r$, sampled at \qty{48}{\kilo\hertz}, with a texture period $\lambda$ and an amplitude $A$, similar to \eqref[related_work]{grating_rendering}.
Its frequency is a ratio of the absolute filtered (scalar) finger velocity $\dot{x} = \poseX{s}{|\hat{\dot{X}}|}{f}$ and the texture period $\lambda$ \cite{friesen2024perceived}.
As the finger is moving horizontally on the texture, only the $X$ component of the velocity is used.
This velocity modulation strategy is necessary as the finger position is estimated at a far lower rate (\qty{60}{\hertz}) than the audio signal (unlike high-fidelity force-feedback devices \cite{unger2011roughness}).
When a new finger velocity $\dot{x}\,(t_j)$ is estimated at time $t_j$, the phase $\phi\,(t_j)$ of the signal $r$ also needs to be adjusted to ensure continuity of the signal.
In other words, the sampling of the audio signal runs at \qty{48}{\kilo\hertz}, while its frequency and phase are updated at a far lower rate of \qty{60}{\hertz}, when a new finger velocity is estimated.
A sample $r(t_j, t_k)$ of the audio signal at sampling time $t_k$, with $t_k \geq t_j$, is thus given by:
\begin{subequations}{signal_phase}
    \begin{align}
        r(t_j, t_k) &= A \, \operatorname{sgn}\left(\sin\left(2\pi \frac{\dot{x}(t_j)}{\lambda} \, (t_k - t_j) + \phi(t_j)\right)\right) \\
        \phi(t_j) &= 2\pi \frac{\dot{x}(t_{j-1})}{\lambda} \, (t_j - t_{j-1}) + \phi(t_{j-1})
    \end{align}
\end{subequations}
This rendering preserves the sensation of a constant spatial frequency of the virtual texture while the finger moves at various speeds, which is crucial for the perception of roughness \cite{klatzky2003feeling,unger2011roughness}.
The phase matching avoids sudden changes in the actuator movement that would affect the texture perception in an uncontrolled way (\figref{phase_adjustment}) and, contrary to previous work \cite{asano2015vibrotactile,ujitoko2019modulating}, it enables a free exploration of the texture by the user with no constraints on the finger speed.
A square wave is chosen to get a rendering closer to a real grating texture with the sensation of crossing edges \cite{ujitoko2019modulating}, and because the roughness perception of sine wave textures has been shown not to reproduce the roughness perception of real grating textures \cite{unger2011roughness}.
A square wave also makes it possible to render the low signal frequencies that occur when the finger moves slowly or the texture period is large, as the actuator cannot render a pure sine wave signal below \qty{\approx 20}{\Hz} with sufficient amplitude to be perceived.
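As an illustration, a minimal Python sketch of this phase-matched rendering; the class and buffer-based structure are illustrative, as our implementation streams the samples directly to the audio output:
\begin{verbatim}
import numpy as np

FS = 48000  # audio sampling rate (Hz)

class GratingSignal:
    """Square-wave grating texture: the signal frequency follows the
    finger speed, and the phase is matched at each velocity update so
    the waveform stays continuous across frequency changes."""
    def __init__(self, period, amplitude):
        self.lam = period    # spatial period lambda (m)
        self.A = amplitude
        self.freq = 0.0      # current temporal frequency (Hz)
        self.phase = 0.0     # phase at the last update (rad)
        self.t_j = 0.0       # time of the last velocity update (s)

    def on_velocity(self, t, speed):
        # Accumulate the phase up to t with the *old* frequency before
        # switching to the new one: this is the phase matching step.
        self.phase = (self.phase
                      + 2.0 * np.pi * self.freq * (t - self.t_j))
        self.phase %= 2.0 * np.pi
        self.freq = abs(speed) / self.lam  # f = |x_dot| / lambda
        self.t_j = t

    def samples(self, t_start, n):
        # n audio samples from t_start: square wave as the sign of the
        # phase-matched sinusoid.
        t = t_start + np.arange(n) / FS
        arg = 2.0 * np.pi * self.freq * (t - self.t_j) + self.phase
        return self.A * np.sign(np.sin(arg))

# 60 Hz velocity updates drive a 48 kHz audio stream:
sig = GratingSignal(period=0.002, amplitude=0.8)
sig.on_velocity(t=0.0, speed=0.075)      # finger at 7.5 cm/s
block = sig.samples(t_start=0.0, n=800)  # one 60 Hz block (48000/60)
\end{verbatim}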
The total visual latency can be considered slightly high, yet it is typical for such \AR systems.
The two filters also introduce a constant lag between the finger movement and the estimated position and velocity, measured at \qty{160 \pm 30}{\ms}.
With respect to the real hand position, it causes a distance error in the displayed virtual hand position, and thus a delay in the triggering of the vibrotactile signal.
This error is proportional to the speed of the finger, as the product of the lag and the finger speed: \eg a distance error of \qty{1.2 \pm .2}{\cm} when the finger moves at \qty{7.5}{\cm\per\second}.
In this chapter, we designed and implemented a system for rendering virtual visuo-haptic textures that augment a real surface.
Directly touched with the fingertip, the perceived roughness of the surface can be increased using a wearable vibrotactile voice-coil device mounted on the middle phalanx of the finger.
We adapted the 1D sinusoidal grating rendering method, common in the literature but not yet integrated in a direct touch context, for use with vision-based pose estimation of the finger and paired it with an \OST-\AR headset.
Our wearable visuo-haptic augmentation system enables any real surface to be augmented with a minimal setup.
It also allows a free exploration of the textures, as if they were real (\secref[related_work]{ar_presence}), by letting the user view them from different viewpoints and touch them with the bare finger without constraints on hand movements.
This system forms the basis of the apparatus for the user studies presented in the following chapters.
\section{Introduction}
\label{intro}
In the previous chapter, we investigated the role of the visual feedback of the virtual hand and the environment (\AR \vs \VR) on the perception of wearable haptic texture augmentation.
In this chapter, we explore the perception of wearable visuo-haptic texture augmentation of real surfaces touched directly with the finger.
When we look at the surface of an everyday object, we then touch it to confirm or contrast our initial visual impression and to estimate the properties of the object, particularly its texture (\secref[related_work]{visual_haptic_influence}). When we look at the surface of an everyday object, we then touch it to confirm or contrast our initial visual impression and to estimate the properties of the object, particularly its texture (\secref[related_work]{visual_haptic_influence}).
Among the various haptic texture augmentations, data-driven methods allow to capture, model and reproduce the roughness perception of real surfaces when touched by a hand-held stylus (\secref[related_work]{texture_rendering}). Among the various haptic texture augmentations, data-driven methods allow to capture, model and reproduce the roughness perception of real surfaces when touched by a hand-held stylus (\secref[related_work]{texture_rendering}).
Databases of visuo-haptic textures have been developed in this way \cite{culbertson2014one,balasubramanian2024sens3}, but they have not yet been explored in an immersive and direct touch context with \AR and wearable haptics. Databases of visuo-haptic textures have been developed in this way \cite{culbertson2014one,balasubramanian2024sens3}, but they have not yet been explored in a direct touch context with \AR and wearable haptics.
In this chapter, we investigate whether simultaneous and \textbf{co-localized visual and wearable haptic texture augmentation of real surfaces} in \AR can be perceived in a coherent and realistic manner, and to what extent each sensory modality would contribute to the overall perception of the augmented texture. In this chapter, we consider simultaneous and \textbf{co-localized visual and wearable haptic texture augmentation of real surfaces} with an \OST-\AR headset and wearable vibrotactile feedback.
We investigate how these textures can be perceived in a coherent and realistic manner, and to what extent each sensory modality would contribute to the overall perception of the augmented texture.
We used nine pairs of \textbf{data-driven visuo-haptic textures} from the \HaTT database \cite{culbertson2014one}, which we rendered using the wearable visuo-haptic augmentation system presented in \chapref{vhar_system}. %, an \OST-\AR headset, and a wearable voice-coil device worn on the finger.
In a \textbf{user study}, 20 participants freely explored by direct touch the combinations of the visuo-haptic texture pairs to rate their coherence, realism and perceived roughness.
We aimed to assess \textbf{which haptic textures were matched with which visual textures}, how the roughness of the visual and haptic textures was perceived, and whether \textbf{the perceived roughness} could explain the matches made between them.
\noindentskip The contributions of this chapter are:
\begin{itemize}
\item Transposition of data-driven visuo-haptic textures to augment real objects in a direct touch context in \AR.
\item A user study with 20 participants evaluating the coherence, realism and perceived roughness of nine pairs of these visuo-haptic texture augmentations.
\end{itemize}
\smallskip
\fig[0.55]{experiment/view}{First person view of the user study.}[
%As seen through the immersive \AR headset.
The visual texture overlays were statically displayed on the surfaces, allowing the user to move around to view them from different angles.
The haptic texture augmentations were generated based on \HaTT data-driven texture models and finger speed, and were rendered on the middle index phalanx.
]
\noindentskip In the next sections, we first describe the apparatus and experimental design of the user study, including the two tasks performed. We then present the results obtained and discuss them before concluding.

View File

@@ -27,8 +27,8 @@ The user study was held in a quiet room with no windows, with one light source o
Nine \qty{5}{\cm} square cardboards with a smooth, white melamine surface, arranged in a \numproduct{3 x 3} grid, were used as real surfaces to augment.
Their poses were estimated with three \qty{2}{\cm} AprilTag fiducial markers glued on the grid of surfaces.
Similarly, a \qty{2}{\cm} fiducial marker was glued on top of the vibrotactile actuator to detect the finger pose.
Positioned \qty{20}{\cm} above the surfaces, a webcam (StreamCam, Logitech) filmed the markers to track finger movements relative to the surfaces, as described in \secref[vhar_system]{virtual_real_registration}.
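A minimal sketch of such marker-based pose estimation, assuming hypothetical camera intrinsics and OpenCV's ArUco module (v4.7+); the actual pipeline is the one described in \secref[vhar_system]{virtual_real_registration}:
\begin{verbatim}
import cv2
import numpy as np

# Hypothetical intrinsics; real values come from camera calibration.
K = np.array([[900., 0., 640.], [0., 900., 360.], [0., 0., 1.]])
dist = np.zeros(5)
tag_size = 0.02  # 2 cm markers

# Tag corners in the tag frame (top-left, top-right, bottom-right,
# bottom-left), matching the order returned by the detector.
obj = 0.5 * tag_size * np.array(
    [[-1, 1, 0], [1, 1, 0], [1, -1, 0], [-1, -1, 0]], np.float32)

detector = cv2.aruco.ArucoDetector(
    cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_APRILTAG_36h11))
corners, ids, _ = detector.detectMarkers(cv2.imread("frame.png"))
for tag in corners:
    # Pose of one tag in the camera frame; the finger pose relative
    # to a surface is the composition of the two tag poses.
    ok, rvec, tvec = cv2.solvePnP(obj, tag[0], K, dist)
\end{verbatim}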
The visual textures were displayed on the real surfaces using the \OST-\AR headset Microsoft HoloLens~2 running a custom application at \qty{60}{FPS} made with Unity (v2021.1) and Mixed Reality Toolkit (v2.7).
A set of empirical tests enabled us to choose the best rendering characteristics in terms of transparency and brightness for the visual textures, which were used throughout the user study.
When a virtual haptic texture was touched, a \qty{48}{kHz} audio signal was generated using the rendering procedure described in \cite{culbertson2014modeling} from the corresponding \HaTT haptic texture model and the measured tangential speed of the finger (\secref[vhar_system]{texture_generation}).
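In that procedure, texture vibrations are modeled as an autoregressive (AR) process whose parameters are interpolated between the recorded (speed, force) operating points; a simplified sketch of the synthesis step, assuming a single fixed operating point and illustrative parameter values:
\begin{verbatim}
import numpy as np
from scipy.signal import lfilter

def synthesize_vibration(ar_coeffs, noise_std, n_samples):
    # All-pole synthesis: white Gaussian noise drives the filter
    # 1 / A(z), with A(z) = 1 + a_1 z^-1 + ... + a_p z^-p fitted
    # to a recorded texture at one (speed, force) operating point.
    excitation = np.random.normal(0.0, noise_std, n_samples)
    return lfilter([1.0], np.concatenate(([1.0], ar_coeffs)), excitation)

# E.g. one 10 ms buffer (480 samples at 48 kHz) per update:
buffer = synthesize_vibration(np.array([-0.9]), 0.01, 480)
\end{verbatim}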
@@ -58,7 +58,7 @@ Participants were first given written instructions about the experimental setup,
Then, after having signed an informed consent form, they were asked to sit in front of the table with the experimental setup and to wear the \AR headset.
%The experimenter firmly attached the plastic shell encasing the vibrotactile actuator to the middle index phalanx of their dominant hand.
As the haptic textures generated no audible noise, participants did not wear any noise reduction headphones.
A calibration of both the HoloLens~2 and the finger pose estimation was performed to ensure the correct registration of the visuo-haptic textures and the real finger with the real surfaces, as described in \secref[vhar_system]{virtual_real_registration}.
Finally, participants familiarized themselves with the augmented surface in a \qty{2}{min} training session with textures different from the ones used in the user study.
Participants started with the \level{Matching} task.
@@ -110,7 +110,7 @@ For each trial of the \level{Matching} task, the chosen \response{Haptic Texture
The \response{Completion Time} was also measured as the time between the visual texture display and the haptic texture selection.
For each modality of the \level{Ranking} task, the \response{Rank} of each of the visual, haptic, or visuo-haptic pairs of the textures presented was recorded.
After each of the two tasks, participants answered the following 7-item Likert scale questions (1=Not at all, 7=Extremely):
\begin{itemize}
\item \response{Haptic Difficulty}: How difficult was it to differentiate the tactile textures?
\item \response{Visual Difficulty}: How difficult was it to differentiate the visual textures?
@@ -120,4 +120,7 @@ For each modality of the \level{Ranking} task, the \response{Rank} of each of th
\item \response{Uncomfort}: How uncomfortable was it to use the haptic device?
\end{itemize}
In an open question, participants also commented on their strategy for completing the \level{Matching} task (\enquote{How did you associate the tactile textures with the visual textures?}) and the \level{Ranking} task (\enquote{How did you rank the textures?}).
\comans{JG}{I suggest to also report on [...] the software packages used for statistical analysis (this holds also for the subsequent chapters).}{This has been added to all chapters where necessary.}
The results were analyzed using R (v4.4) and the packages \textit{afex} (v1.4), \textit{ARTool} (v0.11), \textit{corrr} (v0.4), \textit{FactoMineR} (v2.11), \textit{lme4} (v1.1), and \textit{performance} (v0.13).

View File

@@ -7,27 +7,28 @@
\paragraph{Confusion Matrix}
\label{results_matching_confusion_matrix}
\comans{JG}{For the two-sample Chi-Squared tests in the matching task, the number of samples reported is 540 due to 20 participants conducting 3 trials for 9 textures each. However, this would only hold true if the repetitions per participant would be independent and not correlated (and then, one could theoretically also run 10 participants with 6 trials each, or 5 participants with 12 trials each). If they are not independent, this would lead to an artificial inflated sample size and Type I error. If the trials are not independent (please double check), I suggest either aggregating data on the participant level or to use alternative models that account for the within-subject correlation (as was done in other chapters).}{Data of the three confusion matrices have been aggregated on the participant level and analyzed using a Poisson regression.}
\figref{results/matching_confusion_matrix} shows the confusion matrix of the \level{Matching} task with the visual textures and the proportion of haptic textures selected in response, \ie the proportion of times the corresponding \response{Haptic Texture} was selected in response to the presentation of the corresponding \factor{Visual Texture}.
To determine which haptic textures were selected most often, the repetitions of the trials were first aggregated by counting the number of selections per participant for each (\factor{Visual Texture}, \response{Haptic Texture}) pair.
An \ANOVA based on a Poisson regression (no overdispersion was detected) indicated a statistically significant effect on the number of selections of the interaction \factor{Visual Texture} \x \response{Haptic Texture} (\chisqr{64}{180}{414}, \pinf{0.001}).
Post-hoc pairwise comparisons using Tukey's \HSD test then indicated statistically significant differences for the following visual textures (see the sketch after the list):
\begin{itemize}
\item With \level{Sandpaper~320}, \level{Coffee Filter} was selected more often than the other haptic textures (\ztest{3.4}, \pinf{0.05} each), except \level{Plastic Mesh~1} and \level{Terra Cotta}.
\item With \level{Terra Cotta}, \level{Coffee Filter} was selected more often than the others (\ztest{3.4}, \pinf{0.05} each), except \level{Plastic Mesh~1} and \level{Terra Cotta}.
\item With \level{Coffee Filter}, \level{Coffee Filter} was selected more often than the others (\ztest{4.0}, \pinf{0.01} each), except \level{Terra Cotta}.
\end{itemize}
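The analysis itself was run in R (see the packages listed with the experimental design); a rough Python equivalent of the aggregation and model, assuming a hypothetical long-format \texttt{trials} table:
\begin{verbatim}
import statsmodels.api as sm
import statsmodels.formula.api as smf

# trials: hypothetical DataFrame with one row per trial and the
# columns participant, visual, and haptic (the selected texture).
# A full analysis would also fill in zero counts for pairs that
# were never selected.
counts = (trials.groupby(["participant", "visual", "haptic"])
          .size().rename("n").reset_index())

# Poisson regression of the selection counts with the
# visual x haptic interaction, as in the reported ANOVA.
fit = smf.glm("n ~ C(visual) * C(haptic)", data=counts,
              family=sm.families.Poisson()).fit()
print(fit.wald_test_terms())
\end{verbatim}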
\fig[0.85]{results/matching_confusion_matrix}{Confusion matrix of the \level{Matching} task results.}[
With the presented visual textures as columns and the selected haptic texture in proportion as rows.
The number in a cell is the proportion of times the corresponding haptic texture was selected in response to the presentation of the corresponding visual texture.
The diagonal represents the expected correct answers.
]
Many mistakes were made: the expected haptic texture was selected on average only \percent{20} of the time for five of the visual textures, and even around \percent{5} for (visual) \level{Sandpaper~100}, \level{Brick~2}, and \level{Sandpaper~320}.
Only haptic \level{Coffee Filter} was correctly selected \percent{57} of the time, and was also particularly matched with the visual \level{Sandpaper~320} and \level{Terra Cotta} (around \percent{44} each).
Similarly, the haptic textures \level{Sandpaper~320} and \level{Plastic Mesh~1} were also selected for four and three visual textures, respectively (around \percent{25} each).
Additionally, the Spearman correlations between the trials were computed for each participant and only 21 out of 60 were statistically significant (\pinf{0.05}), with a mean \spearman{0.52} \ci{0.43}{0.59}.
These results indicate that the participants hesitated between several haptic textures for a given visual texture, as also reported in several comments; some haptic textures were favored while others were almost never selected.
Another explanation could be that the participants had difficulty estimating the roughness of the visual textures.
Indeed, many participants explained that they tried to identify or imagine the roughness of a given visual texture and then to select the most plausible haptic texture, in terms of frequency and/or amplitude of vibrations.
@@ -36,35 +37,49 @@ Indeed, many participants explained that they tried to identify or imagine the r
To verify that the difficulty with all the visual textures was the same on the \level{Matching} task, the \response{Completion Time} of a trial was analyzed.
As the \response{Completion Time} results were Gamma distributed, they were transformed with a log to approximate a normal distribution.
An \ANOVA based on a \LMM on the log \response{Completion Time} with the \factor{Visual Texture} as fixed effect and the participant as random intercept was performed.
Normality was verified with a QQ-plot of the model residuals.
No statistically significant effect of \factor{Visual Texture} was found (\anova{8}{512}{1.9}, \p{0.06}) on \response{Completion Time} (\geomean{44}{\s} \ci{42}{46}), indicating an equal difficulty and participant behaviour for all the visual textures.
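As with the Poisson model above, this was fitted in R (\textit{lme4}, \textit{afex}); a comparable sketch with \texttt{statsmodels}, assuming a hypothetical per-trial \texttt{df} table:
\begin{verbatim}
import numpy as np
import statsmodels.formula.api as smf

# df: hypothetical DataFrame with the columns completion_time (s),
# visual_texture, and participant, one row per trial.
df["log_time"] = np.log(df["completion_time"])

# LMM: visual texture as fixed effect, random intercept
# per participant.
lmm = smf.mixedlm("log_time ~ C(visual_texture)", df,
                  groups=df["participant"]).fit()
print(lmm.summary())
\end{verbatim}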
\subsection{Textures Ranking}
\label{results_ranking}
\figref{results/rankings_modality} presents the results of the three rankings of the haptic textures alone, the visual textures alone, and the visuo-haptic texture pairs.
For each ranking, a Friedman test was performed with post-hoc Wilcoxon signed-rank tests and Holm-Bonferroni adjustment.
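A minimal sketch of this test sequence, assuming a hypothetical \texttt{ranks} array of shape (participants, textures):
\begin{verbatim}
from itertools import combinations
from scipy.stats import friedmanchisquare, wilcoxon
from statsmodels.stats.multitest import multipletests

# ranks[i, j]: hypothetical roughness rank given by participant i
# to texture j.
stat, p = friedmanchisquare(*ranks.T)

# Post-hoc paired Wilcoxon tests with Holm-Bonferroni adjustment.
pairs = list(combinations(range(ranks.shape[1]), 2))
raw = [wilcoxon(ranks[:, i], ranks[:, j]).pvalue for i, j in pairs]
reject, adjusted, _, _ = multipletests(raw, method="holm")
\end{verbatim}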
\fig[1]{results/rankings_modality}{Means with bootstrap \percent{95} \CI of the \level{Ranking} task results for each \factor{Modality}.}[
Shown for the haptic textures alone (left), the visual textures alone (center) and the visuo-haptic textures pairs (right).
The order of the visual textures on the x-axis differs between modalities.
A lower rank means that the texture was considered rougher, a higher rank means smoother.
Wilcoxon signed-rank tests and Holm-Bonferroni adjustment: all comparisons were statistically significantly different (\pinf{0.05}) except when marked with an \enquote{X}.
]
\paragraph{Haptic Textures Ranking}
Almost all the texture pairs in the haptic textures ranking results were statistically significantly different (\chisqr{8}{20}{146}, \pinf{0.001}; \pinf{0.05} for each comparison; see \figref{results/rankings_modality}, left).
However, no difference was found between the pairs (\level{Metal Mesh}, \level{Sandpaper~100}), (\level{Cork}, \level{Brick~2}), (\level{Cork}, \level{Sandpaper~320}), (\level{Plastic Mesh~1}, \level{Velcro Hooks}), and (\level{Plastic Mesh~1}, \level{Terra Cotta}).
Average Kendall's Tau correlations between the participants indicated a high consensus (\kendall{0.82} \ci{0.81}{0.84}), showing that participants perceived the roughness of the haptic textures similarly.
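Consensus here is the mean pairwise rank correlation between participants; a small sketch, assuming the same hypothetical \texttt{ranks} array as above:
\begin{verbatim}
from itertools import combinations
from scipy.stats import kendalltau

# Mean pairwise Kendall's Tau over all pairs of participants.
taus = []
for i, j in combinations(range(ranks.shape[0]), 2):
    tau, _ = kendalltau(ranks[i], ranks[j])
    taus.append(tau)
consensus = sum(taus) / len(taus)
\end{verbatim}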
\paragraph{Visual Textures Ranking}
Most of the texture pairs in the visual textures ranking results were also statistically significantly different (\chisqr{8}{20}{119}, \pinf{0.001}; \pinf{0.05} for each comparison; see \figref{results/rankings_modality}, center), except for the following.
No difference was found between \level{Plastic Mesh~1} and \level{Metal Mesh}, \level{Brick 2}, \level{Sandpaper 100}, \level{Cork}, \level{Velcro Hooks};
nor between \level{Velcro Hooks} and \level{Sandpaper 100}, \level{Cork}, \level{Brick 2}.
No difference was found either between the pairs (\level{Metal Mesh}, \level{Cork}), (\level{Sandpaper~100}, \level{Brick~2}), (\level{Sandpaper~320}, \level{Terra Cotta}) and (\level{Sandpaper~320}, \level{Coffee Filter}).
Even though the consensus was high (\kendall{0.61} \ci{0.58}{0.64}), the roughness of the visual textures was more difficult to estimate, in particular for \level{Plastic Mesh~1} and \level{Velcro Hooks}.
\paragraph{Visuo-Haptic Textures Ranking}
Also, almost all the texture pairs in the visuo-haptic textures ranking results were statistically significantly different (\chisqr{8}{20}{140}, \pinf{0.001}; \pinf{0.05} for each comparison; see \figref{results/rankings_modality}, right).
However, no difference was found between the textures for each of the following groups: \{\level{Sandpaper~100}, \level{Cork}\}; \{\level{Cork}, \level{Brick~2}\}; and \{\level{Plastic Mesh~1}, \level{Velcro Hooks}, \level{Sandpaper~320}\}.
The consensus between the participants was also high (\kendall{0.77} \ci{0.74}{0.79}).
Finally, the similarity of the three rankings of each participant was calculated (\figref{results/rankings_texture}).
The \textit{Visuo-Haptic Textures Ranking} was on average highly similar to the \textit{Haptic Textures Ranking} (\kendall{0.79} \ci{0.72}{0.86}) and moderately to the \textit{Visual Textures Ranking} (\kendall{0.48} \ci{0.39}{0.56}).
A Wilcoxon signed-rank test indicated that this difference was statistically significant (\wilcoxon{190}, \p{0.002}).
These results indicate that the two haptic and visual modalities were integrated, the resulting roughness ranking lying between the two rankings of the modalities alone, but with haptics predominating.
\fig[1]{results/rankings_texture}{Means with bootstrap \percent{95} \CI of the \level{Ranking} task results for each \factor{Visual Texture}.}[
A lower rank means that the texture was considered rougher, a higher rank means smoother.
]
@@ -73,64 +88,74 @@ These results indicate that the two haptic and visual modalities were integrated
The high level of agreement between participants on the three haptic, visual and visuo-haptic rankings in the \level{Ranking} task (\secref{results_ranking}), as well as the similarity of the within-participant rankings, suggests that participants perceived the roughness of the textures similarly, but differed in their strategies for matching the haptic and visual textures in the \level{Matching} task (\secref{results_matching}).
To further investigate the perceived similarity of the haptic and visual textures, and to identify groups of textures that were perceived as similar on the \level{Matching} task, a correspondence analysis and a hierarchical clustering were performed on the matching task confusion matrix (\figref{results/matching_confusion_matrix}).
\paragraph{Correspondence Analysis}
The correspondence analysis captured \percent{60} and \percent{29} of the variance in the first and second dimensions, respectively, with the remaining dimensions each accounting for less than \percent{5}.
\figref{results/matching_correspondence_analysis} shows the first two dimensions with the 18 haptic and visual textures.
The first dimension was similar to the rankings (\figref{results/rankings_texture}), distributing the textures according to their perceived roughness.
It seems that the second dimension opposed textures that were perceived as hard to those perceived as softer, as also reported by participants.
Stiffness is indeed an important perceptual dimension of a material (\secref[related_work]{hardness}).
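The analysis was done with \textit{FactoMineR} in R; the underlying computation is an SVD of the standardized residuals of the contingency table, sketched below in numpy (function and variable names are ours):
\begin{verbatim}
import numpy as np

def correspondence_analysis(X):
    # X: contingency table (here, the confusion matrix counts).
    P = X / X.sum()                      # correspondence matrix
    r, c = P.sum(axis=1), P.sum(axis=0)  # row and column masses
    S = (P - np.outer(r, c)) / np.sqrt(np.outer(r, c))
    U, s, Vt = np.linalg.svd(S, full_matrices=False)
    rows = U * s / np.sqrt(r)[:, None]   # principal row coordinates
    cols = Vt.T * s / np.sqrt(c)[:, None]
    explained = s**2 / (s**2).sum()      # share of inertia per dim
    return rows, cols, explained
\end{verbatim}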
\fig[1]{results/matching_correspondence_analysis}{Correspondence analysis of the confusion matrix of the \level{Matching} task.}[
The closer the haptic and visual textures are, the more similar they were judged.
The first dimension (horizontal axis) explains \percent{60} of the variance, the second dimension (vertical axis) explains \percent{29} of the variance.
% The confusion matrix is shown in \figref{results/matching_confusion_matrix}.
]
\paragraph{Hierarchical Clustering}
\figref{results_clusters} shows the dendrograms of the two hierarchical clusterings of the haptic and visual textures, constructed using the Euclidean distance and Ward's method on squared distance.
The four identified haptic texture clusters were: \enquote{Roughest} \{\level{Metal Mesh}, \level{Sandpaper~100}, \level{Brick~2}, \level{Cork}\}; \enquote{Rougher} \{\level{Sandpaper~320}, \level{Velcro Hooks}\}; \enquote{Smoother} \{\level{Plastic Mesh~1}, \level{Terra Cotta}\}; \enquote{Smoothest} \{\level{Coffee Filter}\} (\figref{results/clusters_haptic}).
Similar to the haptic ranks (\figref{results/rankings_modality}, left), the clusters could have been named according to their perceived roughness.
It also shows that the participants compared and ranked the haptic textures during the \level{Matching} task to select the one that best matched the given visual texture.
The five identified visual texture clusters were: \enquote{Roughest} \{\level{Metal Mesh}\}; \enquote{Rougher} \{\level{Sandpaper~100}, \level{Brick~2}, \level{Velcro Hooks}\}; \enquote{Medium} \{\level{Cork}, \level{Plastic Mesh~1}\}; \enquote{Smoother} \{\level{Sandpaper~320}, \level{Terra Cotta}\}; \enquote{Smoothest} \{\level{Coffee Filter}\} (\figref{results/clusters_visual}).
They are also easily identifiable on the visual ranking results, which also made it possible to name them.
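A minimal sketch of such a clustering with scipy, assuming a hypothetical matrix \texttt{X} whose rows are the confusion-matrix profiles of the haptic (or visual) textures:
\begin{verbatim}
from scipy.cluster.hierarchy import dendrogram, fcluster, linkage

# X: hypothetical array with one row per texture, i.e. its
# selection profile taken from the confusion matrix.
Z = linkage(X, method="ward")  # Ward linkage on Euclidean distances
labels = fcluster(Z, t=4, criterion="maxclust")  # cut into 4 clusters
dendrogram(Z)  # plotting requires matplotlib
\end{verbatim}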
\begin{subfigs}{results_clusters}{Dendrograms of the hierarchical clusterings of the confusion matrix of the \level{Matching} task.}[
Done with the Euclidean distance and Ward's method on squared distance.
The height of the dendrograms represents the distance between the clusters.
][
\item For the haptic textures.
\item For the visual textures.
]
\subfig[0.48]{results/clusters_haptic}
\subfig[0.48]{results/clusters_visual}
\end{subfigs}
\paragraph{Confusion Matrices of Clusters}
Based on these results, two alternative confusion matrices were constructed.
Similarly to \secref{results_matching}, an \ANOVA based on a Poisson regression was performed for each confusion matrix on the number of selections, followed by post-hoc pairwise comparisons using Tukey's \HSD test. No overdispersion was detected in the Poisson regressions.
\figref{results/haptic_visual_clusters_confusion_matrices} (left) shows the confusion matrix of the \level{Matching} task with visual texture clusters and the proportion of haptic texture clusters selected in response.
There was a statistically significant effect on the number of selections of the interaction visual texture cluster \x haptic texture cluster (\chisqr{12}{180}{324}, \pinf{0.001}), and statistically significant differences for the following visual clusters:
\begin{itemize}
\item With \enquote{Roughest}, the haptic cluster \enquote{Roughest} was selected most often (\ztest{4.6}, \pinf{0.001}).
\item With \enquote{Rougher}, \enquote{Smoothest} was selected least often (\ztest{-4.0}, \pinf{0.001}) and \enquote{Rougher} more often than \enquote{Smoother} (\ztest{-3.4}, \pinf{0.001}).
\item With \enquote{Medium}, \enquote{Rougher} and \enquote{Smoother} were both selected more often (\ztest{4.5}, \pinf{0.001}) than \enquote{Roughest} and \enquote{Smoothest}.
\item With \enquote{Smoother}, \enquote{Smoother} (\ztest{4.2}, \pinf{0.001}) and \enquote{Smoothest} (\ztest{4.7}, \pinf{0.001}) were both selected more often than \enquote{Roughest} and \enquote{Rougher}.
\item With \enquote{Smoothest}, \enquote{Smoother} (\ztest{2.6}, \p{0.05}) and \enquote{Smoothest} (\ztest{3.9}, \pinf{0.001}) were both selected more often than \enquote{Roughest} and \enquote{Rougher}.
\end{itemize}
\figref{results/haptic_visual_clusters_confusion_matrices} (right) shows the confusion matrix of the \level{Matching} task with visual texture ranks and the proportion of haptic texture clusters selected in response.
There was a statistically significant effect on the number of selections of the visual texture rank \x haptic texture cluster interaction (\chisqr{24}{180}{340}, \pinf{0.001}), and statistically significant differences for the following visual texture ranks:
\begin{itemize}
\item Rank 0: the haptic cluster \enquote{Roughest} was the most selected (\ztest{4.5}, \pinf{0.001}).
\item Ranks 1, 2 and 3: \enquote{Smoothest} was the least selected (\ztest{-3.0}, \p{0.04}).
\item Rank 4: \enquote{Rougher} was selected more often than \enquote{Roughest} and \enquote{Smoothest} (\ztest{3.0}, \p{0.03}).
\item Rank 5: \enquote{Rougher} and \enquote{Smoother} were both selected more often (\ztest{4.5}, \p{0.01}) than \enquote{Roughest} and \enquote{Smoothest}.
\item Rank 6: \enquote{Smoother} was selected more often than \enquote{Roughest} (\ztest{3.2}, \p{0.006}).
\item Rank 7: \enquote{Smoother} and \enquote{Smoothest} were both selected more often (\ztest{3.4}, \p{0.04}) than \enquote{Roughest} and \enquote{Rougher}.
\item Rank 8: \enquote{Smoother} and \enquote{Smoothest} were both selected more often (\ztest{3.2}, \p{0.04}) than \enquote{Roughest} and \enquote{Rougher}.
\end{itemize}
\fig{results/haptic_visual_clusters_confusion_matrices}{
Confusion matrices of the visual texture (left) or rank (right) with the corresponding haptic texture clusters selected in proportion.
}[]
\subsection{Questionnaire}
\label{results_questions}
@@ -140,7 +165,7 @@ A non-parametric \ANOVA on \ART models were used for the \response{Difficulty} a
The other question results were analyzed using Wilcoxon signed-rank tests, with Holm-Bonferroni adjustment.
The results are shown as mean $\pm$ standard deviation.
On \response{Difficulty}, there were statistically significant effects of \factor{Task} (\anova{1}{57}{13}, \pinf{0.001}) and of \factor{Modality} (\anova{1}{57}{8}, \p{0.007}), but no interaction effect \factor{Task} \x \factor{Modality} (\anova{1}{57}{2}, \ns).
The \level{Ranking} task was found easier (\num{2.9 \pm 1.2}) than the \level{Matching} task (\num{3.9 \pm 1.5}), and the Haptic textures were found easier to discriminate (\num{3.0 \pm 1.3}) than the Visual ones (\num{3.8 \pm 1.5}).
Both haptic and visual textures were judged moderately realistic for both tasks (\num{4.2 \pm 1.3}), with no statistically significant effect of \factor{Task}, \factor{Modality} or their interaction on \response{Realism}.
@@ -151,10 +176,10 @@ The coherence of the texture pairs was considered moderate (\num{4.6 \pm 1.2}) a
Pairwise Wilcoxon signed-rank tests with Holm-Bonferroni adjustment: * is \pinf{0.05}, ** is \pinf{0.01} and *** is \pinf{0.001}.
Lower is better for Difficulty and Uncomfortable; higher is better for Realism and Textures Match.
][
\item By \factor{Modality}.
\item By \factor{Task}.
]
\subfigsheight{70mm}
\subfig{results/questions_modalities}
\subfig{results/questions_tasks}
\end{subfigs}

View File

@@ -17,19 +17,19 @@ Several strategies were used, as some participants reported using vibration freq
It should be noted that the task was rather difficult (\figref{results_questions}), as participants had no prior knowledge of the textures, there were no additional visual cues such as the shape of an object, and the term \enquote{roughness} had not been used by the experimenter prior to the \level{Ranking} task.
The correspondence analysis (\figref{results/matching_correspondence_analysis}) highlighted that participants did indeed match visual and haptic textures primarily on the basis of their perceived roughness (\percent{60} of variance), which is in line with previous perception studies on real textures \cite{baumgartner2013visual} and virtual textures \cite{culbertson2014modeling}.
The rankings (\figref{results/rankings_modality}) confirmed that the participants all perceived the roughness of haptic textures very similarly, but that there was less consensus for visual textures, which is also in line with roughness rankings for real haptic and visual textures \cite{bergmanntiest2007haptic}.
These results made it possible to identify and name groups of textures in the form of clusters (\figref{results_clusters}), and to construct confusion matrices between these clusters and between visual texture ranks and haptic clusters (\figref{results/haptic_visual_clusters_confusion_matrices}), showing that participants consistently identified and matched haptic and visual textures.
\percent{29} of the matching variance of the correspondence analysis was also captured by a second dimension, opposing the roughest textures (\level{Metal Mesh}, \level{Sandpaper~100}), and to a lesser extent the smoothest (\level{Coffee Filter}, \level{Sandpaper~320}), to all other textures (\figref{results/matching_correspondence_analysis}).
One hypothesis is that this dimension could be the perceived hardness (\secref[related_work]{hardness}) of the virtual materials, with \level{Metal Mesh} and smooth textures appearing harder than the other textures, whose granularity could have been perceived as bumps on the surface that could deform under finger pressure.
Hardness is, with roughness, one of the main characteristics perceived by vision and touch of real materials \cite{baumgartner2013visual,vardar2019fingertip}, but also of virtual haptic renderings \cite{culbertson2014modeling,degraen2019enhancing}.
The last visuo-haptic roughness ranking (\figref{results/rankings_texture}) showed that both haptic and visual sensory information were well integrated, as the resulting roughness ranking lay between the two individual haptic and visual rankings.
Several strategies were reported: some participants first classified visually and then corrected with haptics, others classified haptically and then integrated visuals.
While visual sensation did influence perception, as observed in previous haptic \AR studies \cite{punpongsanon2015softar,gaffary2017ar,fradin2023humans}, haptic sensation dominated here.
This indicates that participants were more confident in and relied more on the haptic roughness perception than on the visual roughness perception when integrating both into one coherent perception.
Several participants also described attempting to identify visual and haptic textures using spatial breaks, edges or patterns, which were not reported when these textures were displayed in \VEs using a screen \cite{culbertson2014modeling,culbertson2015should}.
A few participants even reported that they clearly sensed patterns on haptic textures.
However, the visual and haptic textures used were isotropic and homogeneous models of real texture captures, \ie their rendered roughness was constant and did not depend on the direction of movement but only on the speed of the finger (\secref[related_work]{texture_rendering}).
Overall, the haptic device was judged to be comfortable, and the visual and haptic textures were judged to be fairly realistic and to work well together (\figref{results_questions}).

View File

@@ -1,7 +1,7 @@
\section{Conclusion}
\label{conclusion}
In this chapter, we investigated how users perceived simultaneous and co-localized visuo-haptic texture augmentations of real surfaces seen with an \OST-\AR headset and touched directly with the index finger.
Using the wearable visuo-haptic augmentation system presented in \chapref{vhar_system}, the haptic roughness texture was rendered based on the \HaTT data-driven models and finger speed.
In a user study, 20 participants rated the coherence, realism and perceived roughness of the combination of nine representative visuo-haptic texture pairs.
@@ -11,18 +11,18 @@ Conversely, there was less consensus on the perceived roughness of visual textur
Regarding the final roughness perception ranking of the original visuo-haptic pairs, the haptic roughness sensation dominated the perception.
This suggests that \AR visual textures that augment real surfaces can be enhanced with a set of data-driven vibrotactile haptic textures in a coherent and realistic manner.
This paves the way for new \AR applications capable of augmenting a \RE with virtual visuo-haptic textures, such as visuo-haptic painting in an artistic or object design context, or viewing and touching virtual objects in a museum or a showroom.
The latter is illustrated in \figref{experiment/use_case}, where a user applies different visuo-haptic textures to a wall, in an interior design scenario, to compare them visually and by touch.
We instinctively perceive the properties of everyday objects by touching and exploring them, but we essentially interact with them by grasping in order to manipulate them.
In this first part, we focused on the perception of virtual visuo-haptic textures that augment real surfaces when touched with the fingertip.
In the next part, we will propose to improve the direct manipulation of virtual objects with the hand using wearable visuo-haptic interaction feedback.
\noindentskip The work described in \chapref{vhar_textures} was presented at the EuroHaptics 2024 conference: \noindentskip The work described in \chapref{vhar_textures} was presented at the EuroHaptics 2024 conference:
Erwan Normand, Claudio Pacchierotti, Eric Marchand, and Maud Marchal. \enquote{Augmenting the Texture Perception of Tangible Surfaces in Augmented Reality using Vibrotactile Haptic Stimuli}. In: \textit{EuroHaptics}. Lille, France, July 2024. pp. 469--484. Erwan Normand, Claudio Pacchierotti, Eric Marchand, and Maud Marchal. \enquote{Augmenting the Texture Perception of Tangible Surfaces in Augmented Reality using Vibrotactile Haptic Stimuli}. In: \textit{EuroHaptics}. Lille, France, July 2024. pp. 469--484.
-\fig[0.5]{experiment/use_case}{
+\fig[0.65]{experiment/use_case}{
Illustration of the texture augmentation in \AR through an interior design scenario.
}[
A user wearing an \AR headset and a wearable vibrotactile haptic device worn on their index finger is applying different virtual visuo-haptic textures to a real wall to compare them visually and by touch.

View File

@@ -1,7 +1,7 @@
\section{Introduction}
\label{intro}
-In the previous chapter, we presented a system for augmenting the visuo-haptic texture perception of real surfaces directly touched with the finger, using wearable vibrotactile haptics and an immersive \AR headset.
+In the previous chapter, we presented a system for augmenting the visuo-haptic texture perception of real surfaces directly touched with the finger, using wearable vibrotactile haptics and an \OST-\AR headset.
In this chapter and the next one, we evaluate the user's perception of such wearable haptic texture augmentation under different visual rendering conditions.
Most of the haptic augmentations of real surfaces using wearable haptic devices, including the roughness of textures, have been studied without visual feedback, and none have considered the influence of the visual rendering on their perception or integrated them in \AR and \VR (\secref[related_work]{texture_rendering}).
@@ -26,6 +26,6 @@ We then present the results obtained, discuss them, and outline recommendations
\fig[0.55]{teaser/teaser2}{
Vibrotactile textures were rendered in real time on a real surface using a wearable vibrotactile device worn on the finger.
-}[%
+}[
Participants explored this haptic roughness augmentation with (\level{Real}) their real hand alone, (\level{Mixed}) a realistic virtual hand overlay in \AR, and (\level{Virtual}) the same virtual hand in \VR.
]

View File

@@ -9,14 +9,14 @@ In order not to influence the perception, as vision is an important source of in
\begin{subfigs}{renderings}{
The three visual rendering conditions and the experimental procedure of the \TIFC psychophysical study.
-}[%
+}[
During a trial, two tactile textures were rendered on the augmented area of the paper sheet (black rectangle) for \qty{3}{\s} each, one after the other, then the participant chose which one was the roughest.
The visual rendering stayed the same during the trial.
The pictures are captured directly from the Microsoft HoloLens 2 headset.
][
-\item The real environment and real hand view without any visual augmentation.
+\item The \RE with real hand view and without any visual augmentation.
-\item The real environment and hand view with the virtual hand.
+\item The \RE with real hand and virtual hand view.
-\item Virtual environment with the virtual hand.
+\item The \VE with the virtual hand.
]
\subfig[0.326]{experiment/real}
\subfig[0.326]{experiment/mixed}
@@ -27,17 +27,19 @@ In order not to influence the perception, as vision is an important source of in
\label{apparatus}
An experimental environment was created to ensure a similar visual rendering in \AR and \VR (\figref{renderings}).
-It consisted of a \qtyproduct{300 x 210 x 400}{\mm} medium-density fibreboard (MDF) box with a paper sheet glued inside and a \qtyproduct{50 x 15}{\mm} rectangle printed on the sheet to delimit the area where the tactile textures were rendered.
+It consisted of a \qtyproduct{30 x 21 x 40}{\cm} medium-density fibreboard box with a paper sheet glued inside and a \qtyproduct{5 x 1.5}{\cm} rectangle printed on the sheet to delimit the area where the tactile textures were rendered.
A single light source of \qty{800}{\lumen} placed \qty{70}{\cm} above the table fully illuminated the inside of the box.
Participants rated the roughness of the paper (without any texture augmentation) before the experiment on a 7-point Likert scale (1~=~Extremely smooth, 7~=~Extremely rough) as quite smooth (\mean{2.5}, \sd{1.3}).
-The visual rendering of the virtual hand and environment was achieved using the \OST-\AR headset Microsoft HoloLens~2 (\secref[vhar_system]{virtual_real_alignment}) running at \qty{60}{FPS} a custom application made with Unity 2021.1 and Mixed Reality Toolkit (MRTK) 2.7.2.
+The visual rendering of the virtual hand and the \VE was achieved using the \OST-\AR headset Microsoft HoloLens~2 running a custom application at \qty{60}{FPS}, made with Unity (v2021.1) and the Mixed Reality Toolkit (v2.7).
An \OST-\AR headset was chosen over a \VST-\AR headset because the former only adds virtual content to the \RE, while the latter streams a real-time video capture of the \RE, and one of our objectives was to directly compare a \VE replicating a real one, not a video feed that introduces many other visual limitations (\secref[related_work]{ar_displays}).
-The \VE carefully reproduced the \RE, including the geometry of the box, textures, lighting, and shadows (\figref{renderings}, \level{Virtual}).
+\comans{JG}{In addition, the lag between the real and virtual hand in the Mixed condition could have been quantified (e.g. using a camera filming through the headset) to shed more light on the reported differences, as also noted in Section 4.5, as well as the registration error between the real and the virtual hand (as visible in Figure 4.1, Mixed).}{This has been added.}
+We carefully reproduced the \RE in the \VE, including the geometry of the box, textures, lighting, and shadows (\figref{renderings}, \level{Virtual}).
The virtual hand model was a gender-neutral human right hand with realistic skin texture, similar to that used by \textcite{schwind2017these}.
-Its size was adjusted to match the real hand of the participants before the experiment.
+Prior to the experiment, the virtual hand and the \VE were registered to the real hand of the participant and the \RE, respectively, as described in \secref[vhar_system]{virtual_real_registration}.
-The visual rendering of the virtual hand and environment is described in \secref[vhar_system]{virtual_real_alignment}.
+The size of the virtual hand was also manually adjusted to match the real hand of the participant.
+A \qty{\pm .5}{\cm} spatial alignment error and a \qty{160 \pm 30}{\ms} lag between the real and the virtual hand were measured (\secref[vhar_system]{virtual_real_registration}).
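For illustration, such a lag can be estimated by cross-correlating the real and virtual fingertip position traces, \eg extracted from a camera filming through the headset; a minimal Python sketch with synthetic stand-in traces assuming a known \qty{160}{\ms} delay:

    import numpy as np

    fs = 120.0                                      # camera frame rate (Hz)
    t = np.arange(0, 5, 1 / fs)
    real = np.sin(2 * np.pi * 0.8 * t)              # real fingertip x-position
    virtual = np.sin(2 * np.pi * 0.8 * (t - 0.16))  # virtual hand, 160 ms late

    # The peak of the cross-correlation gives the delay of the virtual trace.
    xcorr = np.correlate(virtual - virtual.mean(), real - real.mean(), "full")
    lag = (np.argmax(xcorr) - (len(real) - 1)) / fs
    print(f"estimated lag: {1000 * lag:.0f} ms")
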
To ensure the same \FoV in all \factor{Visual Rendering} conditions, a cardboard mask was attached to the \AR headset (\figref{experiment/headset}).
In the \level{Virtual} rendering, the mask only had holes for sensors to block the view of the \RE and simulate a \VR headset.
@@ -51,7 +53,7 @@ They also wore headphones with a brown noise masking the sound of the voice-coil
The user study was held in a quiet room with no windows.
\begin{subfigs}{setup}{Visuo-haptic texture rendering setup.}[][
-\item HoloLens~2 \OST-\AR headset, the two cardboard masks to switch the real or virtual environments with the same field of view, and the \ThreeD-printed piece for attaching the masks to the headset.
+\item HoloLens~2 \OST-\AR headset, the two cardboard masks to switch the \RE and \VE with the same \FoV, and the \ThreeD-printed piece for attaching the masks to the headset.
\item User exploring a virtual vibrotactile texture on a real sheet of paper.
]
\subfigsheight{48.5mm}
@@ -63,7 +65,7 @@ The user study was held in a quiet room with no windows.
\label{procedure}
Participants were first given written instructions about the experimental setup and procedure, the informed consent form to sign, and a demographic questionnaire.
-A calibration was then performed to adjust the HoloLens~2 to the participant's interpupillary distance (IPD), the virtual hand to the real hand size, and the fiducial marker to the finger position.
+The calibration was then performed to adjust the HoloLens~2 to the participant's interpupillary distance, the fiducial marker to the finger position, and the virtual hand size to the real hand.
They familiarized themselves with the task by completing four training trials with the most different pair of textures.
The trials were divided into three blocks, one for each \factor{Visual Rendering} condition, with a break and questionnaire between each block.
Before each block, the experimenter ensured that the \VE and the virtual hand were correctly aligned with their real equivalents, that the haptic device was in place, and attached the cardboard mask corresponding to the next \factor{Visual Rendering} condition to the headset.
@@ -88,7 +90,7 @@ The user study took on average one hour to complete.
The user study was a within-subjects design with two factors:
\begin{itemize}
-\item \factor{Visual Rendering} consists of the augmented or virtual view of the environment, the hand and the wearable haptic device, with 3 levels: real environment and hand view without any visual augmentation (\figref{renderings}, \level{Real}), real environment and hand view with the superimposed virtual hand (\figref{renderings}, \level{Mixed}) and virtual environment with the virtual hand (\figref{renderings}, \level{Virtual}).
+\item \factor{Visual Rendering} consists of the augmented or virtual view of the environment, the hand and the wearable haptic device, with 3 levels: \RE and real hand view without any visual augmentation (\figref{renderings}, \level{Real}), \AE with real hand view and the superimposed virtual hand (\figref{renderings}, \level{Mixed}), and \VE with the virtual hand (\figref{renderings}, \level{Virtual}).
\item \factor{Amplitude Difference} consists of the difference in amplitude of the comparison texture with the reference texture (which is identical for all visual renderings), with 6 levels: \qtylist{\pm 12.5; \pm 25.0; \pm 37.5}{\%} (see the sketch after this list).
\end{itemize}
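For illustration, a minimal Python sketch of how such \TIFC responses can be reduced to a psychometric function, from which a point of subjective equality (PSE) and a just noticeable difference (JND) are read; the thesis analysis used the R package \textit{MixedPsy}, and the response proportions below are made up.

    import numpy as np
    from scipy.optimize import curve_fit
    from scipy.stats import norm

    # Proportion of trials where the comparison texture was judged rougher,
    # per amplitude difference (%); hypothetical values.
    amp_diff = np.array([-37.5, -25.0, -12.5, 12.5, 25.0, 37.5])
    p_rougher = np.array([0.10, 0.22, 0.40, 0.65, 0.80, 0.92])

    def psychometric(x, pse, sigma):
        """Cumulative Gaussian psychometric function."""
        return norm.cdf(x, loc=pse, scale=sigma)

    (pse, sigma), _ = curve_fit(psychometric, amp_diff, p_rougher, p0=(0.0, 20.0))
    jnd = sigma * norm.ppf(0.75)   # 75% discrimination threshold
    print(f"PSE = {pse:.1f} %, JND = {jnd:.1f} %")
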
@@ -118,6 +120,8 @@ After each \factor{Visual Rendering} block of trials, participants rated their e
They also assessed their workload with the NASA Task Load Index (\response{NASA-TLX}) questionnaire after each block of trials (\tabref{questions2}).
For all questions, participants were shown only labels (\eg \enquote{Not at all} or \enquote{Extremely}) and not the actual scale values (\eg 1 or 5) \cite{muller2014survey}.
+The results were analyzed using R (v4.4) and the packages \textit{afex} (v1.4), \textit{ARTool} (v0.11), \textit{MixedPsy} (v1.2), \textit{lme4} (v1.1), and \textit{performance} (v0.13).
\newcommand{\scalegroup}[2]{\multirow{#1}{1\linewidth}{#2}}
\afterpage{
\begin{tabwide}{questions1}

View File

@@ -37,8 +37,8 @@ All pairwise differences were statistically significant.
\subsubsection{Response Time}
\label{response_time}
-A \LMM \ANOVA with by-participant random slopes for \factor{Visual Rendering}, and a log transformation (as \response{Response Time} measures were gamma distributed) indicated a statistically significant effect on \response{Response Time} of \factor{Visual Rendering} (\anova{2}{18}{6.2}, \p{0.009}, \figref{results/trial_response_times}).
+A \LMM \ANOVA with by-participant random slopes for \factor{Visual Rendering}, and a log transformation (as \response{Response Time} measures were gamma distributed) indicated a statistically significant effect on \response{Response Time} of \factor{Visual Rendering} (\anova{2}{18}{6.2}, \p{0.009}, see \figref{results/trial_response_times}).
-Reported response times are \GM.
+Reported response times are geometric means (GM).
Participants took longer on average to respond with the \level{Virtual} rendering (\geomean{1.65}{\s} \ci{1.59}{1.72}) than with the \level{Real} rendering (\geomean{1.38}{\s} \ci{1.32}{1.43}), which is the only statistically significant difference (\ttest{19}{0.3}, \p{0.005}).
The \level{Mixed} rendering was in between (\geomean{1.56}{\s} \ci{1.49}{1.63}).
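For reference, a minimal Python sketch of an analogous model; the actual analysis was run in R with \textit{afex}/\textit{lme4}, and the long-format \texttt{trials.csv} and its column names are hypothetical. The log transform addresses the skew of the response times, and the by-participant random slopes let the \factor{Visual Rendering} effect vary across participants.

    import numpy as np
    import pandas as pd
    import statsmodels.formula.api as smf

    # Hypothetical columns: participant, rendering (Real/Mixed/Virtual),
    # response_time (s), one row per trial.
    df = pd.read_csv("trials.csv")
    df["log_rt"] = np.log(df["response_time"])

    # LMM with by-participant random slopes for the rendering factor.
    model = smf.mixedlm("log_rt ~ rendering", df,
                        groups=df["participant"], re_formula="~rendering")
    fit = model.fit()
    print(fit.summary())
    # exp() of the fitted means on the log scale yields geometric means.
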
@@ -47,17 +47,17 @@ The \level{Mixed} rendering was in between (\geomean{1.56}{\s} \ci{1.49}{1.63}).
The frames analyzed were those in which the participants actively touched the comparison textures with a finger speed greater than \SI{1}{\mm\per\second}.
-A \LMM \ANOVA with by-participant random slopes for \factor{Visual Rendering} indicated only one statistically significant effect on the total distance traveled by the finger in a trial of \factor{Visual Rendering} (\anova{2}{18}{3.9}, \p{0.04}, \figref{results/trial_distances}).
+A \LMM \ANOVA with by-participant random slopes for \factor{Visual Rendering} indicated only one statistically significant effect on the total distance traveled by the finger in a trial of \factor{Visual Rendering} (\anova{2}{18}{3.9}, \p{0.04}, see \figref{results/trial_distances}).
On average, participants explored a larger distance with the \level{Real} rendering (\geomean{20.0}{\cm} \ci{19.4}{20.7}) than with the \level{Virtual} rendering (\geomean{16.5}{\cm} \ci{15.8}{17.1}), which is the only statistically significant difference (\ttest{19}{1.2}, \p{0.03}), with the \level{Mixed} rendering (\geomean{17.4}{\cm} \ci{16.8}{18.0}) in between.
-Another \LMM \ANOVA with by-trial and by-participant random intercepts but no random slopes indicated only one statistically significant effect on \response{Finger Speed} of \factor{Visual Rendering} (\anova{2}{2142}{2.0}, \pinf{0.001}, \figref{results/trial_speeds}).
+Another \LMM \ANOVA with by-trial and by-participant random intercepts but no random slopes indicated only one statistically significant effect on \response{Finger Speed} of \factor{Visual Rendering} (\anova{2}{2142}{2.0}, \pinf{0.001}, see \figref{results/trial_speeds}).
On average, the textures were explored with the highest speed with the \level{Real} rendering (\geomean{5.12}{\cm\per\second} \ci{5.08}{5.17}), the lowest with the \level{Virtual} rendering (\geomean{4.40}{\cm\per\second} \ci{4.35}{4.45}), and the \level{Mixed} rendering (\geomean{4.67}{\cm\per\second} \ci{4.63}{4.71}) in between.
All pairwise differences were statistically significant: \level{Real} \vs \level{Virtual} (\ttest{19}{1.17}, \pinf{0.001}), \level{Real} \vs \level{Mixed} (\ttest{19}{1.10}, \pinf{0.001}), and \level{Mixed} \vs \level{Virtual} (\ttest{19}{1.07}, \p{0.02}).
This means that within the same time window on the same surface, participants explored the comparison texture on average over a greater distance and at a higher speed when in the \RE without visual representation of the hand (\level{Real} condition) than when in \VR (\level{Virtual} condition).
\begin{subfigs}{results_finger}{Results of the performance metrics for the rendering conditions.}[
-Boxplots and geometric means with bootstrap \percent{95} \CI, with Tukey's \HSD pairwise comparisons: * is \pinf{0.05}, ** is \pinf{0.01} and *** is \pinf{0.001}.
+Boxplots and geometric means with bootstrap \percent{95} \CI (sketched below) and Tukey's \HSD pairwise comparisons: * is \pinf{0.05}, ** is \pinf{0.01} and *** is \pinf{0.001}.
][
\item Response time at the end of a trial.
\item Distance travelled by the finger in a trial.
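For illustration, a minimal Python sketch of the geometric mean with a percentile bootstrap \percent{95} \CI, as plotted in these figures; the sample values are made up.

    import numpy as np

    rng = np.random.default_rng(0)

    def geometric_mean(x):
        return np.exp(np.mean(np.log(x)))

    def bootstrap_ci(x, stat=geometric_mean, n_boot=10_000, alpha=0.05):
        """Percentile bootstrap confidence interval for a statistic."""
        boots = [stat(rng.choice(x, size=len(x), replace=True))
                 for _ in range(n_boot)]
        return np.percentile(boots, [100 * alpha / 2, 100 * (1 - alpha / 2)])

    rt = np.array([1.2, 1.5, 1.8, 1.4, 1.6, 1.3, 1.7, 1.9, 1.1, 1.5])  # seconds
    print(geometric_mean(rt), bootstrap_ci(rt))
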
@@ -71,8 +71,6 @@ This means that within the same time window on the same surface, participants ex
\subsection{Questionnaires}
\label{results_questions}
-%\figref{results/question_heatmaps} shows the median and interquartile range (IQR) ratings to the questions in \tabref{questions} and to the NASA-TLX questionnaire.
-%
Friedman tests were employed to compare the ratings to the questions (\tabref{questions1} and \tabref{questions2}), with post-hoc Wilcoxon signed-rank tests and Holm-Bonferroni adjustment, except for the questions regarding the virtual hand that were directly compared with Wilcoxon signed-rank tests.
\figref{results_questions} shows these ratings for questions where statistically significant differences were found (results are shown as mean $\pm$ standard deviation):
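For illustration, a minimal Python sketch of this test chain on one question, assuming a participants \x conditions array of ratings; the values are made up, and the actual analysis was done in R.

    import numpy as np
    from scipy.stats import friedmanchisquare, wilcoxon
    from statsmodels.stats.multitest import multipletests

    # Hypothetical Likert ratings: rows = participants, columns = the Real,
    # Mixed, and Virtual renderings.
    ratings = np.array([[4, 3, 3], [5, 4, 3], [4, 4, 2], [3, 3, 3],
                        [5, 3, 4], [4, 2, 3], [5, 4, 4], [4, 3, 2]])

    stat, p = friedmanchisquare(*ratings.T)
    if p < 0.05:
        # Post-hoc pairwise Wilcoxon signed-rank tests, Holm-Bonferroni adjusted.
        pairs = [(0, 1), (0, 2), (1, 2)]
        pvals = [wilcoxon(ratings[:, i], ratings[:, j]).pvalue for i, j in pairs]
        reject, p_adj, _, _ = multipletests(pvals, alpha=0.05, method="holm")
        print(list(zip(pairs, p_adj, reject)))
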
@@ -103,7 +101,7 @@ The overall workload (mean NASA-TLX score) was low (\num{21 \pm 14}), with no st
The textures were also overall found to be very much caused by the finger movements (\response{Texture Agency}, \num{4.5 \pm 1.0}) with a very low perceived latency (\response{Texture Latency}, \num{1.6 \pm 0.8}), and to be quite realistic (\response{Texture Realism}, \num{3.6 \pm 0.9}) and quite plausible (\response{Texture Plausibility}, \num{3.6 \pm 1.0}).
The vibrations were felt as slightly weak overall (\response{Vibration Strength}, \num{4.2 \pm 1.1}), and the vibrotactile device was perceived as neither distracting (\response{Device Distraction}, \num{1.2 \pm 0.4}) nor uncomfortable (\response{Device Discomfort}, \num{1.3 \pm 0.6}).
-Participants were mixed between feeling the vibrations on the surface or on the top of their finger (\response{Vibration Location}, \num{3.9 \pm 1.7}); the distribution of scores was split between the two poles of the scale with \level{Real} and \level{Mixed} renderings (\percent{42.5} more on surface or on finger top, \percent{15} neutral), but there was a trend towards the top of the finger in VR renderings (\percent{65} \vs \percent{25} more on surface and \percent{10} neutral), but this difference was not statistically significant neither.
+Participants were mixed between feeling the vibrations on the surface or on the top of their finger (\response{Vibration Location}, \num{3.9 \pm 1.7}); the distribution of scores was split between the two poles of the scale with the \level{Real} and \level{Mixed} renderings (\percent{42.5} more on surface or on finger top, \percent{15} neutral), whereas there was a trend towards the top of the finger in \VR renderings (\percent{65} \vs \percent{25} more on surface and \percent{10} neutral), although this difference was not statistically significant either.
\begin{tab}{questions2}
{NASA-TLX questions asked to participants after each \factor{Visual Rendering} block of trials.}
@@ -124,15 +122,3 @@ Participants were mixed between feeling the vibrations on the surface or on the
\bottomrule
\end{tabularx}
\end{tab}
-%\figwide{results/question_heatmaps}{%
-%
-% Heatmaps of the questionnaire responses, with the median rating and the interquartile range in brackets on each cell.
-%
-% (Left) 5-point Likert scale questions (1=Not at all, 2=Slightly, 3=Moderately, 4=Very, 5=Extremely).
-%
-% (Middle) 7-point Likert scale questions (1=Extremely A, 2=Moderately A, 3=Slightly A, 4=Neither A nor B, 5=Slightly B, 6=Moderately B, 7=Extremely B) with A and B being the two poles of the scale.
-%
-% (Right) NASA Task Load Index (NASA-TLX) questionnaire (lower values are better).
-%}

View File

@@ -29,5 +29,5 @@ Thereby, we hypothesize that the differences in the perception of vibrotactile r
The perceived delay was most pronounced in \AR, where the virtual hand visually lags significantly behind the real one, but less so in \VR, where only the proprioceptive sense can help detect the lag.
This delay was not perceived when touching the virtual haptic textures without visual augmentation, because only the finger velocity was used to render them, and, despite the varied finger movements and velocities while exploring the textures, the participants did not perceive any latency in the vibrotactile rendering (\secref{results_questions}).
\textcite{diluca2011effects} demonstrated similarly, in a \VST-\AR setup, how visual latency relative to proprioception increased the perception of stiffness of a virtual piston, while haptic latency decreased it (\secref[related_work]{ar_vr_haptic}).
-Another complementary explanation could be a pseudo-haptic effect (\secref[related_work]{visual_haptic_influence}) of the displacement of the virtual hand, as already observed with this vibrotactile texture rendering, but seen on a screen in a non-immersive context \cite{ujitoko2019modulating}.
+Another complementary explanation could be a pseudo-haptic effect (\secref[related_work]{visual_haptic_influence}) of the displacement of the virtual hand, as already observed with this vibrotactile texture rendering, but seen on a screen \cite{ujitoko2019modulating}.
-Such hypotheses could be tested by manipulating the latency and pose estimation accuracy of the virtual hand or the vibrotactile feedback. % to observe their effects on the roughness perception of the virtual textures.
+Such hypotheses could be tested by manipulating the latency and pose estimation accuracy of the virtual hand or the vibrotactile feedback.

View File

@@ -4,20 +4,20 @@
In this chapter, we studied how the perception of wearable haptic augmented textures is affected by the visual feedback of the virtual hand and the environment, being either real, augmented or virtual.
Using the wearable visuo-haptic augmentation system presented in \chapref{vhar_system}, we augmented the perceived roughness of real surfaces with virtual vibrotactile textures rendered on the finger.
%we rendered virtual vibrotactile patterned textures on the voice-coil worn on the middle-phalanx of the finger to augment the roughness perception of the real surface being touched.
-With an immersive \AR headset, that could be switched to a \VR only view, we considered three visual rendering conditions: (1) without visual augmentation, (2) with a realistic virtual hand rendering in \AR, and (3) with the same virtual hand in \VR.
+With an \OST-\AR headset, which could be switched to a \VR-only view, we considered three visual rendering conditions: (1) without visual augmentation, (2) with a realistic virtual hand rendering in \AR, and (3) with the same virtual hand in \VR.
We then evaluated the perceived roughness augmentation in these three visual conditions with a psychophysical user study involving 20 participants and extensive questionnaires.
Our results showed that the visual virtuality of the hand (real or virtual) and the environment (\AR or \VR) had a significant effect on the perception of haptic textures and the exploration behaviour of the participants.
The textures were on average perceived as \enquote{rougher} when touched with the real hand alone than with a virtual hand in \AR, with \VR in between.
Similarly, the sensitivity to differences in roughness was best with the real hand, worst in \AR, and in between in \VR.
Exploration behaviour was also slower in \VR than with the real hand alone, although the subjective evaluation of the texture was not affected.
-We hypothesised that this difference in perception was due to the \emph{perceived latency} between the finger movements and the different visual, haptic and proprioceptive feedbacks, which were the same in all visual renderings, but were more noticeable in \AR and \VR than without visual augmentation.
+We hypothesized that this difference in perception was due to the \emph{perceived latency} between the finger movements and the different visual, haptic and proprioceptive feedbacks, which were the same in all visual renderings, but were more noticeable in \AR and \VR than without visual augmentation.
%We can outline recommendations for future \AR/\VR studies or applications using wearable haptics.
This study suggests that attention should be paid to the respective latencies of the visual and haptic sensory feedbacks inherent in such systems and, more importantly, to \emph{the perception of their possible asynchrony}.
-Latencies should be measured \cite{friston2014measuring}, minimized to an acceptable level for users and kept synchronised with each other \cite{diluca2019perceptual}.
+Latencies should be measured \cite{friston2014measuring}, minimized to an acceptable level for users and kept synchronized with each other \cite{diluca2019perceptual}.
It also seems that the visual aspect of the hand or the environment in itself has little effect on the perception of haptic feedback, but the degree of visual virtuality can affect the perceived asynchrony of the latencies, even though the latencies remain identical.
-When designing for wearable haptics or integrating it into \AR/\VR, it seems important to test its perception in real, augmented and virtual environments.
+When designing for wearable haptics or integrating it into \AR/\VR, it seems important to test its perception in real (\RE), augmented (\AE) and virtual (\VE) environments.
%With a better understanding of how visual factors influence the perception of haptically augmented real objects, the many wearable haptic systems that already exist but have not yet been fully explored with \AR can be better applied and new visuo-haptic renderings adapted to \AR can be designed.
%Finally, a visual hand representation in OST-\AR together with wearable haptics should be avoided until acceptable tracking latencies are achieved, as was also observed for virtual object interaction with the bare hand \cite{normand2024visuohaptic}.

View File

@@ -2,13 +2,12 @@
\label{intro}
Touching, grasping and manipulating virtual objects are fundamental interactions in \AR (\secref[related_work]{ve_tasks}) and essential for many of its applications (\secref[related_work]{ar_applications}).
-%The most common current \AR systems, in the form of portable and immersive \OST-\AR headsets \cite{hertel2021taxonomy}, allow real-time hand tracking and direct interaction with virtual objects with bare hands (\secref[related_work]{real_virtual_gap}).
Manipulation of virtual objects is achieved using a virtual hand interaction technique that represents the user's hand in the \VE and simulates the interaction with virtual objects (\secref[related_work]{ar_virtual_hands}).
The visual feedback of the virtual hand is a key element for interacting with and manipulating virtual objects in \VR \cite{prachyabrued2014visual,grubert2018effects}.
-Some work has also investigated the visual feedback of the virtual hand in \AR, but not in an immersive context of virtual object manipulation \cite{blaga2017usability,yoon2020evaluating} or was limited to a single visual hand augmentation \cite{piumsomboon2014graspshell,maisto2017evaluation}.
+Some work has also investigated the visual feedback of the virtual hand in \AR, but not in a context of virtual object manipulation \cite{al-kalbani2016analysis,yoon2020evaluating}, or was limited to a single visual hand augmentation \cite{piumsomboon2014graspshell,blaga2017usability,maisto2017evaluation}.
\Gls{OST}-\AR also has significant perceptual differences from \VR due to the lack of mutual occlusion between the hand and the virtual object in \OST-\AR (\secref[related_work]{ar_displays}), and the inherent delays between the user's hand and the result of the interaction simulation (\secref[related_work]{ar_virtual_hands}).
-In this chapter, we investigate the \textbf{visual rendering of the virtual hand as augmentation of the real hand} for direct hand manipulation of virtual objects in \OST-\AR.
+In this chapter, we investigate the \textbf{visual rendering of the virtual hand as augmentation of the real hand} for direct hand manipulation of virtual objects with an \OST-\AR headset.
To this end, we selected from the literature and compared the most popular visual hand augmentations used to interact with virtual objects in \AR.
The virtual hand is \textbf{displayed superimposed} on the user's hand with these visual renderings, providing \textbf{feedback on the tracking} of the real hand, as shown in \figref{hands}.
The movement of the virtual hand is also \textbf{constrained to the surface} of the virtual object, providing additional \textbf{feedback on the interaction} with the virtual object.

View File

@@ -56,7 +56,7 @@ Following the guidelines of \textcite{bergstrom2021how} for designing object man
\label{push-task}
The first manipulation task consists in pushing a virtual object along a real flat surface towards a target placed on the same plane (\figref{method/task-push}).
-The virtual object to manipulate is a small \qty{50}{\mm} blue and opaque cube, while the target is a (slightly) bigger \qty{70}{\mm} blue and semi-transparent volume.
+The virtual object to manipulate is a small \qty{5}{\cm} blue and opaque cube, while the target is a (slightly) bigger \qty{7}{\cm} blue and semi-transparent volume.
At every repetition of the task, the cube to manipulate always spawns at the same place, on top of a real table in front of the user.
On the other hand, the target volume can spawn in eight different locations on the same table, located on a \qty{20}{\cm} radius circle centred on the cube, at \qty{45}{\degree} from each other (again \figref{method/task-push}).
Users are asked to push the cube towards the target volume using their fingertips in any way they prefer.
@@ -100,12 +100,11 @@ This design led to a total of 2 manipulation tasks \x 6 visual hand augmentation
\subsection{Apparatus}
\label{apparatus}
-We used the \OST-\AR headset HoloLens~2, as described in \secref[vhar_system]{virtual_real_alignment}.
+We used the \OST-\AR headset HoloLens~2, as described in \secref[vhar_system]{virtual_real_registration}.
-%It is capable of rendering virtual content within an horizontal field of view of \qty{43}{\degree} and a vertical one of \qty{29}{\degree}. It is also able to track the environment as well as the user's fingers.
It is also able to track the user's fingers.
We measured the latency of the hand tracking at \qty{15}{\ms}, independent of the hand movement speed.
-The implementation of our experiment was done using Unity 2022.1, PhysX 4.1, and the Mixed Reality Toolkit (MRTK) 2.8.
+The implementation of our experiment was done using Unity (v2022.1), PhysX (v4.1), and the Mixed Reality Toolkit (MRTK, v2.8).
The compiled application ran directly on the HoloLens~2 at \qty{60}{FPS}.
The default \ThreeD hand model from MRTK was used for all visual hand augmentations.
@@ -114,7 +113,7 @@ A calibration was performed for every participant, to best adapt the size of the
A set of empirical tests enabled us to choose the best rendering characteristics in terms of transparency and brightness for the virtual objects and hand renderings, which were applied throughout the experiment.
The hand tracking information provided by MRTK was used to construct a virtual articulated physics-enabled hand (\secref[related_work]{ar_virtual_hands}) using PhysX.
-It featured 25 DoFs, including the fingers proximal, middle, and distal phalanges.
+It featured 25 \DoFs, including the fingers' proximal, middle, and distal phalanges.
To allow effective (and stable) physical interactions between the hand and the virtual cube to manipulate, we implemented an approach similar to that of \textcite{borst2006spring}, where a series of virtual springs with high stiffness are used to couple the physics-enabled hand with the tracked hand.
As before, a set of empirical tests was used to select the most effective physical characteristics in terms of mass, elastic constant, friction, damping, collider size, and shape for the (tracked) virtual hand interaction model.
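For illustration, a minimal one-dimensional Python sketch of this spring coupling, where each physics body of the hand is pulled towards its tracked target by a stiff damped spring; the constants are illustrative (chosen for stability of this simple integrator), not the values tuned in our tests, and a physics engine's solver tolerates much stiffer springs.

    STIFFNESS = 200.0   # N/m, stiff relative to the body mass
    DAMPING = 2.0       # N.s/m
    MASS = 0.05         # kg, per phalanx body
    DT = 1.0 / 60.0     # physics time step (s)

    def spring_force(tracked_pos, body_pos, body_vel):
        """Force pulling the physics body towards the tracked hand pose."""
        return STIFFNESS * (tracked_pos - body_pos) - DAMPING * body_vel

    def step(body_pos, body_vel, tracked_pos):
        """One semi-implicit Euler step of the coupled body."""
        acc = spring_force(tracked_pos, body_pos, body_vel) / MASS
        body_vel += acc * DT
        body_pos += body_vel * DT
        return body_pos, body_vel

    # The body converges towards the tracked position instead of being
    # teleported, so contacts with the virtual cube resolve through physics.
    pos, vel = 0.0, 0.0
    for _ in range(60):
        pos, vel = step(pos, vel, tracked_pos=0.1)
    print(pos)
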
@@ -165,3 +164,5 @@ Participants also rated each visual hand augmentation individually on six questi
\end{itemize}
Finally, participants were encouraged to comment out loud on the conditions throughout the experiment, as well as in an open-ended question at its end, to gather additional qualitative information.
+The results were analyzed using R (v4.4) and the packages \textit{afex} (v1.4), \textit{ARTool} (v0.11), and \textit{performance} (v0.13).

View File

@@ -12,8 +12,8 @@ We found no statistically significant differences when comparing the considered
All visual hand augmentations showed \response{Grip Apertures} close to the size of the virtual cube, except for the \level{None} rendering (\figref{results/Grasp-GripAperture}), with which participants applied stronger grasps, \ie less distance between the fingertips.
Having no visual hand augmentation, but only the reaction of the cube to the interaction as feedback, made participants less confident in their grip.
-This result contrasts with the wrongly estimated grip apertures observed by \textcite{al-kalbani2016analysis} in an exocentric VST-AR setup.
+This result contrasts with the wrongly estimated grip apertures observed by \textcite{al-kalbani2016analysis} in an exocentric \VST-\AR setup.
-Also, while some participants found the absence of visual hand augmentation more natural, many of them commented on the importance of having feedback on the tracking of their hands, as observed by \textcite{xiao2018mrtouch} in a similar immersive OST-AR setup.
+Also, while some participants found the absence of visual hand augmentation more natural, many of them commented on the importance of having feedback on the tracking of their hands, as observed by \textcite{xiao2018mrtouch} with an \OST-\AR headset.
Yet, participants' opinions of the visual hand augmentations were mixed on many questions, except for the \level{Occlusion} one, which was perceived as less effective than more \enquote{complete} visual hands such as the \level{Contour}, \level{Skeleton}, and \level{Mesh} hands (\figref{results_questions}).
However, due to the latency of the hand tracking and the visual hand reacting to the cube, almost all participants thought that the \level{Occlusion} rendering was a \enquote{shadow} of the real hand on the cube.
@@ -24,7 +24,7 @@ while others found that it gave them a better sense of the contact points and im
This result is consistent with \textcite{saito2021contact}, who found that displaying the points of contact was beneficial for grasping a virtual object over an opaque visual hand overlay.
To summarize, when employing a visual feedback of the virtual hand overlaying the real hand, participants were more performant and confident in manipulating virtual objects with bare hands in \AR.
-These results contrast with similar manipulation studies, but in non-immersive, on-screen \AR, where the presence of a visual hand augmentation was found by participants to improve the usability of the interaction, but not their performance \cite{blaga2017usability,maisto2017evaluation,meli2018combining}.
+These results contrast with similar manipulation studies, but in on-screen \AR, where the presence of a visual hand augmentation was found by participants to improve the usability of the interaction, but not their performance \cite{blaga2017usability,maisto2017evaluation,meli2018combining}.
Our results show the most effective visual hand augmentation to be the \level{Skeleton} one.
Participants appreciated that it provided a detailed and precise view of the tracking of the real hand, without hiding or masking it.
Although the \level{Contour} and \level{Mesh} hand renderings were also highly rated, some participants felt that they were too visible and masked the real hand.

View File

@@ -1,11 +1,11 @@
\section{Conclusion}
\label{conclusion}
-In this chapter, we addressed the challenge of touching, grasping and manipulating virtual objects directly with the hand in immersive \OST-\AR.
+In this chapter, we addressed the challenge of touching, grasping and manipulating virtual objects directly with the hand using an \OST-\AR headset.
To do so, we proposed to evaluate visual renderings of the virtual hand as augmentation of the real hand.
Superimposed on the user's hand, these visual renderings provide feedback from the virtual hand, which tracks the real hand and simulates the interaction with virtual objects as a proxy.
We first selected and compared the six most popular visual hand augmentations used to interact with virtual objects in \AR.
-Then, in a user study with 24 participants and an immersive \OST-\AR headset, we evaluated the effect of these six visual hand augmentations on the user performance and experience in two representative manipulation tasks.
+Then, in a user study with 24 participants, we evaluated the effect of these six visual hand augmentations on the user performance and experience in two representative manipulation tasks.
Our results showed that a visual hand augmentation improved the performance, perceived effectiveness and confidence of participants compared to no augmentation.
A skeleton rendering, which provided a detailed view of the tracked joints and phalanges while not hiding the real hand, was the most performant and effective.

View File

@@ -13,7 +13,7 @@ A final question is whether one or the other of these (haptic or visual) hand fe
However, these studies were conducted in non-immersive setups, with a screen displaying the \VE view.
In fact, both types of hand feedback can provide sufficient sensory information for efficient direct hand manipulation of virtual objects in \AR, or, conversely, they can be shown to be complementary.
-In this chapter, we aim to investigate the role of \textbf{visuo-haptic feedback of the hand when manipulating virtual object} in immersive \OST-\AR using wearable vibrotactile haptics.
+In this chapter, we investigate the role of \textbf{visuo-haptic feedback of the hand when manipulating virtual objects} using an \OST-\AR headset and wearable vibrotactile haptics.
We selected \textbf{four different delocalized positionings on the hand} that have been previously proposed in the literature for direct hand interaction in \AR using wearable haptic devices (\secref[related_work]{vhar_haptics}): on the nails, the proximal phalanges, the wrist, and the nails of the opposite hand.
We focused on vibrotactile feedback, as it is used in most wearable haptic devices and has the lowest encumbrance.
In a \textbf{user study}, using the \OST-\AR headset Microsoft HoloLens~2 and two \ERM vibrotactile motors, we evaluated the effect of the four positionings with \textbf{two contact vibration techniques} on the user performance and experience with the same two manipulation tasks as in \chapref{visual_hand}.

View File

@@ -88,14 +88,14 @@ Apparatus and experimental procedure were similar to the \chapref{visual_hand},
We report here only the differences.
We employed the same vibrotactile device used by \cite{devigne2020power}.
It is composed of two encapsulated \ERM (\secref[related_work]{vibrotactile_actuators}) vibration motors (Pico-Vibe 304-116, Precision Microdrive).
They are small and light (\qty{5}{\mm} \x \qty{20}{\mm}, \qty{1.2}{\g}) actuators capable of vibration frequencies from \qtyrange{120}{285}{\Hz} and amplitudes from \qtyrange{0.2}{1.15}{\g}.
They have a latency of \qty{20}{\ms} that we partially compensated for at the software level with slightly larger colliders, triggering the vibrations close to the moment the finger touched the cube.
These two outputs vary linearly together, based on the applied voltage.
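A minimal sketch of this drive behavior, reading "vary linearly together" as a linear interpolation over the usable voltage range (the endpoint frequencies and amplitudes are the figures above; the linear form and the voltage normalization are our assumption):
\[
f(V) = f_{\min} + \frac{V - V_{\min}}{V_{\max} - V_{\min}}\,(f_{\max} - f_{\min}),
\qquad
a(V) = a_{\min} + \frac{V - V_{\min}}{V_{\max} - V_{\min}}\,(a_{\max} - a_{\min}),
\]
with $f_{\min} = \qty{120}{\Hz}$, $f_{\max} = \qty{285}{\Hz}$, $a_{\min} = \qty{0.2}{\g}$, and $a_{\max} = \qty{1.15}{\g}$.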
They were controlled by an Arduino Pro Mini (\qty{3.3}{\V}) and a custom board that delivered the voltage independently to each motor.
A small \qty{400}{mAh} Li-ion battery allowed for 4 hours of constant vibration at maximum intensity.
A Bluetooth module (RN42XV, Microchip Technology Inc.) mounted on the Arduino ensured wireless communication with the HoloLens~2.
To ensure minimal encumbrance, we used the same two motors throughout the experiment, moving them to the considered positioning before each new block.
Thin self-gripping straps were placed on the five positionings, with an elastic strap stitched on top to hold the motor, as shown in \figref{method/locations}.
@@ -140,3 +140,5 @@ They then rated the ten combinations of \factor{Positioning} \x \factor{Vibratio
Finally, they rated the ten combinations of \factor{Positioning} \x \factor{Hand} on a 7-point Likert scale (1=Not at all, 7=Extremely):
\response{Positioning \x Hand Rating}: How much do you like each combination of vibrotactile location for each visual hand rendering?
The results were analyzed using R (v4.4) and the packages \textit{afex} (v1.4), \textit{ARTool} (v0.11), and \textit{performance} (v0.13).

View File

@@ -1,7 +1,7 @@
\section{Conclusion}
\label{conclusion}
In this chapter, we investigated the visuo-haptic feedback of the hand when manipulating virtual objects using an \OST-\AR headset and wearable vibrotactile haptics.
To do so, we provided vibrotactile feedback of the fingertip contacts with virtual objects by moving the haptic actuators away from the fingertips, to positions that do not cover the inside of the hand: on the nails, the proximal phalanges, the wrist, and the nails of the opposite hand.
We selected these four different delocalized positions on the hand from the literature for direct hand interaction in \AR using wearable haptic devices.
In a user study, we compared twenty visuo-haptic feedback conditions of the hand, combining two vibrotactile contact techniques, provided at five different delocalized positions on the user's hand, with the two most representative visual hand augmentations established in \chapref{visual_hand}, \ie the skeleton hand rendering and no hand rendering.

View File

@@ -7,17 +7,17 @@ We conclude this thesis manuscript by summarizing our contributions and the main
\section{Summary}
In this manuscript, we showed how \OST-\AR headsets and wearable haptics can improve direct hand interaction with virtual and augmented objects. % by augmenting the perception of the real and manipulation of the virtual.
Wearable haptics can provide rich tactile feedback on virtual objects and augment the perception of real objects, both directly touched by the hand, while preserving freedom of movement and interaction with the \RE.
However, their integration with \AR is still in its infancy and presents many design, technical and human challenges.
We have structured this thesis around two research axes: \textbf{(I) modifying the visuo-haptic texture perception of real surfaces} and \textbf{(II) improving the manipulation of virtual objects}.
\noindentskip In \partref{perception}, we focused on the perception of wearable virtual textures that augment real surfaces.
Texture is a fundamental property of an object, perceived equally by sight and touch.
It is also one of the most studied haptic augmentations, but has not yet been integrated into \AR or \VR.
We \textbf{(1) proposed a wearable visuo-haptic texture augmentation system}, \textbf{(2)} evaluated how the perception of haptic texture augmentations is \textbf{affected by the visual feedback of the virtual hand} and the environment (real, augmented, or virtual), and \textbf{(3)} investigated the \textbf{perception of co-localized visuo-haptic texture augmentations}.
In \chapref{vhar_system}, we presented a system for \textbf{augmenting any real surface} with virtual \textbf{roughness textures with visuo-haptic feedback} using an \OST-\AR headset and a wearable vibrotactile device worn on the middle phalanx of the finger.
It allows \textbf{free visual and touch exploration} of the textures as if they were real: the user can view them from different angles and touch them with the bare finger without constraints on hand movement.
The user studies in the next two chapters were based on this system.
@@ -30,7 +30,7 @@ In \chapref{vhar_textures}, we investigated the perception of co-localized visua
We transposed the \textbf{data-driven visuo-haptic textures} from the \HaTT database to the system presented in \chapref{vhar_system} and conducted a user study with 20 participants to rate the coherence, realism, and perceived roughness of the combination of nine visuo-haptic texture pairs.
Participants integrated roughness sensations from both visual and haptic modalities well, with \textbf{haptics dominating perception}, and consistently identified and matched \textbf{clusters of visual and haptic textures with similar perceived roughness}.
\noindentskip In \partref{manipulation}, we focused on improving the manipulation of virtual objects directly with the hand using an \OST-\AR headset.
Our approach was to design visual augmentations of the hand and delocalized haptic feedback, based on the literature, and evaluate them in user studies.
We first considered \textbf{(1) the visual augmentation of the hand} and then \textbf{(2)} the combination of different \textbf{visuo-haptic feedback of the hand when manipulating virtual objects}.
@@ -60,12 +60,17 @@ In addition, combination with pseudo-haptic rendering techniques \cite{ujitoko20
\paragraph{Fully Integrated Tracking.}
In our system, we registered the \RE and the \VE using fiducial markers and a webcam external to the \AR headset.
This only allowed us to estimate poses of the index finger and the surface to be augmented with the haptic texture, but it was reliable and accurate enough for our needs.
In fact, preliminary tests we conducted showed that the built-in tracking capabilities of the HoloLens~2 were not able to track hands wearing a vibrotactile voice-coil device.
A more robust hand pose estimation system would support wearing haptic devices on the hand as well as holding real objects.
\comans{JG}{I [...] also want to highlight the opportunity to study the effect of visual registration error as noted already in chapter 4.}{Sentences along these lines have been added.}
The spatial registration error \cite{grubert2018survey} and the temporal latency \cite{diluca2019perceptual} between the real and the virtual content should also be reduced to be imperceptible.
The effect of these spatial and temporal errors on the perception and manipulation of the virtual content should be systematically investigated.
Prediction of hand movements should also be considered to overcome such issues \cite{klein2020predicting,gamage2021predictable}.
A complementary solution would be to embed tracking sensors in the wearable haptic devices, such as an inertial measurement unit (IMU) or cameras \cite{preechayasomboon2021haplets}.
This would allow a complete portable and wearable visuo-haptic system to be used in practical applications.
\subsection*{Perception of Haptic Texture Augmentation in Augmented and Virtual Reality}
@@ -97,8 +102,8 @@ As in the previous chapter, our aim was not to accurately reproduce real texture
However, the results also have some limitations, as they addressed a small set of visuo-haptic textures that augmented the perception of smooth and white real surfaces.
Visuo-haptic texture augmentation might be difficult on surfaces that already have strong visual or haptic patterns \cite{asano2012vibrotactile}, or on objects with complex shapes.
The role of visuo-haptic texture augmentation should also be evaluated in more complex tasks, such as object recognition and assembly, or in more concrete use cases, such as displaying and touching a museum object or a 3D printed object before it is manufactured.
\comans{JG}{As future work, the effect of visual quality of the rendered textures on texture perception could also be of interest.}{A sentence along these lines has been added.}
Finally, the visual textures used were simple color images not intended for use in an \ThreeD \VE, and enhancing their visual quality could improve the perception of visuo-haptic texture augmentation.
\paragraph{Specificities of Direct Touch.}
@@ -114,7 +119,7 @@ Finally, the virtual texture models should also be adaptable to individual sensi
\paragraph{AR Displays.}
The visual hand augmentations we evaluated were displayed on the Microsoft HoloLens~2, which is a common \OST-\AR headset \cite{hertel2021taxonomy}.
We purposely chose this type of display, because in \OST-\AR the lack of mutual occlusion between the hand and the virtual object is the most challenging to solve \cite{macedo2023occlusion}.
We therefore hypothesized that a visual hand augmentation would be more beneficial to users with this type of display.
However, the user's visual perception and experience are different with other types of displays, such as \VST-\AR, where the \RE view is seen through cameras and screens (\secref[related_work]{ar_displays}).
While the mutual occlusion problem and the hand pose estimation latency could be overcome with \VST-\AR, the visual hand augmentation could still be beneficial to users as it provides depth cues and feedback on the hand tracking, and should be evaluated as such.
@@ -145,7 +150,8 @@ It remains to be explored how to support rendering for different and larger area
\section{Perspectives}
Our goal was to improve direct hand interaction with virtual objects using wearable haptic devices and an \OST-\AR headset.
We aimed to provide more plausible and coherent perception and more natural and effective manipulation of the visuo-haptic augmentations.
Our contributions have enabled progress towards a seamless integration of the virtual into the real world.
They also allow us to outline longer-term research perspectives.
@@ -154,7 +160,7 @@ They also allow us to outline longer-term research perspectives.
We saw how complex the sense of touch is (\secref[related_work]{haptic_hand}).
Multiple sensory receptors all over the skin allow us to perceive different properties of objects, such as their texture, temperature, weight or shape.
Particularly concentrated in the hands, cutaneous sensory feedback, together with the muscles, is crucial for grasping and manipulating objects.
In this manuscript, we showed how wearable haptic devices can provide virtual tactile sensations to support direct hand interaction with an \OST-\AR headset.
We investigated both the visuo-haptic perception of textures augmenting real surfaces (\partref{perception}) and the manipulation of virtual objects with visuo-haptic feedback of hand contact with virtual objects (\partref{manipulation}).
However, unlike the visual sense, which can be fully immersed in the virtual using an \AR/\VR headset, there is no universal wearable haptic device that can reproduce all the haptic properties perceived by the hand (\secref[related_work]{wearable_haptics}).
@@ -176,13 +182,13 @@ These results would enable the design of more universal wearable haptic devices
We reviewed the diversity of \AR and \VR displays and their respective characteristics in rendering (\secref[related_work]{ar_displays}) and the manipulation of virtual content with the hand (\chapref{visual_hand}).
The diversity of wearable haptic devices and the different sensations they can provide is even greater (\secref[related_work]{wearable_haptics}) and an active research topic \cite{pacchierotti2017wearable}.
Coupling wearable haptics with \AR headsets also requires the haptic actuator to be placed elsewhere on the body than at the hand contact points (\secref[related_work]{vhar_haptics}).
In particular, in this thesis we investigated the perception of haptic texture augmentation using a vibrotactile device on the median phalanx (\chapref{vhar_system}) and also compared different positions of the haptics on the hand for manipulating virtual objects (\chapref{visuo_haptic_hand}).
Haptic feedback should be provided close to the point of contact of the hand with the virtual, to enhance the realism of texture augmentation (\chapref{vhar_textures}) and to render contact with virtual objects (\chapref{visuo_haptic_hand}), \eg rendering fingertip contact with a haptic ring worn on the middle or proximal phalanx.
However, the task at hand, the user's sensitivity and preferences, the limitations of the tracking system, or the ergonomics of the haptic device may require the use of other form factors and positions, such as the wrist or arm.
Similarly, collaborative and transitional experiences between \AR and \VR are becoming more common, and would involve different setups and modalities \cite{roo2017onea}.
Novel \AR/\VR displays are already capable of transitioning from \AE to \VE \cite{feld2024simple}, and haptic feedback should also adapt to these transitions (\chapref{xr_perception}).
Therefore, a visuo-haptic augmented reality system should be able to adapt to any \AR/\VR display, any wearable haptic device worn anywhere on the body, and support personalization of haptic feedback.
In other words, the visuo-haptic rendering system should be designed to be responsive to the context of use.

View File

@@ -36,13 +36,13 @@ Une \textbf{augmentation haptique est la modification de la perception par l'ajo
An important aspect of the illusion of AR (and of VR) is \emph{plausibility}, that is, the illusion for the user that the virtual events are really happening \cite{slater2022separate}. %, even if the user knows they are not real.
In this context, we define an \emph{AR system} as the set of hardware (input devices, sensors, displays, and haptic devices) and software (tracking, simulation, and rendering) that allows the user to interact with the augmented environment.
AR headsets are the most promising display technology, as they are portable, provide the user with an \emph{immersive} augmented environment, and leave the hands free to interact \cite{hertel2021taxonomy}.
Haptic feedback is then essential to ensure plausible and coherent interaction with the virtual visual content.
This is why wearable haptics seem particularly well suited to AR headsets.
\subsectionstarbookmark{Challenges of Wearable Visuo-Haptic Augmented Reality}
Integrating wearable haptics with an AR headset to create a visuo-haptic augmented environment is, however, complex and presents many challenges.
We propose to represent the user's experience in such an environment as an interaction loop, illustrated in \figref{interaction-loop-fr} and based on interaction loops with 3D systems \cite[p.84]{laviolajr20173d}.
A user interacts with the visual and haptic virtual environments through a virtual hand that follows their movements and simulates interaction with virtual objects.
The virtual environments are in turn rendered to the user with an immersive AR headset and wearable haptics, and are perceived as co-localized with the real environment.
@@ -80,8 +80,8 @@ Nous proposons donc pour ce premier axe de recherche de concevoir des augmentati
To this end, we: \textbf{(1) design a visuo-haptic texture augmentation system} with wearable vibrotactile haptics; \textbf{(2) evaluate how the perception of wearable haptic texture augmentations is affected by the visual feedback of the virtual hand and of the environment}; \textbf{(3) study the perception of wearable visual and haptic texture augmentations}.
These contributions are detailed in \secref{perception}.
The rendering limitations of AR headsets and wearable haptics make it difficult to manipulate virtual objects directly with the hand.
Two sensory feedbacks can improve this manipulation but have not been studied in AR: visual feedback of the virtual hand \cite{prachyabrued2014visual} and haptic feedback relocated on the hand \cite{teng2021touch}.
For this second research axis, we propose to design visuo-haptic augmentations of the hand as sensory feedback for interactions with virtual objects.
To do so, we study the effect on user performance and experience of \textbf{(1) visual feedback of the virtual hand as an augmentation of the real hand} and of \textbf{(2) different relocations of the haptic feedback} with wearable vibrotactile haptics as feedback for hand contacts with virtual objects, \textbf{in combination with visual augmentations of the hand}.
These contributions are detailed in \secref{manipulation}.
@@ -97,7 +97,7 @@ Une approche efficace pour créer une texture haptique consiste à générer un
The vibrations are generated by a \textit{voice-coil} vibrotactile device, which allows independent control of the frequency and amplitude of the signal.
This device is placed in a hand-held tool or attached directly to the finger.
When played while a real surface is being touched, these vibrations augment the perceived roughness, \ie the micro-asperities of the surface \cite{culbertson2015should}.
However, this method has not yet been integrated with an AR headset.
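A sketch of the data-driven signal model commonly used for such textures (assuming an autoregressive resynthesis as in the Penn Haptic Texture Toolkit; the notation is ours): the vibration is generated as
\[
a_t = \sum_{i=1}^{p} \alpha_i(v, f)\, a_{t-i} + \sigma(v, f)\, \varepsilon_t,
\qquad \varepsilon_t \sim \mathcal{N}(0, 1),
\]
where $a_t$ is the acceleration sample sent to the voice-coil at time $t$, and the coefficients $\alpha_i$ and noise gain $\sigma$ are interpolated from recordings according to the current scan speed $v$ and normal force $f$.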
\begin{subfigs}{vhar-system}{Our wearable visuo-haptic texture augmentation system. }[][
\item The HapCoil-One \textit{voice-coil} vibrotactile device, fitted with a tracking marker and attached to the middle phalanx of the user's index finger.
@@ -192,7 +192,7 @@ Avec l'étude précédente, cela ouvre la voie à de nouvelles applications de R
\label{manipulation}
For this second research axis, we propose to design and evaluate visuo-haptic sensory feedback of the hand and its interactions with virtual objects.
The goal is to facilitate the manipulation of virtual objects with AR headsets.
\subsectionstarbookmark{Visual Feedback of the Virtual Hand as an Augmentation of the Real Hand}
@@ -251,7 +251,7 @@ Cependant, il n'est pas clair quel placement de l'actionneur est le plus bénéf
\subfig[.45]{visuo-haptic-hand-task-grasp-fr}
\end{subfigs}
This is why we study the role of \textbf{visuo-haptic feedback of the hand when manipulating virtual objects} with an AR headset using wearable vibrotactile haptics.
We first selected \textbf{four placements of the haptic device on the hand} that have been proposed in the literature: on the nails, the proximal phalanges, the wrist, and the nails of the opposite hand (\figref{visuo-haptic-hand-locations-fr}).
We focused on vibrotactile feedback, as it is present in most wearable haptic devices and is the least cumbersome.
In practice, we used two ERM vibrotactile motors, because they are the most compact and do not affect hand tracking \cite{pacchierotti2016hring}, but they only allow control of the signal amplitude.
@@ -263,7 +263,7 @@ Les résultats ont montré que lorsqu'il était placé à proximité du point de
However, the most distant placement, on the opposite hand, yielded the best performance, even though it was not well liked: this unusual placement probably prompted participants to pay more attention to the haptic feedback and to concentrate more on the task.
The on-contact vibration technique proved sufficient compared to a more elaborate technique modulating the vibration intensity according to the contact force.
The visual augmentation of the hand was perceived as less necessary than the vibrotactile haptic feedback, but still provided useful feedback on the hand tracking.
This study confirms that relocating the haptic feedback is a simple but promising approach for wearable haptics with AR headsets.
If the integration with the hand tracking system allows it and the task requires it, a haptic ring worn on the middle or proximal phalanx seems preferable: this is the approach we also used in our research axis on texture augmentations (\secref{perception}).
However, a wrist-mounted haptic device could provide richer feedback by integrating different haptic actuators, while potentially being less cumbersome than a ring.
@@ -273,7 +273,7 @@ Elle peut être alors désactivée pendant la phase de saisie pour éviter la re
\section{Conclusion}
\label{conclusion}
In this thesis manuscript, we showed how an AR headset and wearable haptics can improve hand interactions with virtual and augmented objects.
Wearable haptic devices can provide tactile feedback on virtual objects and augment the perception of real objects touched with the finger, while preserving the hand's freedom of movement and interaction with the real environment.
However, their integration with AR is still recent and presents many conceptual, technical, and user experience challenges.
We structured this thesis around two research axes: \textbf{(I) modifying the visuo-haptic texture perception of real surfaces} and \textbf{(II) improving the manipulation of virtual objects}.
@@ -299,7 +299,7 @@ Les participants ont systématiquement identifié et fait correspondre \textbf{l
\noindentskip We also sought to improve the manipulation of virtual objects directly with the hand.
Manipulating virtual objects is a fundamental task in 3D systems, but it remains difficult to perform with the hand.
We then explored two sensory feedbacks known to improve this type of interaction but not studied with AR headsets: visual feedback of the virtual hand and haptic feedback relocated on the hand.
%Our approach was to design visual augmentations of the hand and relocated wearable haptic feedback, based on the literature, and to evaluate them in user studies.
We therefore first examined \textbf{(1) the visual augmentation of the hand}.

View File

@@ -35,9 +35,11 @@
% Style the acronyms
\renewcommand*{\glstextformat}[1]{\textcolor{black}{#1}}% Hyperlink in black
\let\AE\undefined % Command \AE is already defined
\acronym[TIFC]{2IFC}{two-interval forced choice}
\acronym[TwoD]{2D}{two-dimensional}
\acronym[ThreeD]{3D}{three-dimensional}
\acronym{AE}{augmented environment}
\acronym{ANOVA}{analysis of variance}
\acronym{ART}{aligned rank transform}
\acronym{AR}{augmented reality}
@@ -45,13 +47,12 @@
\acronym{DoF}{degree of freedom}
\acronym{ERM}{eccentric rotating mass}
\acronym{FoV}{field of view}
\acronym{GLMM}{generalized linear mixed model}
\acronym{GM}{geometric mean}
\acronym{HaTT}{Penn Haptic Texture Toolkit}
\acronym{HSD}{honest significant difference}
\acronym{JND}{just noticeable difference}
\acronym{LRA}{linear resonant actuator}
\acronym{LMM}{linear mixed model}
\acronym{MLE}{maximum-likelihood estimation}
\acronym{MR}{mixed reality}
\acronym{OST}{optical see-through}
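For context, a minimal usage sketch of these definitions (assuming \acronym wraps a glossaries-style \newacronym and that the optional argument names the generated command when the short form, such as 2D or AE, is not a valid macro name; the sentences are illustrative):
% In the body text:
The study took place in an \AE{} rather than a fully virtual scene.
We mapped a \TwoD{} texture onto the \ThreeD{} mesh.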

View File

@@ -1,3 +1,14 @@
% Changes
\usepackage[commentmarkup=footnote]{changes}
\definechangesauthor[name=Jens Grubert, color=Dandelion]{JG}
\definechangesauthor[name=Seokhee Jeon, color=Red]{SG}
\newcommand{\comans}[3]{%
\comment[id=#1]{%
#2\\%
\textbf{Answer: }#3%
}%
}
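A usage sketch mirroring the \comans calls in the conclusion chapter: the first argument is a reviewer id declared above, the second the reviewer's comment, and the third the authors' answer, rendered together as a footnote by the changes package (the wording here is illustrative):
\comans{JG}{Consider discussing the registration error.}{A sentence along these lines has been added.}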
% Images
\usepackage{graphicx}
\usepackage{caption}% Point references to the figure not the caption

View File

@@ -23,16 +23,16 @@
}
% Custom commands
\input{utils/commands}
\input{config/thesis_commands}
\input{config/acronyms}
% Document
\begin{document}
\frontmatter
\import{0-front}{cover}
%\listofchanges
%\importchapter{0-front}{acknowledgement}
\importchapter{0-front}{contents}
@@ -54,7 +54,9 @@
\appendix
\importchapter{5-conclusion}{publications}
\begin{otherlanguage}{french}
\importchapter{5-conclusion}{résumé}
\end{otherlanguage}
\backmatter
\importchapter{6-back}{bibliography}

View File

@@ -1,11 +1,18 @@
@inproceedings{abdulali2016datadriven,
title = {Data-{{Driven Modeling}} of {{Anisotropic Haptic Textures}}: {{Data Segmentation}} and {{Interpolation}}},
booktitle = {{{EuroHaptics}}},
author = {Abdulali, Arsen and Jeon, Seokhee},
date = {2016},
pages = {228--239}
}
@inproceedings{abdulali2016datadrivena,
title = {Data-{{Driven Rendering}} of {{Anisotropic Haptic Textures}}},
booktitle = {{{AsiaHaptics}}},
author = {Abdulali, Arsen and Jeon, Seokhee},
date = {2016},
pages = {401--407},
isbn = {978-981-10-4156-3 978-981-10-4157-0}
}
@inproceedings{abdulali2017sample,
@@ -18,16 +25,6 @@
isbn = {978-1-5090-1425-5}
}
@inproceedings{achibet2017flexifingers,
title = {{{FlexiFingers}}: {{Multi-finger}} Interaction in {{VR}} Combining Passive Haptics and Pseudo-Haptics},
shorttitle = {{{FlexiFingers}}},
@@ -73,7 +70,7 @@
date = {2022},
pages = {792--801},
doi = {10/gtv3wn},
isbn = {978-1-6654-9617-9}
}
@article{aggravi2022decentralized,
@@ -490,7 +487,7 @@
date = {2015},
journaltitle = {Found. Trends Human--Computer Interact.},
volume = {8},
number = {2--3},
pages = {73--272},
doi = {10/gc2hkv}
}
@@ -613,7 +610,7 @@
author = {Brooks, Jas and Amin, Noor and Lopes, Pedro},
date = {2023},
doi = {10/gt625w},
isbn = {979-8-4007-0132-0}
}
@inproceedings{buchmann2004fingartips,
@@ -713,7 +710,7 @@
date = {2019},
pages = {1749--1754},
doi = {10/gpm2sx},
isbn = {978-1-7281-1377-7}
}
@article{chinello2017three,
@@ -812,7 +809,7 @@
date = {2021},
pages = {613--618},
doi = {10/gsvh3z},
isbn = {978-1-6654-1871-3}
}
@article{costes2019touchy,
@@ -978,7 +975,7 @@
date = {2013},
journaltitle = {Wear},
volume = {301},
number = {1--2},
pages = {324--329},
doi = {10/f45z4n}
}
@@ -1011,7 +1008,7 @@
date = {2019},
pages = {321--330},
doi = {10/gnc6m8},
isbn = {978-1-7281-1377-7}
}
@article{detinguy2021capacitive,
@@ -1126,7 +1123,7 @@
date = {2007},
journaltitle = {Comput. Vis. Image Underst.},
volume = {108},
number = {1--2},
pages = {52--73},
doi = {10/bzm33v}
}
@@ -1138,7 +1135,7 @@
author = {Feick, Martin and Zenner, André and Ariza, Oscar and Tang, Anthony and Biyikli, Cihan and Krüger, Antonio},
date = {2023},
doi = {10/gtfhf6},
isbn = {979-8-4007-0132-0}
}
@article{feix2016grasp,
@@ -1888,7 +1885,7 @@
date = {2022-03},
pages = {121--129},
doi = {10/gt7pqr},
isbn = {978-1-6654-9617-9}
}
@online{kahl2023using,
@@ -1936,7 +1933,7 @@
date = {2020},
pages = {275--284},
doi = {10/gjb2c6},
isbn = {978-1-7281-5608-8}
}
@article{kappers2013haptic,
@@ -2656,7 +2653,7 @@
date = {2019},
pages = {519--528},
doi = {10/gjpxzw},
isbn = {978-1-7281-1377-7}
}
@article{ogawa2021effect,
@@ -2792,7 +2789,7 @@
date = {2022},
pages = {628--633},
doi = {10/grnbtc},
isbn = {978-1-6654-7927-1}
}
@inproceedings{park2011perceptual,
@@ -2830,7 +2827,7 @@
date = {2023},
pages = {148--155},
doi = {10/gsvpqg},
isbn = {979-8-3503-9993-6}
}
@inproceedings{peillard2019studying,
@@ -3000,7 +2997,7 @@
date = {2007},
journaltitle = {Int. J. Robot. Res.},
volume = {26},
number = {11--12},
pages = {1191--1203},
doi = {10/b33wgg}
}
@@ -3306,6 +3303,15 @@
doi = {10/fj8xvn}
}
@inproceedings{son2022effect,
title = {Effect of {{Contact Points Feedback}} on {{Two-Thumb Touch Typing}} in {{Virtual Reality}}},
booktitle = {{{CHI Conf}}. {{Hum}}. {{Factors Comput}}. {{Syst}}.},
author = {Son, Jeongmin and Ahn, Sunggeun and Kim, Sunbum and Lee, Geehyuk},
date = {2022},
doi = {10/g8234n},
isbn = {978-1-4503-9156-6}
}
@inproceedings{speicher2019what,
title = {What Is {{Mixed Reality}}?},
booktitle = {{{CHI Conf}}. {{Hum}}. {{Factors Comput}}. {{Syst}}.},
@@ -3458,7 +3464,7 @@
author = {Teng, Shan-Yuan and Gupta, Aryan and Lopes, Pedro},
date = {2024},
doi = {10/gwhbpm},
isbn = {979-8-4007-0330-0}
}
@article{tong2023survey,
@@ -3478,7 +3484,7 @@
author = {Tran, Tanh Quang and Langlotz, Tobias and Regenbrecht, Holger},
date = {2024},
doi = {10/gt56cs},
isbn = {979-8-4007-0330-0}
}
@article{turk2014multimodal,
@@ -3704,7 +3710,7 @@
date = {2021},
pages = {385--390},
doi = {10/gvm7sf},
isbn = {978-1-6654-1871-3}
}
@inproceedings{yoon2020evaluating,
@@ -3723,7 +3729,7 @@
date = {2007},
journaltitle = {Somatosens. Mot. Res.},
volume = {24},
number = {1--2},
pages = {53--70},
doi = {10/cf9xmk}
}

View File

@@ -138,6 +138,8 @@
\newcommand{\wilcoxon}[1]{\measure{W}{#1}{}}
\newcommand{\ztest}[1]{$\mathrm{Z} = #1$}
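A usage sketch with illustrative values: \ztest only typesets the statistic in math mode, so it composes directly with the surrounding results sentence:
The two conditions differed significantly (\ztest{2.31}, $p < .05$).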
% From https://tex.stackexchange.com/a/95842
\newenvironment{conditions}{%
\par% maybe you want the list to start with a new line
@@ -158,14 +160,20 @@
\newcommand{\footnotemarkrepeat}{\footnotemark[\value{footnote}]} % Repeat the last footnote mark
%% Structure
\newcommand{\chapterstartoc}[1]{
\chapter*{#1}\addcontentsline{toc}{chapter}{#1}}
\newcommand{\chapterstarbookmark}[1]{\pdfbookmark[chapter]{#1}{#1}
\chapter*{#1}}
\newcommand{\sectionstartoc}[1]{
\section*{#1}\addcontentsline{toc}{section}{#1}}
\newcommand{\sectionstarbookmark}[1]{\pdfbookmark[section]{#1}{#1}
\section*{#1}}
\newcommand{\subsectionstartoc}[1]{
\subsection*{#1}\addcontentsline{toc}{subsection}{#1}}
\newcommand{\subsectionstarbookmark}[1]{\pdfbookmark[subsection]{#1}{#1}
\subsection*{#1}}
\newcommand{\subsubsubsection}[1]{\paragraph*{\textit{#1}}}
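A usage sketch of the starred variants (the titles are illustrative): the *startoc commands produce an unnumbered heading that still appears in the table of contents, while the *starbookmark commands add only a PDF bookmark:
\chapterstartoc{Publications}% unnumbered, listed in the ToC
\chapterstarbookmark{Résumé}% unnumbered, PDF bookmark only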