Compare commits
10 Commits
f75de4439e
...
dee8c241b9
| Author | SHA1 | Date |
|---|---|---|
| | dee8c241b9 | |
| | bd9195b084 | |
| | f2ca2f13a8 | |
| | 8eda0465cf | |
| | 897d86c141 | |
| | 7b19effcce | |
| | db42d10747 | |
| | fe0da6a83b | |
| | 0cde049bfc | |
| | 202455e9e8 | |
69
1-introduction/figures/icons/hand.svg
Normal file
@@ -0,0 +1,69 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg version="1.2" width="50mm" height="50mm" viewBox="0 0 5000 5000" preserveAspectRatio="xMidYMid" fill-rule="evenodd" stroke-width="28.222" stroke-linejoin="round" xmlns="http://www.w3.org/2000/svg" xmlns:ooo="http://xml.openoffice.org/svg/export" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:presentation="http://sun.com/xmlns/staroffice/presentation" xmlns:smil="http://www.w3.org/2001/SMIL20/" xmlns:anim="urn:oasis:names:tc:opendocument:xmlns:animation:1.0" xmlns:svg="urn:oasis:names:tc:opendocument:xmlns:svg-compatible:1.0" xml:space="preserve">
<defs class="ClipPathGroup">
<clipPath id="presentation_clip_path" clipPathUnits="userSpaceOnUse">
<rect x="0" y="0" width="5000" height="5000"/>
</clipPath>
<clipPath id="presentation_clip_path_shrink" clipPathUnits="userSpaceOnUse">
<rect x="5" y="5" width="4990" height="4990"/>
</clipPath>
</defs>
<defs class="TextShapeIndex">
<g ooo:slide="id1" ooo:id-list="id3"/>
</defs>
<defs class="EmbeddedBulletChars">
<g id="bullet-char-template-57356" transform="scale(0.00048828125,-0.00048828125)">
<path d="M 580,1141 L 1163,571 580,0 -4,571 580,1141 Z"/>
</g>
<g id="bullet-char-template-57354" transform="scale(0.00048828125,-0.00048828125)">
<path d="M 8,1128 L 1137,1128 1137,0 8,0 8,1128 Z"/>
</g>
<g id="bullet-char-template-10146" transform="scale(0.00048828125,-0.00048828125)">
<path d="M 174,0 L 602,739 174,1481 1456,739 174,0 Z M 1358,739 L 309,1346 659,739 1358,739 Z"/>
</g>
<g id="bullet-char-template-10132" transform="scale(0.00048828125,-0.00048828125)">
<path d="M 2015,739 L 1276,0 717,0 1260,543 174,543 174,936 1260,936 717,1481 1274,1481 2015,739 Z"/>
</g>
<g id="bullet-char-template-10007" transform="scale(0.00048828125,-0.00048828125)">
<path d="M 0,-2 C -7,14 -16,27 -25,37 L 356,567 C 262,823 215,952 215,954 215,979 228,992 255,992 264,992 276,990 289,987 310,991 331,999 354,1012 L 381,999 492,748 772,1049 836,1024 860,1049 C 881,1039 901,1025 922,1006 886,937 835,863 770,784 769,783 710,716 594,584 L 774,223 C 774,196 753,168 711,139 L 727,119 C 717,90 699,76 672,76 641,76 570,178 457,381 L 164,-76 C 142,-110 111,-127 72,-127 30,-127 9,-110 8,-76 1,-67 -2,-52 -2,-32 -2,-23 -1,-13 0,-2 Z"/>
</g>
<g id="bullet-char-template-10004" transform="scale(0.00048828125,-0.00048828125)">
<path d="M 285,-33 C 182,-33 111,30 74,156 52,228 41,333 41,471 41,549 55,616 82,672 116,743 169,778 240,778 293,778 328,747 346,684 L 369,508 C 377,444 397,411 428,410 L 1163,1116 C 1174,1127 1196,1133 1229,1133 1271,1133 1292,1118 1292,1087 L 1292,965 C 1292,929 1282,901 1262,881 L 442,47 C 390,-6 338,-33 285,-33 Z"/>
</g>
<g id="bullet-char-template-9679" transform="scale(0.00048828125,-0.00048828125)">
<path d="M 813,0 C 632,0 489,54 383,161 276,268 223,411 223,592 223,773 276,916 383,1023 489,1130 632,1184 813,1184 992,1184 1136,1130 1245,1023 1353,916 1407,772 1407,592 1407,412 1353,268 1245,161 1136,54 992,0 813,0 Z"/>
</g>
<g id="bullet-char-template-8226" transform="scale(0.00048828125,-0.00048828125)">
<path d="M 346,457 C 273,457 209,483 155,535 101,586 74,649 74,723 74,796 101,859 155,911 209,963 273,989 346,989 419,989 480,963 531,910 582,859 608,796 608,723 608,648 583,586 532,535 482,483 420,457 346,457 Z"/>
</g>
<g id="bullet-char-template-8211" transform="scale(0.00048828125,-0.00048828125)">
<path d="M -4,459 L 1135,459 1135,606 -4,606 -4,459 Z"/>
</g>
<g id="bullet-char-template-61548" transform="scale(0.00048828125,-0.00048828125)">
<path d="M 173,740 C 173,903 231,1043 346,1159 462,1274 601,1332 765,1332 928,1332 1067,1274 1183,1159 1299,1043 1357,903 1357,740 1357,577 1299,437 1183,322 1067,206 928,148 765,148 601,148 462,206 346,322 231,437 173,577 173,740 Z"/>
</g>
</defs>
<g>
<g id="id2" class="Master_Slide">
<g id="bg-id2" class="Background"/>
<g id="bo-id2" class="BackgroundObjects"/>
</g>
</g>
<g class="SlideGroup">
<g>
<g id="container-id1">
<g id="id1" class="Slide" clip-path="url(#presentation_clip_path)">
<g class="Page">
<g class="com.sun.star.drawing.ClosedBezierShape">
<g id="id3">
<rect class="BoundingBox" stroke="none" fill="none" x="43" y="990" width="4915" height="3020"/>
<path fill="rgb(0,0,0)" stroke="none" d="M 4175,2921 C 4115,3016 3884,3092 3611,3175 3529,3200 3458,3221 3423,3237 3284,3299 2623,3208 2508,3176 2411,3148 2023,3132 1891,3178 1757,3225 1100,3506 867,3650 635,3792 324,4007 320,4009 L 288,3961 C 288,3961 293,3957 301,3952 362,3910 630,3728 837,3601 1077,3452 1733,3172 1873,3123 2019,3073 2416,3090 2523,3120 2646,3155 3286,3235 3399,3185 3437,3167 3507,3146 3595,3120 3715,3083 3874,3034 3990,2983 3963,2948 3923,2886 3915,2812 L 3949,2808 C 3957,2877 3997,2937 4021,2967 4055,2949 4084,2932 4103,2914 4099,2899 4088,2873 4057,2845 L 4081,2819 C 4107,2843 4122,2867 4129,2887 4136,2877 4144,2867 4152,2857 4146,2837 4127,2779 4042,2718 L 4063,2690 C 4133,2741 4163,2791 4177,2824 4207,2787 4243,2748 4279,2709 L 4211,2605 C 4200,2587 4200,2564 4212,2548 4243,2505 4305,2429 4371,2410 4383,2406 4396,2405 4407,2407 4379,2396 4337,2387 4272,2387 4118,2388 3878,2609 3876,2610 3867,2618 3857,2621 3847,2617 3809,2603 3754,2610 3701,2615 3615,2625 3517,2636 3483,2554 3423,2410 3750,2088 4079,1792 4104,1769 4123,1752 4132,1742 4168,1709 4282,1630 4414,1538 4597,1410 4849,1234 4879,1185 4895,1159 4909,1122 4888,1090 4885,1085 4880,1081 4876,1077 4879,1094 4877,1111 4868,1127 4838,1181 4724,1229 4684,1215 4683,1214 4682,1214 4682,1213 4676,1211 4658,1202 4578,1126 4520,1151 4458,1179 4399,1205 4207,1290 4087,1341 4053,1348 4008,1358 3675,1599 3452,1766 L 3418,1719 C 3469,1681 3633,1559 3780,1455 3528,1462 3066,1545 3062,1546 L 3052,1489 C 3073,1485 3580,1395 3825,1397 3824,1393 3824,1389 3824,1386 3813,1359 3787,1341 3757,1339 3572,1323 2986,1434 2981,1435 L 2970,1378 C 2986,1376 3232,1329 3456,1301 3437,1292 3421,1287 3408,1287 3224,1276 2871,1346 2794,1391 2754,1412 2632,1502 2489,1604 2260,1771 1973,1980 1837,2054 1730,2113 1653,2116 1586,2120 1510,2123 1445,2126 1340,2213 1134,2384 119,2717 63,2735 63,2735 62,2736 61,2735 L 44,2681 C 55,2678 1105,2334 1304,2168 1422,2070 1505,2066 1584,2061 1648,2059 1714,2056 1809,2004 1944,1930 2227,1724 2456,1558 2605,1451 2724,1364 2765,1341 2857,1288 3226,1218 3412,1230 3459,1232 3512,1264 3552,1290 3637,1282 3711,1277 3762,1281 3813,1285 3859,1319 3877,1365 3879,1372 3878,1379 3880,1386 3957,1334 4019,1297 4042,1292 4072,1286 4234,1215 4377,1152 4463,1114 4533,1083 4592,1058 4621,1041 4667,1019 4717,1007 4738,999 4754,994 4764,993 4795,988 4828,992 4857,1001 L 4866,1004 C 4896,1016 4920,1035 4936,1058 4953,1084 4975,1138 4928,1215 4895,1271 4707,1403 4446,1585 4322,1671 4204,1753 4173,1783 4162,1794 4143,1811 4117,1834 3594,2306 3512,2474 3536,2531 3552,2571 3607,2568 3694,2558 3748,2552 3803,2546 3849,2558 3902,2510 4116,2330 4271,2330 4379,2330 4453,2350 4494,2394 4516,2418 4528,2450 4526,2485 4524,2498 4518,2514 4508,2532 4507,2534 4508,2536 4507,2538 4486,2602 4402,2683 4347,2720 4280,2792 4212,2865 4175,2921 Z M 4240,2585 L 4315,2698 C 4317,2699 4319,2698 4322,2696 4374,2663 4456,2583 4475,2527 4477,2523 4478,2516 4475,2510 4461,2479 4430,2428 4380,2443 4322,2460 4263,2536 4240,2567 4236,2573 4237,2580 4240,2585 Z M 4697,1182 C 4715,1189 4813,1156 4838,1111 4852,1086 4834,1061 4816,1043 4813,1040 4809,1039 4804,1037 4800,1035 4796,1034 4792,1034 4728,1026 4632,1070 4596,1095 4671,1167 4692,1179 4696,1182 L 4697,1182 Z"/>
</g>
</g>
</g>
</g>
</g>
</g>
</g>
</svg>
After Width: | Height: | Size: 7.5 KiB |
Binary file not shown.
@@ -3,6 +3,8 @@
\chaptertoc
\bigskip
%This PhD manuscript shows how wearable haptics, worn on the outside of the hand, can improve direct hand interaction in immersive \AR by augmenting the perception of the virtual content and its manipulation.
In this thesis manuscript, we show how immersive \AR, which integrates visual virtual content into the user's perception of the real world, and wearable haptics, which provide tactile sensations on the skin, can improve free and direct hand interaction with virtual objects.
Our goal is to enable users to perceive and interact with wearable visuo-haptic augmentations in a more realistic and effective way, as if they were real.
@@ -183,7 +185,7 @@ When touching a visually augmented real object, the user's hand is physically
However, \textbf{manipulating a purely virtual object with the bare hand can be challenging}, especially without good haptic feedback \cite{maisto2017evaluation,meli2018combining}. %, and one will rely on visual and haptic feedback to guide the interaction.
In addition, wearable haptic devices are limited to cutaneous feedback, and cannot provide forces to constrain the hand contact with the virtual object \cite{pacchierotti2017wearable}.
Current \AR systems have visual rendering limitations that also affect interaction with virtual objects. %, due to depth underestimation, a lack of mutual occlusions, and hand tracking latency.
\AR is the display of superimposed images of the virtual world, synchronized with the user's current view of the real world.
However, the depth perception of virtual objects is often underestimated \cite{peillard2019studying,adams2022depth}.
There is also often \textbf{a lack of mutual occlusions between the hand and a virtual object}, that is, the hand can hide the object or be hidden by it \cite{macedo2023occlusion}.
@@ -288,7 +290,7 @@ The pose estimation of the real hand and the environment is achieved using a vis
The visual rendering is done using the immersive \OST-\AR headset Microsoft HoloLens~2.
The system allows free visual and haptic exploration of the textures, as if they were real, and forms the basis of the next two chapters.
In \textbf{\chapref{xr_perception}}, we investigate in a user study how different the perception of haptic texture augmentations is in \AR \vs \VR and when touched by a virtual hand \vs one's own hand.
In \textbf{\chapref{xr_perception}}, we investigate in a psychophysical user study how different the perception of haptic texture augmentations is in \AR \vs \VR and when touched by a virtual hand \vs one's own hand.
We use psychophysical methods to measure user perception and extensive questionnaires to understand how this perception is affected by the visual feedback of the virtual hand and the environment (real, augmented or virtual).
In \textbf{\chapref{vhar_textures}}, we evaluate in a user study the perception of visuo-haptic texture augmentations directly touched with the real hand in \AR.
@@ -91,9 +91,7 @@ As illustrated in \figref{sensorimotor_continuum}, \textcite{jones2006human} del
\item \emph{Gestures}, or non-prehensible skilled movements, are motor activities without constant contact with an object. Examples include pointing at a target, typing on a keyboard, accompanying speech with gestures, or signing in sign language \cite{yoon2020evaluating}.
\end{itemize}
\fig[0.65]{sensorimotor_continuum}{
The sensorimotor continuum of the hand function proposed by and adapted from \textcite{jones2006human}.
}[
\fig[0.65]{sensorimotor_continuum}{ The sensorimotor continuum of the hand function proposed by and adapted from \textcite{jones2006human}.}[%
Functions of the hand are classified into four categories based on the relative importance of sensory and motor components.
\protect\footnotemark
]
@@ -170,7 +170,9 @@ However, they require high voltages to operate, limiting their use in wearable d
\label{tactile_rendering}
Rendering a haptic property consists in modeling and reproducing virtual sensations comparable to those perceived when interacting with real objects \cite{klatzky2013haptic}.
As we have just seen, the haptic sense being rich and complex (\secref{haptic_hand}), a wide variety of wearable haptic actuators have been developed (\secref{wearable_haptic_devices}) that each provide a subset of the haptic sensations felt by the hand.
As we have just seen, the haptic sense is rich and complex (\secref{haptic_hand}).
Thus, a wide variety of wearable haptic actuators have been developed (\secref{wearable_haptic_devices}).
However, each actuator is only able to provide a subset of the haptic sensations felt by the hand.
In this section, we review rendering methods that use wearable haptics to modify the perceived roughness and hardness of real objects.
\subsubsection{Haptic Augmentations}
@@ -228,19 +230,23 @@ More complex models have also been developed to be physically accurate and repro
Because simulations of realistic virtual textures can be complex to design and to render in real-time, direct capture and models of real textures have been developed \cite{culbertson2018haptics}.
\textcite{okamura1998vibration} were the first to measure the vibrations produced by the interaction of a stylus dragged over sandpaper and patterned surfaces.
They found that the contact vibrations with patterns can be modeled as exponentially decaying sinusoids (\eqref{contact_transient}) that depend on the normal force and the scanning velocity of the stylus on the surface.
They found that the contact with patterns when sliding on the texture generates vibrations that can be modelled as exponentially decaying sinusoids (\eqref{contact_transient}) that depend on the normal force and the scanning velocity of the stylus on the surface.
This technique was used by \textcite{ando2007fingernailmounted} to augment a smooth sheet of paper with a virtual patterned texture: With a \LRA mounted on the nail, they rendered the virtual finger contacts with \qty{20}{\ms} vibration impulses at \qty{130}{\Hz} (\figref{ando2007fingernailmounted}).
Participants matched the virtual textures to the real ones, with \qty{0.25}{\mm} height and \qtyrange{1}{10}{\mm} width, but systematically overestimated the virtual width to be \qty{4}{\mm} longer.
Participants were able to match the virtual textures to the real ones (\qty{0.25}{\mm} height and \qtyrange{1}{10}{\mm} widths) but systematically overestimated the virtual width to be \qty{4}{\mm} longer.
This model was refined to capture everyday, unpatterned textures as well \cite{guruswamy2011iir}.
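As a quick illustration of this decaying-sinusoid contact model, here is a minimal Python sketch; the parameter names and values are ours, chosen to echo the \qty{20}{\ms}, \qty{130}{\Hz} impulses above, and are not taken from \eqref{contact_transient}:

```python
import numpy as np

def contact_transient(t: np.ndarray, amp: float, decay: float, freq: float) -> np.ndarray:
    """Exponentially decaying sinusoid modelling a contact vibration.

    amp scales with the normal force and scanning velocity; decay and freq
    characterize the surface pattern. All values here are illustrative.
    """
    return amp * np.exp(-decay * t) * np.sin(2.0 * np.pi * freq * t)

# e.g., a 20 ms burst at 130 Hz, similar to the nail-mounted LRA rendering above
t = np.arange(0, 0.020, 1.0 / 48_000)
burst = contact_transient(t, amp=1.0, decay=80.0, freq=130.0)
```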
Other models have been then developed to capture everyday textures (such as sandpaper) \cite{guruswamy2011iir} with many force and velocity measures \cite{romano2012creating,culbertson2014modeling}.
Such data-based models are capable of interpolating from the user's measures of velocity and force as inputs to generate a virtual texture in real time (\secref{vibrotactile_actuators}).
More complex models were then created to more systematically capture everyday textures from many stylus scan measures \cite{romano2012creating,culbertson2014modeling}.
This led to the release of the \HaTT database, a public set of stylus recordings and models of 100 haptic textures \cite{culbertson2014one}.
A similar database, but captured from a direct touch context with the fingertip, has recently been released \cite{balasubramanian2024sens3}.
A limitation of these data-driven models is that they can only render \emph{isotropic} textures: their record does not depend on the position of the measure, and the rendering is the same regardless of the direction of the movement.
Alternative models have been proposed to both render both isotropic and patterned textures \cite{chan2021hasti}.
A similar database, but captured from a direct touch context with the fingertip, has also recently been released \cite{balasubramanian2024sens3}.
A common limitation of these data-driven models is that they can only render \emph{isotropic} textures: their record does not depend on the position of the measure, and the rendering is the same regardless of the direction of the movement.
This was eventually addressed by including the user's velocity direction in the capture, modelling, and rendering of the textures \cite{abdulali2016datadriven,abdulali2018datadriven}.
%A third approach is to model
%Alternative models have been proposed to both render both isotropic and patterned textures \cite{chan2021hasti}., or to simulate the vibrations from the (visual) texture maps used to visually render a \ThreeD object \cite{chan2021hasti}.
Using the user's velocity magnitude and normal force as input, these data-driven models are able to interpolate from the scan measures to generate a virtual texture in real time as vibrations with high realism.
When comparing real textures felt through a stylus with their virtual models rendered with a voice-coil actuator attached to the stylus (\figref{culbertson2012refined}), the virtual textures were found to accurately reproduce the perception of roughness, but hardness and friction were not rendered properly \cite{culbertson2014modeling}.
\textcite{culbertson2015should} further showed that the perceived realism of the virtual textures, and similarity to the real textures, depended mostly on the user's speed but not on the user's force as inputs to the model, \ie responding to speed is sufficient to render isotropic virtual textures.
\textcite{culbertson2015should} further showed that the perceived realism of the virtual textures, and similarity to the real textures, depended mostly on the user's velocity magnitude but not on the user's force as inputs to the model, \ie responding to velocity magnitude is sufficient to render isotropic virtual textures.
\begin{subfigs}{textures_rendering_data}{Augmenting haptic texture perception with voice-coil actuators. }[][
\item Increasing and decreasing the perceived roughness of a real patterned texture in direct touch \cite{asano2015vibrotactile}.
@@ -162,8 +162,8 @@ Choosing useful and efficient \UIs and interaction techniques is crucial for the
\subsubsection{Tasks with Virtual Environments}
\label{ve_tasks}
\textcite[p.385]{laviolajr20173d} classify interaction techniques into three categories based on the tasks they enable users to perform: manipulation, navigation, and system control.
\textcite{hertel2021taxonomy} proposed a taxonomy of interaction techniques specifically for immersive \AR.
\textcite{laviolajr20173d} (p.385) classify interaction techniques into three categories based on the tasks they enable users to perform: manipulation, navigation, and system control.
\textcite{hertel2021taxonomy} proposed a similar taxonomy of interaction techniques specifically for immersive \AR.
The \emph{manipulation tasks} are the most fundamental tasks in \AR and \VR systems, and the building blocks for more complex interactions.
\emph{Selection} is the identification or acquisition of a specific virtual object, \eg pointing at a target as in \figref{grubert2015multifi}, touching a button with a finger, or grasping an object with a hand.
@@ -177,7 +177,7 @@ Wayfinding is the cognitive planning of the movement, such as path finding or ro
The \emph{system control tasks} are changes to the system state through commands or menus such as creating, deleting, or modifying virtual objects, \eg as in \figref{roo2017onea}. It is also the input of text, numbers, or symbols.
In this thesis we focus on manipulation tasks of virtual content directly with the hands, more specifically on touching visuo-haptic textures with a finger (\partref{perception}) and positioning and rotating virtual objects pushed and grasp by the hand.
In this thesis we focus on manipulation tasks of virtual content directly with the hands, more specifically on touching visuo-haptic textures with a finger (\partref{perception}) and positioning and rotating virtual objects pushed and grasped by the hand (\partref{manipulation}).
\begin{subfigs}{interaction-techniques}{Interaction techniques in \AR. }[][
\item Spatial selection of virtual item of an extended display using a hand-held smartphone \cite{grubert2015multifi}.
@@ -247,7 +247,7 @@ Similarly, in \secref{tactile_rendering} we described how a material property (\
%can track the user's movements and use them as inputs to the \VE \textcite[p.172]{billinghurst2015survey}.
Hands were initially tracked by active sensing devices such as gloves or controllers; it is now possible to track them in real time using passive sensing (\secref{interaction_techniques}) and computer vision algorithms natively integrated into \AR/\VR headsets \cite{tong2023survey}.
Our hands allow us to manipulate real everyday objects (\secref{grasp_types}), so virtual hand interaction techniques seem to be the most natural way to manipulate virtual objects \cite[p.400]{laviolajr20173d}.
Our hands allow us to manipulate real everyday objects (\secref{grasp_types}), hence virtual hand interaction techniques seem to be the most natural way to manipulate virtual objects \cite[p.400]{laviolajr20173d}.
The tracked hand of the user is reconstructed as a \emph{virtual hand} model in the \VE \cite[p.405]{laviolajr20173d}.
The simplest models represent the hand as a rigid \ThreeD object that follows the movements of the real hand with \qty{6}{DoF} (position and orientation in space) \cite{talvas2012novel}.
@@ -3,6 +3,8 @@
\chaptertoc
\bigskip
This chapter reviews previous work on the perception and manipulation of virtual and augmented objects directly with the hand, using either wearable haptics, \AR, or their combination.
%Experiencing a visual, haptic, or visuo-haptic augmented environment relies on one to many interaction loops between a user and the environment, as shown in \figref[introduction]{interaction-loop}, and each main step must be addressed and understood: the tracking and modelling of the \RE into a \VE, the interaction techniques to act on the \VE, the rendering of the \VE to the user through visual and haptic user interfaces, and, finally, the user's perception and actions on the overall augmented environment.
First, we review how the hand senses and interacts with its environment to perceive and manipulate the haptic properties of real everyday objects.
@@ -18,15 +18,15 @@ The visuo-haptic texture rendering system is based on:
The system consists of three main components: the pose estimation of the tracked real elements, the visual rendering of the \VE, and the vibrotactile signal generation and rendering.
\figwide{diagram}{Diagram of the visuo-haptic texture rendering system. }[
\setstretch{1.2}
Fiducial markers, attached to the voice-coil actuator and to the augmented surfaces to be tracked, are captured by a camera.
The positions and rotations (the poses) ${}^c\mathbf{T}_i$, $i = 1, \dots, n$, of the $n$ defined markers in the camera frame $\poseFrame{c}$ are estimated, then filtered with an adaptive low-pass filter.
%These poses are transformed to the \AR/\VR headset frame $\poseFrame{h}$ and applied to the virtual model replicas to display them superimposed and aligned with the \RE.
These poses are used to move and display the virtual model replicas aligned with the \RE.
A collision detection algorithm detects a contact of the virtual hand with the virtual textures.
If so, the velocity of the finger marker ${}^c\dot{\mathbf{X}}_f$ is estimated using a discrete derivative of the position and adaptive low-pass filtering, then transformed into the texture frame $\poseFrame{t}$.
The vibrotactile signal $s_k$ is generated by modulating the (scalar) finger velocity ${}^t\hat{\dot{X}}_f$ in the texture direction with the texture period $\lambda$ (\eqref{signal}).
The signal is sampled at 48~kHz and sent to the voice-coil actuator via an audio amplifier.
All computation steps except signal sampling are performed at 60~Hz and in separate threads to parallelize them.
The vibrotactile signal $r$ is generated by modulating the (scalar) finger velocity ${}^t\hat{\dot{X}}_f$ in the texture direction with the texture period $\lambda$ (\eqref{signal}).
The signal is sampled at \qty{48}{\kilo\hertz} and sent to the voice-coil actuator via an audio amplifier.
All computation steps except signal sampling are performed at \qty{60}{\hertz} and in separate threads to parallelize them.
]
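To illustrate the velocity estimation step described in the diagram, here is a minimal Python sketch of a discrete derivative followed by a speed-adaptive low-pass filter (in the style of the 1€ filter); the actual filter used by the system, and the `min_cutoff` and `beta` parameters here, may differ:

```python
import math

def lowpass_alpha(cutoff_hz: float, dt: float) -> float:
    """Smoothing factor of a first-order low-pass filter for time step dt."""
    tau = 1.0 / (2.0 * math.pi * cutoff_hz)
    return dt / (dt + tau)

def estimate_velocity(x_prev: float, x_new: float, v_prev: float, dt: float,
                      min_cutoff: float = 1.0, beta: float = 0.5) -> float:
    """Discrete derivative of position plus adaptive low-pass filtering.

    The cutoff rises with speed: strong smoothing when the finger is slow
    (less jitter), weaker smoothing when it moves fast (less lag).
    """
    v_raw = (x_new - x_prev) / dt             # discrete derivative
    cutoff = min_cutoff + beta * abs(v_prev)  # adapt cutoff to current speed
    a = lowpass_alpha(cutoff, dt)
    return a * v_raw + (1.0 - a) * v_prev     # exponential smoothing
```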
\section{Description of the System Components}
@@ -74,7 +74,7 @@ The amplifier is connected to the audio output of a computer that generates the
The represented haptic texture is a 1D series of parallel virtual grooves and ridges, similar to the real linear grating textures manufactured for psychophysical roughness perception studies \secref[related_work]{roughness}. %\cite{friesen2024perceived,klatzky2003feeling,unger2011roughness}.
It is generated as a square wave audio signal $r$, sampled at \qty{48}{\kilo\hertz}, with a texture period $\lambda$ and an amplitude $A$, similar to \eqref[related_work]{grating_rendering}.
Its frequency is a ratio of the absolute finger filtered (scalar) velocity $x_f = \poseX{s}{|\hat{\dot{X}}|}{f}$, and the texture period $\lambda$ \cite{friesen2024perceived}.
Its frequency is the ratio of the absolute filtered (scalar) finger velocity $\dot{x} = \poseX{s}{|\hat{\dot{X}}|}{f}$ to the texture period $\lambda$ \cite{friesen2024perceived}.
As the finger is moving horizontally on the texture, only the $X$ component of the velocity is used.
This velocity modulation strategy is necessary as the finger position is estimated at a far lower rate (\qty{60}{\hertz}) than the audio signal (unlike high-fidelity force-feedback devices \cite{unger2011roughness}).
@@ -82,14 +82,14 @@ This velocity modulation strategy is necessary as the finger position is estimat
%
%The best strategy instead is to modulate the frequency of the signal as a ratio of the filtered finger velocity ${}^t\hat{\dot{\mathbf{X}}}_f$ and the texture period $\lambda$ \cite{friesen2024perceived}.
%
When a new finger velocity $x_f\,(t_j)$ is estimated at time $t_j$, the phase $\phi\,(t_j)$ of the signal $r$ needs also to be adjusted to ensure a continuity in the signal.
When a new finger velocity $\dot{x}\,(t_j)$ is estimated at time $t_j$, the phase $\phi\,(t_j)$ of the signal $r$ also needs to be adjusted to ensure continuity of the signal.
In other words, the sampling of the audio signal runs at \qty{48}{\kilo\hertz}, and its frequency and phase are updated at a far lower rate of \qty{60}{\hertz} when a new finger velocity is estimated.
A sample $r(x_f, t_j, t_k)$ of the audio signal at sampling time $t_k$, with $t_k >= t_j$, is thus given by:
A sample $r(t_j, t_k)$ of the audio signal at sampling time $t_k$, with $t_k \geq t_j$, is thus given by:
\begin{subequations}
\label{eq:signal}
\begin{align}
r(x_f, t_j, t_k) & = A\, \text{sgn} ( \sin (2 \pi \frac{x_f\,(t_j)}{\lambda} t_k + \phi(t_j) ) ) & \label{eq:signal_speed} \\
\phi(t_j) & = \phi(t_{j-1}) + 2 \pi \frac{x_f\,(t_j) - x_f\,(t_j - 1)}{\lambda} t_k & \label{eq:signal_phase}
r(t_j, t_k) & = A\, \text{sgn} ( \sin (2 \pi \frac{\dot{x}\,(t_j)}{\lambda} t_k + \phi(t_j) ) ) & \label{eq:signal_speed} \\
\phi(t_j) & = \phi(t_{j-1}) + 2 \pi \frac{\dot{x}\,(t_{j-1}) - \dot{x}\,(t_j)}{\lambda} t_j & \label{eq:signal_phase}
\end{align}
\end{subequations}
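For illustration, here is a minimal Python sketch of this phase-matched square-wave generation, with the frequency updated at \qty{60}{\hertz} and samples drawn at \qty{48}{\kilo\hertz}; the class and the $\lambda$ value are hypothetical, and the phase update implements the continuity condition of \eqref{signal_phase}:

```python
import numpy as np

AUDIO_RATE = 48_000  # audio sampling rate (Hz)
A = 1.0              # signal amplitude
LAM = 0.002          # texture period lambda in metres (hypothetical value)

class TextureSignal:
    """Square-wave texture signal with phase matching at velocity updates."""

    def __init__(self) -> None:
        self.freq = 0.0   # current frequency f = x_dot / lambda (Hz)
        self.phase = 0.0  # current phase offset phi (rad)

    def update_velocity(self, x_dot: float, t_j: float) -> None:
        """Called at ~60 Hz with a new filtered finger speed estimate.

        phi is shifted so that the argument 2*pi*f*t + phi is continuous
        at t_j, avoiding sudden jumps in the actuator movement.
        """
        new_freq = abs(x_dot) / LAM
        self.phase += 2.0 * np.pi * (self.freq - new_freq) * t_j
        self.freq = new_freq

    def samples(self, t_start: float, n: int) -> np.ndarray:
        """Generate n audio samples starting at time t_start >= t_j."""
        t_k = t_start + np.arange(n) / AUDIO_RATE
        return A * np.sign(np.sin(2.0 * np.pi * self.freq * t_k + self.phase))
```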
@@ -97,7 +97,7 @@ This rendering preserves the sensation of a constant spatial frequency of the vi
%
%Note that the finger position and velocity are transformed from the camera frame $\poseFrame{c}$ to the texture frame $\poseFrame{t}$, with the $x$ axis aligned with the texture direction.
%
%However, when a new finger position is estimated at time $t_j$, the phase $\phi_j$ needs to be adjusted as well with the frequency to ensure a continuity in the signal as described in \eqref{signal_phase}.
%However, when a new finger position is estimated at time $t_j$, the phase $\phi(t_j)$ needs to be adjusted as well with the frequency to ensure a continuity in the signal as described in \eqref{signal_phase}.
%
The phase matching avoids sudden changes in the actuator movement that would affect the texture perception in an uncontrolled way (\figref{phase_adjustment}) and, contrary to previous work \cite{asano2015vibrotactile,ujitoko2019modulating}, it enables a free exploration of the texture by the user with no constraints on the finger speed.
A square wave is chosen to get a rendering closer to a real grating texture with the sensation of crossing edges \cite{ujitoko2019modulating}, and because sine wave textures have been shown not to reproduce the roughness perception of real grating textures \cite{unger2011roughness}.
@@ -19,12 +19,12 @@ We aimed to assess \textbf{which haptic textures were matched with which visual
\item A user study evaluating with 20 participants the coherence, realism and perceived roughness of nine pairs of these visuo-haptic texture augmentations.
\end{itemize}
\noindentskip In the next sections, we first describe the apparatus and experimental design of the user study, including the two tasks performed. We then present the results obtained and discuss them before concluding.
\smallskip
\bigskip
\fig[0.65]{experiment/view}{First person view of the user study.}[
As seen through the immersive \AR headset Microsoft HoloLens~2.
\fig[0.55]{experiment/view}{First person view of the user study.}[
%As seen through the immersive \AR headset.
The visual texture overlays were statically displayed on the surfaces, allowing the user to move around to view them from different angles.
The haptic texture augmentations were generated based on \HaTT data-driven texture models and finger speed, and were rendered on the middle index phalanx as it slides on the considered surface.
The haptic texture augmentations were generated based on \HaTT data-driven texture models and finger speed, and were rendered on the middle index phalanx.% as it slides on the considered surface.
]
\noindentskip In the next sections, we first describe the apparatus and experimental design of the user study, including the two tasks performed. We then present the results obtained and discuss them before concluding.
@@ -95,7 +95,7 @@ Each modality level was ranked once per participant following the fixed order li
\subsection{Participants}
\label{participants}
Twenty participants took part in the user study (12 males, 7 females, 1 preferred not to say), aged between 20 and 60 years (\mean{29.1}, \sd{9.4}).
Twenty participants took part in the user study (12 males, 7 females, 1 preferred not to say), aged between 20 and 60 years (\median{26.5}, \iqr{9}{}).
One participant was left-handed, all others were right-handed; they all performed the user study with their dominant hand.
All participants had normal or corrected-to-normal vision and none of them had a known hand or finger impairment.
They rated their experience with haptics, \AR, and \VR (\enquote{I use it every month or more}); 10 were experienced with haptics, 2 with \AR, and 10 with \VR.
@@ -38,32 +38,33 @@ To verify that the difficulty with all the visual textures was the same on the \
As the \response{Completion Time} results were Gamma distributed, they were log-transformed to approximate a normal distribution.
A \LMM on the log \response{Completion Time} with the \factor{Visual Texture} as fixed effect and the participant as random intercept was performed.
Normality was verified with a QQ-plot of the model residuals.
No statistical significant effect of \factor{Visual Texture} was found (\anova{8}{512}{1.9}, \p{0.06}) on \response{Completion Time} (\geomean{44}{\s}, \ci{42}{46}), indicating an equal difficulty and participant behaviour for all the visual textures.
No statistically significant effect of \factor{Visual Texture} was found (\anova{8}{512}{1.9}, \p{0.06}) on \response{Completion Time} (\geomean{44}{\s} \ci{42}{46}), indicating an equal difficulty and participant behaviour for all the visual textures.
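A sketch of this analysis in Python with statsmodels, assuming a hypothetical trial-level data frame `df`; the original analysis may have used different tooling:

```python
import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

# df: one row per trial, with columns completion_time (s), visual_texture
# (factor with 9 levels), and participant (id) -- a hypothetical layout.
df = pd.read_csv("trials.csv")
df["log_time"] = np.log(df["completion_time"])       # Gamma -> approx. normal
model = smf.mixedlm("log_time ~ C(visual_texture)",  # fixed effect
                    df, groups=df["participant"])    # random intercept
print(model.fit().summary())
```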
\subsection{Textures Ranking}
\label{results_ranking}
\figref{results/ranking_mean_ci} presents the results of the three rankings of the haptic textures alone, the visual textures alone, and the visuo-haptic texture pairs.
For each ranking, a Friedman test was performed with post-hoc Wilcoxon signed-rank tests and Holm-Bonferroni adjustment.
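For illustration, this test procedure could look as follows in Python with SciPy and statsmodels, assuming `ranks` maps each texture to the ranks given by the 20 participants (tooling and names are ours):

```python
from itertools import combinations
from scipy.stats import friedmanchisquare, wilcoxon
from statsmodels.stats.multitest import multipletests

stat, p = friedmanchisquare(*ranks.values())    # omnibus test over textures
pairs = list(combinations(ranks, 2))
raw_p = [wilcoxon(ranks[a], ranks[b]).pvalue for a, b in pairs]
adj_p = multipletests(raw_p, method="holm")[1]  # Holm-Bonferroni adjustment
significant = [pair for pair, q in zip(pairs, adj_p) if q < 0.05]
```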
\paragraph{Haptic Textures Ranking}
Almost all the texture pairs in the haptic textures ranking results were statistically significantly different (\chisqr{8}{20}{146}, \pinf{0.001}; \pinf{0.05} for each comparison), except between (\level{Metal Mesh}, \level{Sandpaper~100}), (\level{Cork}, \level{Brick~2}), (\level{Cork}, \level{Sandpaper~320}), (\level{Plastic Mesh~1}, \level{Velcro Hooks}), and (\level{Plastic Mesh~1}, \level{Terra Cotta}).
Average Kendall's Tau correlations between the participants indicated a high consensus (\kendall{0.82}, \ci{0.81}{0.84}) showing that participants perceived similarly the roughness of the haptic textures.
Average Kendall's Tau correlations between the participants indicated a high consensus (\kendall{0.82} \ci{0.81}{0.84}), showing that participants perceived the roughness of the haptic textures similarly.
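Such a consensus score can be computed, for instance, as the average pairwise Kendall's Tau over participants (illustrative sketch, assuming `rankings` holds one rank vector per participant):

```python
from itertools import combinations
from scipy.stats import kendalltau

# rankings: one rank vector (over the nine textures) per participant
taus = [kendalltau(a, b)[0] for a, b in combinations(rankings, 2)]
consensus = sum(taus) / len(taus)  # average pairwise Kendall's Tau
```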
\paragraph{Visual Textures Ranking}
Most of the texture pairs in the visual textures ranking results were also statistically significantly different (\chisqr{8}{20}{119}, \pinf{0.001}; \pinf{0.05} for each comparison), except for the following groups: \{\level{Metal Mesh}, \level{Cork}, \level{Plastic Mesh~1}\}; \{\level{Sandpaper~100}, \level{Brick~2}, \level{Plastic Mesh~1}, \level{Velcro Hooks}\}; \{\level{Cork}, \level{Velcro Hooks}\}; \{\level{Sandpaper~320}, \level{Terra Cotta}\}; and \{\level{Sandpaper~320}, \level{Coffee Filter}\}.
Even though the consensus was high (\kendall{0.61}, \ci{0.58}{0.64}), the roughness of the visual textures were more difficult to estimate, in particular for \level{Plastic Mesh~1} and \level{Velcro Hooks}.
Even though the consensus was high (\kendall{0.61} \ci{0.58}{0.64}), the roughness of the visual textures was more difficult to estimate, in particular for \level{Plastic Mesh~1} and \level{Velcro Hooks}.
\paragraph{Visuo-Haptic Textures Ranking}
Also, almost all the texture pairs in the visuo-haptic textures ranking results were statistically significantly different (\chisqr{8}{20}{140}, \pinf{0.001}; \pinf{0.05} for each comparison), except for the following groups: \{\level{Sandpaper~100}, \level{Cork}\}; \{\level{Cork}, \level{Brick~2}\}; and \{\level{Plastic Mesh~1}, \level{Velcro Hooks}, \level{Sandpaper~320}\}.
The consensus between the participants was also high \kendall{0.77}, \ci{0.74}{0.79}.
Finally, calculating the similarity of the three rankings of each participant, the \textit{Visuo-Haptic Textures Ranking} was on average highly similar to the \textit{Haptic Textures Ranking} (\kendall{0.79}, \ci{0.72}{0.86}) and moderately to the \textit{Visual Textures Ranking} (\kendall{0.48}, \ci{0.39}{0.56}).
The consensus between the participants was also high (\kendall{0.77} \ci{0.74}{0.79}).
Finally, calculating the similarity of the three rankings of each participant, the \textit{Visuo-Haptic Textures Ranking} was on average highly similar to the \textit{Haptic Textures Ranking} (\kendall{0.79} \ci{0.72}{0.86}) and moderately similar to the \textit{Visual Textures Ranking} (\kendall{0.48} \ci{0.39}{0.56}).
A Wilcoxon signed-rank test indicated that this difference was statistically significant (\wilcoxon{190}, \p{0.002}).
These results indicate that the haptic and visual modalities were integrated together, the resulting roughness ranking lying between the two single-modality rankings, but with haptics predominating.
\fig[0.6]{results/ranking_mean_ci}{Means with bootstrap \percent{95} \CI of the three rankings of the haptic textures alone, the visual textures alone, and the visuo-haptic texture pairs. }[
\fig[0.7]{results/ranking_mean_ci}{Means with bootstrap \percent{95} \CI of the three rankings of the haptic textures alone, the visual textures alone, and the visuo-haptic texture pairs. }[
A lower rank means that the texture was considered rougher, a higher rank means smoother.
]
@@ -137,13 +138,14 @@ This shows that the participants consistently identified the roughness of each v
\figref{results_questions} presents the questionnaire results of the \level{Matching} and \level{Ranking} tasks.
A non-parametric \ANOVA on \ART models was used for the \response{Difficulty} and \response{Realism} question results.
The other question results were analyzed using Wilcoxon signed-rank tests, with Holm-Bonferroni adjustment.
The results are shown as mean $\pm$ standard deviation.
On \response{Difficulty}, there were statistically significant effects of \factor{Task} (\anova{1}{57}{13}, \pinf{0.001}) and of \factor{Modality} (\anova{1}{57}{8}, \p{0.007}), but no interaction effect. % \factor{Task} \x \factor{Modality} (\anova{1}{57}{2}, \ns).
The \level{Ranking} task was found easier (\mean{2.9}, \sd{1.2}) than the \level{Matching} task (\mean{3.9}, \sd{1.5}), and the Haptic textures were found easier to discriminate (\mean{3.0}, \sd{1.3}) than the Visual ones (\mean{3.8}, \sd{1.5}).
The \level{Ranking} task was found easier (\num{2.9 \pm 1.2}) than the \level{Matching} task (\num{3.9 \pm 1.5}), and the Haptic textures were found easier to discriminate (\num{3.0 \pm 1.3}) than the Visual ones (\num{3.8 \pm 1.5}).
Both haptic and visual textures were judged moderately realistic for both tasks (\mean{4.2}, \sd{1.3}), with no statistically significant effect of \factor{Task}, \factor{Modality} or their interaction on \response{Realism}.
Both haptic and visual textures were judged moderately realistic for both tasks (\num{4.2 \pm 1.3}), with no statistically significant effect of \factor{Task}, \factor{Modality} or their interaction on \response{Realism}.
No statistically significant effects of \factor{Task} on \response{Textures Match} and \response{Uncomfort} were found either.
The coherence of the texture pairs was considered moderate (\mean{4.6}, \sd{1.2}) and the haptic device was not felt uncomfortable (\mean{2.4}, \sd{1.4}).
The coherence of the texture pairs was considered moderate (\num{4.6 \pm 1.2}) and the haptic device was not perceived as uncomfortable (\num{2.4 \pm 1.4}).
\begin{subfigs}{results_questions}{Boxplots of the questionnaire results for each visual hand rendering.}[
Pairwise Wilcoxon signed-rank tests with Holm-Bonferroni adjustment: * is \pinf{0.05}, ** is \pinf{0.01} and *** is \pinf{0.001}.
@@ -16,7 +16,7 @@ Indeed, the majority of users explained that, based on the roughness, granularit
Several strategies were used, as some participants reported using vibration frequency and/or amplitude to match a haptic texture.
It should be noted that the task was rather difficult (\figref{results_questions}), as participants had no prior knowledge of the textures, there were no additional visual cues such as the shape of an object, and the term \enquote{roughness} had not been used by the experimenter prior to the \level{Ranking} task.
The correspondence analysis (\figref{results/matching_correspondence_analysis}) highlighted that participants did indeed match visual and haptic textures primarily on the basis of their perceived roughness (\percent{60} of variance), which is in line with previous perception studies on real \cite{baumgartner2013visual} and virtual \cite{culbertson2014modeling} textures.
The correspondence analysis (\figref{results/matching_correspondence_analysis}) highlighted that participants did indeed match visual and haptic textures primarily on the basis of their perceived roughness (\percent{60} of variance), which is in line with previous perception studies on real textures \cite{baumgartner2013visual} and virtual textures \cite{culbertson2014modeling}.
The rankings (\figref{results/ranking_mean_ci}) confirmed that the participants all perceived the roughness of haptic textures very similarly, but that there was less consensus for visual textures, which is also in line with roughness rankings for real haptic and visual textures \cite{bergmanntiest2007haptic}.
These results made it possible to identify and name groups of textures in the form of clusters (\figref{results_clusters}), and to construct confusion matrices between these clusters and between visual texture ranks with haptic clusters (\figref{results/haptic_visual_clusters_confusion_matrices}), showing that participants consistently identified and matched haptic and visual textures.
\percent{30} of the matching variance of the correspondence analysis was also captured with a second dimension, opposing the roughest textures (\level{Metal Mesh}, \level{Sandpaper~100}), and to a lesser extent the smoothest (\level{Coffee Filter}, \level{Sandpaper~320}), with all other textures (\figref{results/matching_correspondence_analysis}).
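For illustration, such a correspondence analysis can be run with, e.g., the `prince` library, assuming a hypothetical 9x9 contingency table `counts` of haptic-visual matches; the tooling actually used is not specified here:

```python
import prince  # correspondence analysis package (assumed available)

# counts: pandas DataFrame, rows = haptic textures, columns = visual
# textures, cells = how often each pair was matched by participants
ca = prince.CA(n_components=2).fit(counts)
haptic_xy = ca.row_coordinates(counts)     # 2D coordinates of haptic textures
visual_xy = ca.column_coordinates(counts)  # 2D coordinates of visual textures
```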
BIN
3-perception/vhar-textures/figures/results/rankings.pdf
Normal file
Binary file not shown.
@@ -2,11 +2,11 @@
\label{intro}
In the previous chapter, we presented a system for augmenting the visuo-haptic texture perception of real surfaces directly touched with the finger, using wearable vibrotactile haptics and an immersive \AR headset.
In this and the next chapter, we evaluate the user's perception of such wearable haptic texture augmentation under different visual rendering conditions.
In this chapter and the next one, we evaluate the user's perception of such wearable haptic texture augmentation under different visual rendering conditions.
Most of the haptic augmentations of real surfaces with wearable haptic devices, including the roughness of textures (\secref[related_work]{texture_rendering}), have been studied without visual feedback, and none have considered the influence of the visual rendering on their perception or integrated them in \AR and \VR.
Still, it is known that the visual rendering of an object can influence the perception of its haptic properties (\secref[related_work]{visual_haptic_influence}), and that the perception of the same haptic force-feedback or vibrotactile rendering can differ between \AR and \VR, probably due to differences in perceived simultaneity between visual and haptic stimuli (\secref[related_work]{ar_vr_haptic}).
Indeed, in \AR, the user can see their own hand touching, the haptic device worn and the \RE, while in \VR they are hidden by the \VE.
In \AR, the user can see their own hand touching, the haptic device worn, and the \RE, while in \VR they are hidden by the \VE.
In this chapter, we investigate the \textbf{role of the visual feedback of the virtual hand and of the environment (real or virtual) on the perception of a real surface whose haptic roughness is augmented} with wearable vibrotactile haptics. %voice-coil device worn on the finger.
To do so, we used the visuo-haptic system presented in \chapref{vhar_system} to render virtual vibrotactile patterned textures (\secref[related_work]{texture_rendering}) to augment the real surface being touched. % touched by the finger.% that can be directly touched with the bare finger.
@@ -19,16 +19,12 @@ To control for the influence of the visual rendering, the real surface was not v
\item A discussion and recommendations on the integration of wearable haptic augmentations in direct touch context with \AR and \VR.
\end{itemize}
\noindentskip In the remainder of this chapter, we first describe the experimental design and apparatus of the user study.
\noindentskip In the next sections, we first describe the experimental design and apparatus of the user study.
We then present the results obtained, discuss them, and outline recommendations for future \AR/\VR works using wearable haptic augmentations.
%First, we present a system for rendering virtual vibrotactile textures in real time without constraints on hand movements and integrated with an immersive visual \AR/\VR headset to provide a coherent visuo-haptic augmentation of the \RE.
%An experimental setup is then presented to compare haptic roughness augmentation with an optical \AR headset (Microsoft HoloLens~2) that can be transformed into a \VR headset using a cardboard mask.
%We then conduct a psychophysical study with 20 participants, where various virtual haptic textures on a real surface directly touched with the finger are compared in a two-alternative forced choice (2AFC) task in three visual rendering conditions: (1) without visual augmentation, (2) with a realistic virtual hand rendering in \AR, and (3) with the same virtual hand in \VR.
\smallskip
\bigskip
\fig[0.9]{teaser/teaser2}{
\fig[0.55]{teaser/teaser2}{
Vibrotactile textures were rendered in real time on a real surface using a wearable vibrotactile device worn on the finger.
}[%
Participants explored this haptic roughness augmentation with (\level{Real}) their real hand alone, (\level{Mixed}) a realistic virtual hand overlay in \AR, and (\level{Virtual}) the same virtual hand in \VR.
BIN
3-perception/xr-perception/figures/experiment/headset2.jpg
Normal file
Binary file not shown.
After Width: | Height: | Size: 351 KiB |
@@ -6,7 +6,7 @@ Touching, grasping and manipulating virtual objects are fundamental interactions
Manipulation of virtual objects is achieved using a virtual hand interaction technique that represents the user's hand in the \VE and simulates interaction with virtual objects (\secref[related_work]{ar_virtual_hands}).
The visual feedback of the virtual hand is a key element for interacting and manipulating virtual objects in \VR \cite{prachyabrued2014visual,grubert2018effects}.
Some work has also investigated the visual feedback of the virtual hand in \AR, but either not in an immersive context of virtual object manipulation \cite{blaga2017usability,yoon2020evaluating} or limited to a single visual hand augmentation \cite{piumsomboon2014graspshell,maisto2017evaluation}.
\OST-\AR also has significant perceptual differences from \VR due the lack of mutual occlusion between the hand and the virtual object in \OST-\AR (\secref[related_work]{ar_displays}), and the inherent delays between the user's hand and the result of the interaction simulation (\secref[related_work]{ar_virtual_hands}).
\Gls{OST}-\AR also has significant perceptual differences from \VR due to the lack of mutual occlusion between the hand and the virtual object in \OST-\AR (\secref[related_work]{ar_displays}), and the inherent delays between the user's hand and the result of the interaction simulation (\secref[related_work]{ar_virtual_hands}).
In this chapter, we investigate the \textbf{visual rendering of the virtual hand as an augmentation of the real hand} for direct hand manipulation of virtual objects in \OST-\AR.
To this end, we selected from the literature and compared the most popular visual hand augmentations used to interact with virtual objects in \AR.
@@ -3,14 +3,14 @@
\figref{results_questions} presents the questionnaire results for each visual hand augmentation.
Friedman tests indicated that all questions had statistically significant differences (\pinf{0.001}).
Pairwise Wilcoxon signed-rank tests with Holm-Bonferroni adjustment were then used each question results:
Pairwise Wilcoxon signed-rank tests with Holm-Bonferroni adjustment are reported (results are shown as mean $\pm$ standard deviation):
\begin{itemize}
\item \response{Difficulty}: \level{Occlusion} was considered more difficult than \level{Contour} (\p{0.02}), \level{Skeleton} (\p{0.01}), and \level{Mesh} (\p{0.03}).
\item \response{Fatigue}: \level{None} was found more fatiguing than \level{Mesh} (\p{0.04}); And \level{Occlusion} more than \level{Skeleton} (\p{0.02}) and \level{Mesh} (\p{0.02}).
\item \response{Precision}: \level{None} was considered less precise than \level{Skeleton} (\p{0.02}) and \level{Mesh} (\p{0.02}); And \level{Occlusion} more than \level{Contour} (\p{0.02}), \level{Skeleton} (\p{0.006}), and \level{Mesh} (\p{0.02}).
\item \response{Performance}: \level{Occlusion} was lower than \level{Contour} (\p{0.02}), \level{Skeleton} (\p{0.006}), and \level{Mesh} (\p{0.03}).
\item \response{Efficiency}: \level{Occlusion} was found less efficient than \level{Contour} (\p{0.01}), \level{Skeleton} (\p{0.02}), and \level{Mesh} (\p{0.02}).
\item \response{Rating}: \level{Occlusion} was rated lower than \level{Contour} (\p{0.02}) and \level{Skeleton} (\p{0.03}).
\item \response{Difficulty}: \level{Occlusion} (\num{4.7 \pm 1.5}) was considered more difficult than \level{Contour} (\num{3.4 \pm 1.6}, \p{0.02}), \level{Skeleton} (\num{3.3 \pm 1.3}, \p{0.01}), and \level{Mesh} (\num{3.3 \pm 1.3}, \p{0.03}). No evidence of a difference was found for \level{None} (\num{4.6 \pm 1.9}) or \level{Tips} (\num{4.3 \pm 1.7}).
\item \response{Fatigue}: \level{None} (\num{4.9 \pm 1.3}) was found more fatiguing than \level{Mesh} (\num{3.6 \pm 1.3}, \p{0.04}); and \level{Occlusion} (\num{5.0 \pm 1.4}) more than \level{Skeleton} (\num{3.7 \pm 1.4}, \p{0.02}) and \level{Mesh} (\p{0.02}). No evidence of a difference was found for \level{Tips} (\num{4.5 \pm 1.6}) or \level{Contour} (\num{3.9 \pm 1.5}).
\item \response{Precision}: \level{None} (\num{2.8 \pm 1.8}) was considered less precise than \level{Skeleton} (\num{4.6 \pm 1.1}, \p{0.02}) and \level{Mesh} (\num{4.6 \pm 1.6}, \p{0.02}); and \level{Occlusion} (\num{2.8 \pm 1.7}) less precise than \level{Contour} (\num{4.6 \pm 1.8}, \p{0.02}), \level{Skeleton} (\p{0.006}), and \level{Mesh} (\p{0.02}). No evidence of a difference was found for \level{Tips} (\num{3.8 \pm 1.7}).
\item \response{Performance}: \level{Occlusion} (\num{3.0 \pm 1.5}) was rated lower than \level{Contour} (\num{4.9 \pm 1.6}, \p{0.02}), \level{Skeleton} (\num{4.9 \pm 1.4}, \p{0.006}), and \level{Mesh} (\num{4.6 \pm 1.4}, \p{0.03}). No evidence of a difference was found for \level{None} (\num{3.4 \pm 1.8}) or \level{Tips} (\num{4.2 \pm 1.6}).
\item \response{Efficiency}: \level{Occlusion} (\num{3.2 \pm 1.7}) was found less efficient than \level{Contour} (\num{4.9 \pm 1.7}, \p{0.01}), \level{Skeleton} (\num{4.9 \pm 1.4}, \p{0.02}), and \level{Mesh} (\num{4.8 \pm 1.3}, \p{0.02}). No evidence of a difference was found for \level{None} (\num{3.5 \pm 1.8}) or \level{Tips} (\num{4.3 \pm 1.5}).
\item \response{Rating}: \level{Occlusion} (\num{2.9 \pm 1.6}) was rated lower than \level{Contour} (\num{5.2 \pm 1.9}, \p{0.02}) and \level{Skeleton} (\num{4.9 \pm 1.9}, \p{0.03}). No evidence of a difference was found for \level{None} (\num{3.5 \pm 2.0}), \level{Tips} (\num{3.6 \pm 2.1}), or \level{Mesh} (\num{4.6 \pm 1.6}).
\end{itemize}
In summary, \level{Occlusion} was worse than \level{Skeleton} for all questions, and worse than \level{Contour} and \level{Mesh} on 5 of the 6 questions.
@@ -4,7 +4,7 @@
We evaluated six visual hand augmentations, as described in \secref{hands}, displayed on top of the real hand, in two virtual object manipulation tasks in \AR.
During the \level{Push} task, the \level{Skeleton} hand rendering was the fastest (\figref{results/Push-CompletionTime}), as participants employed fewer and longer contacts to adjust the cube inside the target volume (\figref{results/Push-ContactsCount} and \figref{results/Push-MeanContactTime}).
Participants consistently used few and continuous contacts for all visual hand augmentations (\figref{results/Push-ContactsCount}); only fewer than ten trials, carried out by two participants, were quickly completed with multiple discrete touches.
%Participants consistently used few and continuous contacts for all visual hand augmentations (\figref{results/Push-ContactsCount}), with only less than ten trials, carried out by two participants, quickly completed with multiple discrete touches.
However, during the \level{Grasp} task, despite no difference in \response{Completion Time}, providing no visible hand rendering (\level{None} and \level{Occlusion} renderings) led to more failed grasps or cube drops (\figref{results/Grasp-ContactsCount} and \figref{results/Grasp-MeanContactTime}).
Indeed, participants found the \level{None} and \level{Occlusion} renderings less effective (\figref{results/Ranks-Grasp}) and less precise (\figref{results_questions}).
To understand whether the participants' previous experience might have played a role, we also carried out an additional statistical analysis considering \VR experience as an additional between-subjects factor, \ie \VR novices vs. \VR experts (\enquote{I use it every week}, see \secref{participants}).
@@ -27,10 +27,10 @@ They are described as follows, with the corresponding abbreviation in brackets:
When a fingertip contacts the virtual cube, we activate the corresponding vibrating actuator.
We considered two representative contact vibration techniques, \ie two ways of rendering such contacts through vibrations:
\begin{itemize}
\item \level{Impact} (Impa): a \qty{200}{\ms}--long vibration burst is applied when the fingertip makes contact with the object.
\item \level{Impact}: a \qty{200}{\ms}--long vibration burst is applied when the fingertip makes contact with the object.
The amplitude of the vibration is proportional to the speed of the fingertip at the moment of the contact.
This technique is inspired by the impact vibrations modelled by tapping on real surfaces, as described in \secref[related_work]{hardness_rendering}.
\item \level{Distance} (Dist): a continuous vibration is applied whenever the fingertip is in contact with the object.
\item \level{Distance}: a continuous vibration is applied whenever the fingertip is in contact with the object.
The amplitude of the vibration is proportional to the interpenetration between the fingertip and the virtual cube surface.
|
||||
\end{itemize}
|
||||
|
||||
@@ -38,8 +38,8 @@ The implementation of these two techniques has been tuned according to the resu

Three participants were asked to carry out a series of push and grasp tasks similar to those used in the actual experiment.
Results showed that \percent{95} of the contacts between the fingertip and the virtual cube happened at speeds below \qty{1.5}{\m\per\s}.
We also measured the minimum perceivable amplitude to be \percent{15} (\qty{0.6}{\g}) of the maximum amplitude of the motors we used.
For this reason, we designed the Impact vibration technique (Impa) so that contact speeds from \qtyrange{0}{1.5}{\m\per\s} are linearly mapped into \qtyrange{15}{100}{\%} amplitude commands for the motors.
Similarly, we designed the distance vibration technique (Dist) so that interpenetrations from \qtyrange{0}{2.5}{\cm} are linearly mapped into \qtyrange{15}{100}{\%} amplitude commands for the motors, recalling that the virtual cube has an edge of \qty{5}{\cm}.
For this reason, we designed the \level{Impact} vibration technique so that contact speeds from \qtyrange{0}{1.5}{\m\per\s} are linearly mapped into \qtyrange{15}{100}{\%} amplitude commands for the motors.
Similarly, we designed the \level{Distance} vibration technique so that interpenetrations from \qtyrange{0}{2.5}{\cm} are linearly mapped into \qtyrange{15}{100}{\%} amplitude commands for the motors, recalling that the virtual cube has an edge of \qty{5}{\cm}.

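To make these mappings concrete, here is a minimal sketch in Python; the function names, units, and clamping at the range boundaries are our assumptions, not the actual implementation used in the experiment:
\begin{verbatim}
# Hypothetical sketch of the two linear amplitude mappings.
def impact_amplitude(contact_speed_m_per_s):
    # Speeds in [0, 1.5] m/s map linearly to [15, 100] % amplitude;
    # faster contacts are clamped at 100 %.
    t = min(max(contact_speed_m_per_s / 1.5, 0.0), 1.0)
    return 15.0 + 85.0 * t

def distance_amplitude(interpenetration_cm):
    # Interpenetrations in [0, 2.5] cm map linearly to [15, 100] %
    # amplitude (recall the virtual cube has a 5 cm edge).
    t = min(max(interpenetration_cm / 2.5, 0.0), 1.0)
    return 15.0 + 85.0 * t
\end{verbatim}
Both mappings share the \percent{15} floor corresponding to the minimum perceivable amplitude measured in the pilot.
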
\section{User Study}
\label{method}
@@ -60,7 +60,8 @@ We considered the same two \level{Push} and \level{Grasp} tasks as described in

\begin{itemize}
\item left-bottom (\level{LB}) and left-front (\level{LF}) during the \level{Push} task; and
\item right-bottom (\level{RB}), left-bottom (\level{LB}), left-front (\level{LF}) and right-front (\level{RF}) during the \level{Grasp} task.
\end{itemize}. We considered these targets because they presented different difficulties.
\end{itemize}
We considered these targets because they presented different difficulties in the previous user study (\chapref{visual_hand}).
\end{itemize}

\begin{subfigs}{tasks}{The two manipulation tasks of the user study.}[
@@ -114,10 +115,10 @@ Preliminary tests confirmed this approach.

\subsection{Participants}
\label{participants}

Twenty subjects participated in the study (mean age = 26.8, \sd{4.1}; 19~males, 1~female).
Twenty participants were recruited for the study (19 males, 1 female), aged between 20 and 35 years (\median{26}{}, \iqr{5.3}{}).
One was left-handed, while the other nineteen were right-handed. They all used their dominant hand during the trials.
They all had normal or corrected-to-normal vision.
Thirteen subjects participated also in the previous experiment.
Thirteen participants also took part in the previous experiment.

Participants rated their expertise (\enquote{I use it more than once a year}) with \VR, \AR, and haptics in a pre-experiment questionnaire.
Twelve were experienced with \VR, eight with \AR, and ten with haptics.

@@ -137,5 +138,5 @@ They then rated the ten combinations of \factor{Positioning} \x \factor{Vibratio

\item \response{Realism}: How realistic was each vibrotactile rendering?
\end{itemize}

Finally, they rated the ten combinations of \factor{Positioning} \x factor{Hand} on a 7-item Likert scale (1=Not at all, 7=Extremely):
Finally, they rated the ten combinations of \factor{Positioning} \x \factor{Hand} on a 7-item Likert scale (1=Not at all, 7=Extremely):
\response{Positioning \x Hand Rating}: How much do you like each combination of vibrotactile location for each visual hand rendering?

@@ -33,7 +33,7 @@ This showed different strategies to adjust the cube inside the target volume, wi

It was also shorter with \level{None} than with \level{Skeleton} (\percent{-9}, \pinf{0.001}).
This indicates, as in \chapref{visual_hand}, more confidence with a visual hand augmentation.

\begin{subfigs}{push_results}{Results of the grasp task performance metrics.}[
\begin{subfigs}{push_results}{Results of the push task performance metrics.}[
Geometric means with bootstrap \percent{95} \CI for each vibrotactile positioning (a, b and c) or visual hand augmentation (d)
and Tukey's \HSD pairwise comparisons: *** is \pinf{0.001}, ** is \pinf{0.01}, and * is \pinf{0.05}.
][
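As an aside, the geometric means with bootstrap \percent{95} \CI reported in these figures can be computed along the following lines (a minimal Python sketch; the resample count and seed are our assumptions):
\begin{verbatim}
import numpy as np

def geomean_bootstrap_ci(samples, n_boot=10000, seed=0):
    # Geometric mean with a percentile bootstrap 95% CI.
    rng = np.random.default_rng(seed)
    logs = np.log(np.asarray(samples, dtype=float))
    gm = np.exp(logs.mean())
    resamples = rng.choice(logs, size=(n_boot, logs.size), replace=True)
    boots = np.exp(resamples.mean(axis=1))
    low, high = np.percentile(boots, [2.5, 97.5])
    return gm, (low, high)
\end{verbatim}
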
@@ -16,36 +16,35 @@ Although the \level{Distance} technique provided additional feedback on the inte

The results of each question were analyzed using non-parametric \ANOVA on an \ART model.
Statistically significant effects were further analyzed using post-hoc pairwise comparisons with Holm-Bonferroni adjustment.
Wilcoxon signed-rank tests were used for main effects and the \ART contrasts procedure for interaction effects.
Only significant results are reported.
Only significant tests are reported, and results are shown as mean $\pm$ standard deviation.
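For concreteness, the pairwise post-hoc procedure for a main effect could look as follows, as a minimal Python sketch; the data layout (one averaged score per participant and condition) and the function name are hypothetical:
\begin{verbatim}
from itertools import combinations
from scipy.stats import wilcoxon
from statsmodels.stats.multitest import multipletests

def posthoc_wilcoxon_holm(ratings):
    # ratings: dict mapping a condition name to a list of scores,
    # one per participant, in the same order for every condition.
    pairs = list(combinations(sorted(ratings), 2))
    pvals = [wilcoxon(ratings[a], ratings[b]).pvalue for a, b in pairs]
    reject, p_adj, _, _ = multipletests(pvals, method="holm")
    return [(a, b, p, sig) for (a, b), p, sig in zip(pairs, p_adj, reject)]
\end{verbatim}
Interaction effects would instead go through the \ART contrasts procedure, which has no equally compact sketch.
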
\paragraph{Vibrotactile Rendering Rating}
\label{vibration_ratings}

There was a main effect of \factor{Positioning} (\anova{4}{171}{27.0}, \pinf{0.001}, see \figref{results/Question-Vibration Rating-Positioning-Overall}).
Participants preferred \level{Fingertips} more than \level{Wrist} (\p{0.01}), \level{Opposite} (\pinf{0.001}), and \level{Nowhere} (\pinf{0.001});
\level{Proximal} more than \level{Wrist} (\p{0.007}), \level{Opposite} (\pinf{0.001}), and \level{Nowhere} (\pinf{0.001});
And \level{Wrist} more than \level{Opposite} (\p{0.01}) and \level{Nowhere} (\pinf{0.001}).
Participants preferred \level{Fingertips} (\num{5.2 \pm 1.2}) and \level{Proximal} (\num{5.2 \pm 1.1}) more than \level{Wrist} (\num{4.3 \pm 1.4}, \p{0.01}), \level{Opposite} (\num{3.4 \pm 1.5}, \pinf{0.001}), and \level{Nowhere} (\num{2.8 \pm 1.4}, \pinf{0.001}).
\level{Wrist} was also preferred over \level{Opposite} (\p{0.01}) and \level{Nowhere} (\pinf{0.001}).

\paragraph{Positioning \x Hand Rating}
\label{positioning_hand}

There were two main effects of \factor{Positioning} (\anova{4}{171}{20.6}, \pinf{0.001}) and of \factor{Hand} (\anova{1}{171}{12.2}, \pinf{0.001}).
Participants preferred \level{Fingertips} more than \level{Wrist} (\p{0.03}), \level{Opposite} (\pinf{0.001}), and \level{Nowhere} (\pinf{0.001});
\level{Proximal} more than \level{Wrist} (\p{0.003}), \level{Opposite} (\pinf{0.001}), and \level{Nowhere} (\pinf{0.001});
\level{Wrist} more than \level{Opposite} (\p{0.03}) and \level{Nowhere} (\pinf{0.001});
And \level{Skeleton} more than \level{No Hand} (\pinf{0.001}).
There were two main effects of \factor{Positioning} (\anova{4}{171}{20.6}, \pinf{0.001}) and of \factor{Hand} (\anova{1}{171}{12.2}, \pinf{0.001}), but no interaction effect.
Participants preferred \level{Fingertips} (\num{5.3 \pm 1.4}) and \level{Proximal} (\num{5.2 \pm 1.0}) more than \level{Wrist} (\num{4.5 \pm 1.2}, \p{0.03}), \level{Opposite} (\num{3.8 \pm 1.6}, \pinf{0.001}), and \level{Nowhere} (\num{3.2 \pm 1.7}, \pinf{0.001});
And \level{Wrist} more than \level{Opposite} (\p{0.03}) and \level{Nowhere} (\pinf{0.001}).
\level{Skeleton} (\num{4.7 \pm 1.6}) was also preferred over \level{No Hand} (\num{4.1 \pm 1.6}, \pinf{0.001}).

\paragraph{Workload}
\label{workload}

There was a main effect of \factor{Positioning} (\anova{4}{171}{3.9}, \p{0.004}, see \figref{results/Question-Workload-Positioning-Overall}).
Participants found \level{Opposite} more fatiguing than \level{Fingertips} (\p{0.01}), \level{Proximal} (\p{0.003}), and \level{Wrist} (\p{0.02}).
Participants found \level{Opposite} (\num{4.1 \pm 1.9}) more fatiguing than \level{Fingertips} (\num{2.9 \pm 1.7}, \p{0.01}), \level{Proximal} (\num{3.0 \pm 1.1}, \p{0.003}), and \level{Wrist} (\num{3.2 \pm 1.5}, \p{0.02}).
No evidence of a difference between \level{Nowhere} (\num{3.8 \pm 2.3}) and the other positionings was found.

\paragraph{Usefulness}
\label{usefulness}

There was a main effect of \factor{Positioning} (\anova{4}{171}{38.0}, \pinf{0.001}, see \figref{results/Question-Usefulness-Positioning-Overall}).
Participants found \level{Fingertips} the most useful, more than \level{Proximal} (\p{0.02}), \level{Wrist} (\pinf{0.001}), \level{Opposite} (\pinf{0.001}), and \level{Nowhere} (\pinf{0.001});
Participants found \level{Fingertips} (\num{5.7 \pm 1.0}) the most useful, more than \level{Proximal} (\num{5.3 \pm 1.1}, \p{0.02}), \level{Wrist} (\num{4.5 \pm 1.2}, \pinf{0.001}), \level{Opposite} (\num{3.8 \pm 1.8}, \pinf{0.001}), and \level{Nowhere} (\num{2.2 \pm 1.8}, \pinf{0.001});
\level{Proximal} more than \level{Wrist} (\p{0.008}), \level{Opposite} (\pinf{0.001}), and \level{Nowhere} (\pinf{0.001});
\level{Wrist} more than \level{Opposite} (\p{0.008}) and \level{Nowhere} (\pinf{0.001});
And \level{Opposite} more than \level{Nowhere} (\p{0.004}).
@@ -54,15 +53,15 @@ And \level{Opposite} more than \level{Nowhere} (\p{0.004}).

\label{realism}

There was a main effect of \factor{Positioning} (\anova{4}{171}{28.8}, \pinf{0.001}, see \figref{results/Question-Realism-Positioning-Overall}).
Participants found \level{Fingertips} the most realistic, more than \level{Proximal} (\p{0.05}), \level{Wrist} (\p{0.004}), \level{Opposite} (\pinf{0.001}), and \level{Nowhere} (\pinf{0.001});
Participants found \level{Fingertips} (\num{5.1 \pm 1.2}) the most realistic, more than \level{Proximal} (\num{4.7 \pm 1.3}, \p{0.05}), \level{Wrist} (\num{3.8 \pm 1.5}, \p{0.004}), \level{Opposite} (\num{3.2 \pm 1.8}, \pinf{0.001}), and \level{Nowhere} (\num{2.2 \pm 2.1}, \pinf{0.001});
\level{Proximal} more than \level{Wrist} (\p{0.03}), \level{Opposite} (\pinf{0.001}), and \level{Nowhere} (\pinf{0.001});
\level{Wrist} more than \level{Opposite} (\p{0.03}) and \level{Nowhere} (\pinf{0.001});
And \level{Opposite} more than \level{Nowhere} (\p{0.03}).

\begin{subfigs}{results_questions}{Boxplots of the questionnaire results for each vibrotactile positioning.}[
Pairwise Wilcoxon signed-rank tests with Holm-Bonferroni adjustment: *** is \pinf{0.001}, ** is \pinf{0.01}, and * is \pinf{0.05}.
Higher is better for \textbf{(a)} vibrotactile rendering rating, \textbf{(c)} usefulness and \textbf{(c)} fatigue.
Lower is better for \textbf{(d)} workload.
Higher is better for \textbf{(a)} vibrotactile rendering rating, \textbf{(c)} usefulness and \textbf{(d)} realism.
Lower is better for \textbf{(b)} workload.
]
\subfig[0.24]{results/Question-Vibration Rating-Positioning-Overall}
\subfig[0.24]{results/Question-Workload-Positioning-Overall}

@@ -26,7 +26,7 @@ This seemed inversely correlated with the performance, except for the \level{Now

Considering the two tasks, no clear difference in performance or appreciation was found between the two contact vibration techniques.
While the majority of participants could discriminate between the two techniques, only a minority identified them correctly (\secref{technique_results}).
It seemed that the Impact technique was sufficient to provide contact information compared to the \level{Distance} technique, which provided additional feedback on interpenetration, as reported by participants.
It seemed that the \level{Impact} technique was sufficient to provide contact information compared to the \level{Distance} technique, which provided additional feedback on interpenetration, as reported by participants.

No difference in performance was found between the two visual hand augmentations, except for the \level{Push} task, where the \level{Skeleton} hand rendering resulted again in longer contacts.
Additionally, the \level{Skeleton} rendering was appreciated and perceived as more effective than having no visual hand augmentation, confirming the results of \chapref{visual_hand}.
@@ -46,5 +46,5 @@ On the one hand, participants behaved differently when the haptic rendering was g

This behavior likely gave them a better experience of the tasks and more confidence in their actions, as well as a lower interpenetration/force applied to the cube \cite{pacchierotti2015cutaneous}.
On the other hand, the unfamiliarity of the contralateral hand positioning (\level{Opposite}) caused participants to spend more time understanding the haptic stimuli, which might have made them more focused on performing the task.
In terms of the contact vibration technique, the continuous vibration based on finger interpenetration (\level{Distance}) did not make a difference to performance, although it provided more information.
Participants felt that vibration bursts were sufficient (\level{Distance}) to confirm contact with the virtual object.
Participants felt that vibration bursts were sufficient (\level{Impact}) to confirm contact with the virtual object.
Finally, it was interesting to note that the visual hand augmentation was appreciated but felt less necessary when provided together with vibrotactile hand rendering, as the latter was deemed sufficient for acknowledging the contact.

@@ -103,10 +103,10 @@ The role of visuo-haptic texture augmentation should also be evaluated in more c

\paragraph{Specificities of Direct Touch.}

The haptic textures used were recordings and models of the vibrations of a hand-held probe sliding over real surfaces.
We generated the vibrotactile textures only from finger speed \cite{culbertson2015should}, but the perceived roughness of real textures also depends on other factors such as the contact force, angle, posture or surface of the contact \cite{schafer2017transfer}.
We generated the vibrotactile textures from the velocity magnitude of the finger, but the perceived roughness of real textures also depends on other factors such as the contact force, angle, posture or surface of the contact \cite{schafer2017transfer}.
The respective importance of these factors on haptic texture perception is not yet fully understood \cite{richardson2022learning}.
It would be interesting to determine the importance of these factors on the perceived realism of virtual vibrotactile textures in the context of bare finger touch.
We finger based captures of real textures should also be considered \cite{balasubramanian2024sens3}.
Finger-based captures of real textures should also be considered \cite{balasubramanian2024sens3}.
Finally, the virtual texture models should also be adaptable to individual sensitivities \cite{malvezzi2021design,young2020compensating}.

\subsection*{Visual Augmentation of the Hand for Manipulating Virtual Objects in AR}

@@ -5,7 +5,7 @@

\textbf{Erwan Normand}, Claudio Pacchierotti, Eric Marchand, and Maud Marchal.
\enquote{Visuo-Haptic Rendering of the Hand during 3D Manipulation in Augmented Reality}.
In: \textit{IEEE Transactions on Haptics}. 27.4 (2024), pp. 2481--2487.
In: \textit{IEEE Transactions on Haptics (ToH)}. 17.2 (2024), pp. 2481--2487.
%\textsc{doi}: \href{https://doi.org/10/gtqcfz}{10/gtqcfz}

\section*{International Conferences}
@@ -18,5 +18,5 @@ In: \textit{EuroHaptics}. Lille, France, July 2024. pp. 469--484.

\noindentskip
\textbf{Erwan Normand}, Claudio Pacchierotti, Eric Marchand, and Maud Marchal.
\enquote{How Different Is the Perception of Vibrotactile Texture Roughness in Augmented versus Virtual Reality?}.
In: \textit{ACM Symposium on Virtual Reality Software and Technology}. Trier, Germany, October 2024. pp. 287--296.
In: \textit{ACM Symposium on Virtual Reality Software and Technology (VRST)}. Trier, Germany, October 2024. pp. 287--296.
%\textsc{doi}: \href{https://doi.org/10/g5rr49}{10/g5rr49}

@@ -3,9 +3,16 @@

\nochaptertoc

% Résumé vulgarisé en 700 caractères
% Les dispositifs haptiques portables procurent des sensations tactiles tout en étant compacts.
% Mais ils ont été peu utilisés en réalité augmentée (RA), où le contenu virtuel est intégré à la perception du monde réel.
% Cette thèse explore leur potentiel pour améliorer les interactions de la main avec des objets virtuels en RA.
% Nous étudions d'abord l'impact du rendu visuel sur la perception des textures vibrotactiles virtuelles qui augmentent des surfaces réelles touchées directement par le doigt.
% Nous étudions ensuite comment des retours sensoriels visuo-haptiques augmentant la main améliorent les performances et l’expérience utilisateur lors de la manipulation d'objets virtuels en RA.

\selectlanguage{french}

Dans ce manuscrit de thèse, nous montrons comment la \emph{réalité augmentée (RA)}, qui intègre un contenu visuel virtuel dans la perception du monde réel, et l'\emph{haptique portable}, qui fournit des sensations tactiles sur la peau, peuvent améliorer l'interaction de la main avec des objets virtuels.
Dans ce manuscrit de thèse, nous montrons comment la \emph{réalité augmentée (RA)}, qui intègre un contenu visuel virtuel dans la perception du monde réel, et l'\emph{haptique portable}, qui fournit des sensations tactiles sur la peau, peuvent améliorer les interactions de la main avec des objets virtuels et augmentés.
Notre objectif est de permettre aux utilisateurs de percevoir et de manipuler des augmentations visuo-haptiques portables, comme si elles étaient réelles, directement avec leurs mains.

\section{Introduction}
@@ -31,7 +38,7 @@ Un aspect important de l'illusion de la RA (et de la RV) est la \emph{plausibili

Dans ce contexte, nous définissons un \emph{système de RA} comme l'ensemble des dispositifs matériels (dispositifs d'entrée, capteurs, affichages et dispositifs haptiques) et logiciels (suivi, simulation et rendu) qui permettent à l'utilisateur d'interagir avec l'environnement augmenté.
Les casques de RA sont la technologie d'affichage la plus prometteuse, car ils sont portables, fournissent à l'utilisateur un environnement augmenté \emph{immersif} et laissent les mains libres pour interagir \cite{hertel2021taxonomy}.
Un retour haptique est alors indispensable pour assurer une interaction plausible et cohérente avec le contenu visuel virtuel.
C'est pourquoi l'haptique portable semble pRAticulièrement adaptée à la RA immersive.
C'est pourquoi l'haptique portable semble particulièrement adaptée à la RA immersive.

\subsectionstarbookmark{Défis de la réalité augmentée visuo-haptique portable}

@@ -31,6 +31,7 @@

\begin{landscape}%
\fig[#1][p]{#2}{#3}[#4]%
\end{landscape}%
\clearpage%
}%
}

@@ -141,7 +142,7 @@

\usepackage{titletoc}
\newcommand{\chaptertoc}{% Print the table of contents for the chapter
\section*{Contents}% Add a section title
\vspace{-1.5em}%
\vspace{-2em}%
\horizontalrule%

\startcontents% Start referencing the contents
@@ -150,7 +151,7 @@

\vspace{-0.5em}%
\horizontalrule%
\vspace{2em}%
%\vspace{2em}%
}
\newcommand{\nochaptertoc}{%
\stopcontents%

BIN covers.odt (binary file not shown)
BIN covers.pdf (binary file not shown)
@@ -1,3 +1,33 @@

@incollection{abdulali2016datadriven,
title = {Data-{{Driven Modeling}} of {{Anisotropic Haptic Textures}}: {{Data Segmentation}} and {{Interpolation}}},
booktitle = {Haptics: {{Perception}}, {{Devices}}, {{Control}}, and {{Applications}}},
author = {Abdulali, Arsen and Jeon, Seokhee},
date = {2016},
volume = {9775},
pages = {228--239},
isbn = {978-3-319-42323-4 978-3-319-42324-1}
}

@inproceedings{abdulali2017sample,
title = {Sample Selection of Multi-Trial Data for Data-Driven Haptic Texture Modeling},
booktitle = {{{IEEE World Haptics Conf}}.},
author = {Abdulali, Arsen and Hassan, Waseem and Jeon, Seokhee},
date = {2017},
pages = {66--71},
doi = {10/g8t7zg},
isbn = {978-1-5090-1425-5}
}

@incollection{abdulali2018datadriven,
title = {Data-{{Driven Rendering}} of {{Anisotropic Haptic Textures}}},
booktitle = {Haptic {{Interaction}}},
author = {Abdulali, Arsen and Jeon, Seokhee},
date = {2018},
volume = {432},
pages = {401--407},
isbn = {978-981-10-4156-3 978-981-10-4157-0}
}

@inproceedings{achibet2017flexifingers,
title = {{{FlexiFingers}}: {{Multi-finger}} Interaction in {{VR}} Combining Passive Haptics and Pseudo-Haptics},
shorttitle = {{{FlexiFingers}}},
@@ -2584,7 +2614,7 @@

title = {Enlarging a {{Smartphone}} with {{AR}} to {{Create}} a {{Handheld VESAD}} ({{Virtually Extended Screen-Aligned Display}})},
booktitle = {{{IEEE Int}}. {{Symp}}. {{Mix}}. {{Augment}}. {{Real}}.},
author = {Normand, Erwan and McGuffin, Michael J.},
date = {2018},
date = {2018-10},
pages = {123--133},
doi = {10/gm7xsc}
}
@@ -2600,7 +2630,7 @@

@article{normand2024visuohaptic,
title = {Visuo-{{Haptic Rendering}} of the {{Hand}} during {{3D Manipulation}} in {{Augmented Reality}}},
author = {Normand, Erwan and Pacchierotti, Claudio and Marchand, Eric and Marchal, Maud},
date = {2024},
date = {2024-01},
journaltitle = {IEEE Trans. Haptics},
volume = {17},
number = {2},

@@ -6,7 +6,7 @@

[subrepo]
remote = https://gitlab.inria.fr/whar/packages/latex-utils
branch = main
commit = 8efe944a5dad0c8d2f2b844a3ca7389b9e8966f7
parent = d28555ffcb920d6a6148a8b484ef601f7a9a9fb2
commit = eb2bf7a751f36bcd487fa8d6d186c7e8249187dc
parent = f2ca2f13a8c32f7eb221b56b7cd0f349d9e7b182
method = rebase
cmdver = 0.4.6