From 913a35ef9dba248224b6c9d45e5d892361c1f5c9 Mon Sep 17 00:00:00 2001
From: Erwan Normand
Date: Fri, 12 Jul 2024 10:05:37 +0200
Subject: [PATCH] Override subfigs commands

---
 2-perception/ar-textures/2-experiment.tex     | 10 +++--
 2-perception/xr-perception/2-method.tex       | 20 ++++----
 2-perception/xr-perception/3-experiment.tex   |  6 +--
 2-perception/xr-perception/4-results.tex      | 45 +++++++++----------
 3-manipulation/visual-hand/1-introduction.tex | 12 ++---
 3-manipulation/visual-hand/2-method.tex       | 13 +++---
 3-manipulation/visual-hand/3-3-ranks.tex      |  4 +-
 3-manipulation/visual-hand/3-results.tex      | 14 +++---
 3-manipulation/visuo-haptic-hand/2-method.tex | 12 ++---
 .../visuo-haptic-hand/3-results.tex           |  8 ++--
 config/thesis_commands.tex                    | 43 ++++++++++++++++++
 11 files changed, 112 insertions(+), 75 deletions(-)

diff --git a/2-perception/ar-textures/2-experiment.tex b/2-perception/ar-textures/2-experiment.tex
index 1bde343..a18316f 100644
--- a/2-perception/ar-textures/2-experiment.tex
+++ b/2-perception/ar-textures/2-experiment.tex
@@ -2,15 +2,17 @@
 \label{sec:experiment}
 
 \begin{subfigs}{setup}{%
-    (Left) The nine visuo-haptic textures used in the user study, selected from the HaTT database~\autocite{culbertson2014one}. %
+    User Study.
+    }[%
+    \item The nine visuo-haptic textures used in the user study, selected from the HaTT database~\autocite{culbertson2014one}. %
     The texture names were never shown, so as to prevent the use of the user's visual or haptic memory of the textures.
-    (Middle) Experimental setup. %
+    \item Experimental setup. %
     The participant sat in front of the tangible surfaces, which were augmented with visual textures displayed by the HoloLens~2 AR headset and haptic roughness textures rendered by the vibrotactile haptic device placed on the middle index phalanx. %
     A webcam above the surfaces tracked the finger movements.
-    (Right) First person view of the user study, as seen through the immersive AR headset HoloLens~2. %
+    \item First-person view of the user study, as seen through the immersive AR headset HoloLens~2. %
     The visual texture overlays are statically displayed on the surfaces, allowing the user to move around to view them from different angles. %
     The haptic roughness texture is generated based on HaTT data-driven texture models and finger speed, and it is rendered on the middle index phalanx as it slides on the considered surface.%
-    }
+    ]
     \subfig[0.32]{experiment/textures}%
     \subfig[0.32]{experiment/setup}%
     \subfig[0.32]{experiment/view}%
diff --git a/2-perception/xr-perception/2-method.tex b/2-perception/xr-perception/2-method.tex
index 60eb727..4a74866 100644
--- a/2-perception/xr-perception/2-method.tex
+++ b/2-perception/xr-perception/2-method.tex
@@ -44,20 +44,16 @@ The system is composed of three main components: the pose estimation of the trac
 
 \subsection{Pose Estimation and Virtual Environment Alignment}
 \label{sec:virtual_real_alignment}
 
-\begin{subfigs}{setup}{%
-    Visuo-haptic texture rendering system setup.
-    %
-    (a) HapCoil-One voice-coil actuator with a fiducial marker on top attached to a participant's right index finger. %
-    %
-    (b) HoloLens~2 AR headset, the two cardboard masks to switch the real or virtual environments with the same field of view, and the 3D-printed piece for attaching the masks to the headset. %
-    %
-    (c) User exploring a virtual vibrotactile texture on a tangible sheet of paper.
-    }
+\begin{subfigs}{setup}{Visuo-haptic texture rendering system setup.}[%
+    \item HapCoil-One voice-coil actuator with a fiducial marker on top attached to a participant's right index finger. %
+    \item HoloLens~2 AR headset, the two cardboard masks used to switch between the real and virtual environments while keeping the same field of view, and the 3D-printed piece for attaching the masks to the headset. %
+    \item User exploring a virtual vibrotactile texture on a tangible sheet of paper.
+    ]
     \hidesubcaption
-    \subfig[0.325]{method/device}[]
-    \subfig[0.65]{method/headset}[]
+    \subfig[0.325]{method/device}
+    \subfig[0.65]{method/headset}
     \par\vspace{2.5pt}
-    \subfig[0.992]{method/apparatus}[]
+    \subfig[0.992]{method/apparatus}
 \end{subfigs}
 
 A fiducial marker (AprilTag) is glued to the top of the actuator (see \figref{method/device}) to track the finger pose with a camera (StreamCam, Logitech), which is placed above the experimental setup and captures \qtyproduct{1280 x 720}{px} images at \qty{60}{\hertz} (see \figref{method/apparatus}).
diff --git a/2-perception/xr-perception/3-experiment.tex b/2-perception/xr-perception/3-experiment.tex
index eb922cb..fc3e4f5 100644
--- a/2-perception/xr-perception/3-experiment.tex
+++ b/2-perception/xr-perception/3-experiment.tex
@@ -17,9 +17,9 @@
 %The pictures are captured directly from the Microsoft HoloLens 2 headset.
     }
     \hidesubcaption
-    \subfig[0.32]{experiment/real}[]
-    \subfig[0.32]{experiment/mixed}[]
-    \subfig[0.32]{experiment/virtual}[]
+    \subfig[0.32]{experiment/real}
+    \subfig[0.32]{experiment/mixed}
+    \subfig[0.32]{experiment/virtual}
 \end{subfigswide}
 
 Our visuo-haptic rendering system, described in \secref{method}, allows free exploration of virtual vibrotactile textures on tangible surfaces directly touched with the bare finger to simulate roughness augmentation, while the visual rendering of the hand and environment can be controlled to be in AR or VR.
diff --git a/2-perception/xr-perception/4-results.tex b/2-perception/xr-perception/4-results.tex
index 4ed775d..09e0c56 100644
--- a/2-perception/xr-perception/4-results.tex
+++ b/2-perception/xr-perception/4-results.tex
@@ -36,20 +36,17 @@ All pairwise differences were statistically significant.
 
 \begin{subfigs}{discrimination_accuracy}{%
     Generalized Linear Mixed Model (GLMM) results in the vibrotactile texture roughness discrimination task, with non-parametric bootstrap 95\% confidence intervals.
-    %
-    (a) Percentage of trials in which the comparison texture was perceived as rougher than the reference texture, as a function of the amplitude difference between the two textures and the visual rendering.
-    %
+    }[%
+    \item Percentage of trials in which the comparison texture was perceived as rougher than the reference texture, as a function of the amplitude difference between the two textures and the visual rendering.
     Curves represent predictions from the GLMM (probit link function) and points are estimated marginal means.
-    %
-    (b) Estimated points of subjective equality (PSE) of each visual rendering.
+    \item Estimated points of subjective equality (PSE) of each visual rendering.
     %, defined as the amplitude difference at which both reference and comparison textures are perceived to be equivalent, \ie the accuracy in discriminating vibrotactile roughness.
-    %
-    (c) Estimated just-noticeable difference (JND) of each visual rendering.
+    \item Estimated just-noticeable difference (JND) of each visual rendering.
     %, defined as the minimum perceptual amplitude difference, \ie the sensitivity to vibrotactile roughness differences.
-    }
-    \subfig[0.85]{results/trial_predictions}[]\\
-    \subfig[0.45]{results/trial_pses}[]
-    \subfig[0.45]{results/trial_jnds}[]
+    ]
+    \subfig[0.85]{results/trial_predictions}\\
+    \subfig[0.45]{results/trial_pses}
+    \subfig[0.45]{results/trial_jnds}
 \end{subfigs}
 
@@ -82,16 +79,14 @@ All pairwise differences were statistically significant: \level{Real} \vs \level
 
 \begin{subfigs}{results_finger}{%
     Boxplots and geometric means of response time at the end of a trial, and finger position and finger speed measures when exploring the comparison texture, with pairwise Tukey's HSD tests: * is \pinf{0.05}, ** is \pinf{0.01} and *** is \pinf{0.001}.
-    %
-    (a) Response time of a trial.
-    %
-    (b) Distance traveled by the finger in a trial.
-    %
-    (b) Speed of the finger in a trial.
-    }
-    \subfig[0.32]{results/trial_response_times}[]
-    \subfig[0.32]{results/trial_distances}[]
-    \subfig[0.32]{results/trial_speeds}[]
+    }[%
+    \item Response time of a trial.
+    \item Distance traveled by the finger in a trial.
+    \item Speed of the finger in a trial.
+    ]
+    \subfig[0.32]{results/trial_response_times}
+    \subfig[0.32]{results/trial_distances}
+    \subfig[0.32]{results/trial_speeds}
 \end{subfigs}
 
@@ -135,8 +130,8 @@ The vibrations were felt a slightly weak overall (\response{Vibration Strength},
 
 \begin{subfigs}{question_plots}{%
     Boxplots of responses to questions with significant differences and pairwise Wilcoxon signed-rank tests with Holm-Bonferroni adjustment: * is \pinf{0.05}, ** is \pinf{0.01} and *** is \pinf{0.001}.
     }
-    \subfig[0.24]{results/questions_hand_ownership}[]
-    \subfig[0.24]{results/questions_hand_latency}[]
-    \subfig[0.24]{results/questions_hand_reference}[]
-    \subfig[0.24]{results/questions_hand_distraction}[]
+    \subfig[0.24]{results/questions_hand_ownership}
+    \subfig[0.24]{results/questions_hand_latency}
+    \subfig[0.24]{results/questions_hand_reference}
+    \subfig[0.24]{results/questions_hand_distraction}
 \end{subfigs}
diff --git a/3-manipulation/visual-hand/1-introduction.tex b/3-manipulation/visual-hand/1-introduction.tex
index 68d94e5..957dad7 100644
--- a/3-manipulation/visual-hand/1-introduction.tex
+++ b/3-manipulation/visual-hand/1-introduction.tex
@@ -13,12 +13,12 @@
     fingers' joints and phalanges \emph{(Skeleton, Skel)}, and %
     semi-transparent 3D hand model \emph{(Mesh)}.
     }
-    \subfig[0.15]{method/hands-none}[None]
-    \subfig[0.15]{method/hands-occlusion}[Occlusion (Occl)]
-    \subfig[0.15]{method/hands-tips}[Tips]
-    \subfig[0.15]{method/hands-contour}[Contour (Cont)]
-    \subfig[0.15]{method/hands-skeleton}[Skeleton (Skel)]
-    \subfig[0.15]{method/hands-mesh}[Mesh]
+    \subfig[0.15]{method/hands-none}%[None]
+    \subfig[0.15]{method/hands-occlusion}%[Occlusion (Occl)]
+    \subfig[0.15]{method/hands-tips}%[Tips]
+    \subfig[0.15]{method/hands-contour}%[Contour (Cont)]
+    \subfig[0.15]{method/hands-skeleton}%[Skeleton (Skel)]
+    \subfig[0.15]{method/hands-mesh}%[Mesh]
 \end{subfigswide}
 
 Augmented reality (AR) integrates virtual content into our real-world surroundings, giving the illusion of a single environment and promising natural and seamless interactions with real and virtual objects.
diff --git a/3-manipulation/visual-hand/2-method.tex b/3-manipulation/visual-hand/2-method.tex
index e371d21..83bdd79 100644
--- a/3-manipulation/visual-hand/2-method.tex
+++ b/3-manipulation/visual-hand/2-method.tex
@@ -76,15 +76,16 @@ It can be seen as a filled version of the Contour hand rendering, thus partially
 \label{sec:tasks}
 
 \begin{subfigs}{tasks}{%
-    Experiment \#1. The two manipulation tasks: %
-    (a) pushing a virtual cube along a table towards a target placed on the same surface; %
-    (b) grasping and lifting a virtual cube towards a target placed on a 20-cm-higher plane. %
+    Experiment \#1. The two manipulation tasks:
+    }[%
+    \item pushing a virtual cube along a table towards a target placed on the same surface; %
+    \item grasping and lifting a virtual cube towards a target placed on a 20-cm-higher plane. %
     Both pictures show the cube to manipulate in the middle (5-cm-edge and opaque) and the eight possible targets to reach (7-cm-edge volume and semi-transparent). %
     Only one target at a time was shown during the experiments.%
-    }
-    \subfig[0.23]{method/task-push}[Push task]
-    \subfig[0.23]{method/task-grasp}[Grasp task]
+    ]
+    \subfig[0.23]{method/task-push}
+    \subfig[0.23]{method/task-grasp}
 \end{subfigs}
 
 Following the guidelines of \textcite{bergstrom2021how} for designing object manipulation tasks, we considered two variations of a 3D pick-and-place task, commonly found in interaction and manipulation studies~\autocite{prachyabrued2014visual, maisto2017evaluation, meli2018combining, blaga2017usability, vanveldhuizen2021effect}.
diff --git a/3-manipulation/visual-hand/3-3-ranks.tex b/3-manipulation/visual-hand/3-3-ranks.tex
index 4b781a1..9cb454f 100644
--- a/3-manipulation/visual-hand/3-3-ranks.tex
+++ b/3-manipulation/visual-hand/3-3-ranks.tex
@@ -8,8 +8,8 @@
     %
     ** is \pinf{0.01} and * is \pinf{0.05}.
     }
-    \subfig[0.24]{results/Ranks-Push}[Push Task]
-    \subfig[0.24]{results/Ranks-Grasp}[Grasp Task]
+    \subfig[0.24]{results/Ranks-Push}
+    \subfig[0.24]{results/Ranks-Grasp}
 \end{subfigs}
 
 \figref{ranks} shows the ranking of each visual hand rendering for the Push and Grasp tasks.
diff --git a/3-manipulation/visual-hand/3-results.tex b/3-manipulation/visual-hand/3-results.tex
index 2cc8558..d288766 100644
--- a/3-manipulation/visual-hand/3-results.tex
+++ b/3-manipulation/visual-hand/3-results.tex
@@ -8,10 +8,10 @@
     %
     and Tukey's HSD pairwise comparisons: *** is \pinf{0.001}, ** is \pinf{0.01}, and * is \pinf{0.05}.
     }
-    \subfig[0.24]{results/Push-CompletionTime-Hand-Overall-Means}[Time to complete a trial.]
-    \subfig[0.24]{results/Push-ContactsCount-Hand-Overall-Means}[Number of contacts with the cube.]
+    \subfig[0.24]{results/Push-CompletionTime-Hand-Overall-Means}%[Time to complete a trial.]
+    \subfig[0.24]{results/Push-ContactsCount-Hand-Overall-Means}%[Number of contacts with the cube.]
     \hspace*{10mm}
-    \subfig[0.24]{results/Push-MeanContactTime-Hand-Overall-Means}[Mean time spent on each contact.]
+    \subfig[0.24]{results/Push-MeanContactTime-Hand-Overall-Means}%[Mean time spent on each contact.]
 \end{subfigs}
 
 \begin{subfigswide}{grasp_results}{%
     %
     and Tukey's HSD pairwise comparisons: *** is \pinf{0.001}, ** is \pinf{0.01}, and * is \pinf{0.05}.
     }
-    \subfig[0.24]{results/Grasp-CompletionTime-Hand-Overall-Means}[Time to complete a trial.]
-    \subfig[0.24]{results/Grasp-ContactsCount-Hand-Overall-Means}[Number of contacts with the cube.]
-    \subfig[0.24]{results/Grasp-MeanContactTime-Hand-Overall-Means}[Mean time spent on each contact.]
-    \subfig[0.24]{results/Grasp-GripAperture-Hand-Overall-Means}[\centering Distance between thumb and the other fingertips when grasping.]
+    \subfig[0.24]{results/Grasp-CompletionTime-Hand-Overall-Means}%[Time to complete a trial.]
+    \subfig[0.24]{results/Grasp-ContactsCount-Hand-Overall-Means}%[Number of contacts with the cube.]
+    \subfig[0.24]{results/Grasp-MeanContactTime-Hand-Overall-Means}%[Mean time spent on each contact.]
+    \subfig[0.24]{results/Grasp-GripAperture-Hand-Overall-Means}%[\centering Distance between thumb and the other fingertips when grasping.]
 \end{subfigswide}
 
 Results of each trial measure were analyzed with a linear mixed model (LMM), with the order of the two manipulation tasks and the six visual hand renderings (Order), the visual hand renderings (Hand), the target volume position (Target), and their interactions as fixed effects and the Participant as a random intercept.
diff --git a/3-manipulation/visuo-haptic-hand/2-method.tex b/3-manipulation/visuo-haptic-hand/2-method.tex
index 7f440d8..02f556a 100644
--- a/3-manipulation/visuo-haptic-hand/2-method.tex
+++ b/3-manipulation/visuo-haptic-hand/2-method.tex
@@ -79,8 +79,8 @@ Similarly, we designed the distance vibration technique (Dist) so that interpene
     Both pictures show the cube to manipulate in the middle (\qty{5}{\cm} and opaque) and the eight possible targets to reach (\qty{7}{\cm} cube and semi-transparent). %
     Only one target at a time was shown during the experiments.%
     }
-    \subfig[0.23]{method/task-push}[Push task]
-    \subfig[0.23]{method/task-grasp}[Grasp task]
+    \subfig[0.23]{method/task-push}
+    \subfig[0.23]{method/task-grasp}
 \end{subfigs}
 
 \begin{subfigswide}{push_results}{%
     %
     and Tukey's HSD pairwise comparisons: *** is \pinf{0.001}, ** is \pinf{0.01}, and * is \pinf{0.05}.
     }
-    \subfig[0.24]{results/Push-CompletionTime-Location-Overall-Means}[Time to complete a trial.]
-    \subfig[0.24]{results/Push-Contacts-Location-Overall-Means}[Number of contacts with the cube.]
-    \subfig[0.24]{results/Push-TimePerContact-Location-Overall-Means}[Mean time spent on each contact.]
-    \subfig[0.24]{results/Push-TimePerContact-Hand-Overall-Means}[Mean time spent on each contact.]
+    \subfig[0.24]{results/Push-CompletionTime-Location-Overall-Means}%[Time to complete a trial.]
+    \subfig[0.24]{results/Push-Contacts-Location-Overall-Means}%[Number of contacts with the cube.]
+    \subfig[0.24]{results/Push-TimePerContact-Location-Overall-Means}%[Mean time spent on each contact.]
+    \subfig[0.24]{results/Push-TimePerContact-Hand-Overall-Means}%[Mean time spent on each contact.]
 \end{subfigswide}
 
 We considered the same two tasks as in Experiment \#1, described in \secref[visual_hand]{tasks}, which we analyzed separately, considering four independent within-subject variables:
diff --git a/3-manipulation/visuo-haptic-hand/3-results.tex b/3-manipulation/visuo-haptic-hand/3-results.tex
index dec541d..b168037 100644
--- a/3-manipulation/visuo-haptic-hand/3-results.tex
+++ b/3-manipulation/visuo-haptic-hand/3-results.tex
@@ -8,10 +8,10 @@
     %
     and Tukey's HSD pairwise comparisons: *** is \pinf{0.001}, ** is \pinf{0.01}, and * is \pinf{0.05}.
     }
-    \subfig[0.24]{results/Grasp-CompletionTime-Location-Overall-Means}[Time to complete a trial.]
-    \subfig[0.24]{results/Grasp-Contacts-Location-Overall-Means}[Number of contacts with the cube.]
-    \subfig[0.24]{results/Grasp-TimePerContact-Location-Overall-Means}[Mean time spent on each contact.]
-    \subfig[0.24]{results/Grasp-GripAperture-Location-Overall-Means}[\centering Distance between thumb and the other fingertips when grasping.]
+    \subfig[0.24]{results/Grasp-CompletionTime-Location-Overall-Means}%[Time to complete a trial.]
+    \subfig[0.24]{results/Grasp-Contacts-Location-Overall-Means}%[Number of contacts with the cube.]
+    \subfig[0.24]{results/Grasp-TimePerContact-Location-Overall-Means}%[Mean time spent on each contact.]
+    \subfig[0.24]{results/Grasp-GripAperture-Location-Overall-Means}%[\centering Distance between thumb and the other fingertips when grasping.]
 \end{subfigswide}
 
 Results were analyzed similarly to the first experiment (see \secref{results}).
diff --git a/config/thesis_commands.tex b/config/thesis_commands.tex
index 4129f0c..666fe8e 100644
--- a/config/thesis_commands.tex
+++ b/config/thesis_commands.tex
@@ -28,3 +28,46 @@
 \newcommand{\partref}[1]{Part~\ref{#1}}
 \renewcommand{\secref}[2][\labelprefix]{Section~\ref{#1:sec:#2}}
 \renewcommand{\tabref}[2][\labelprefix]{Table~\ref{#1:tab:#2}}
+
+% Images. Relies on subfig (\subfloat), adjustbox's `export' option (valign) and enumitem's `inline' option (enumerate*).
+% Example:
+% \begin{subfigs}[htbp]{label}{Figure caption.}[\item First subcaption. \item Second subcaption.]
+%   \subfig{subfig1}% natural size, default bottom alignment
+%   \subfig[0.45][t]{subfig2}[Subfig caption]% width fraction of \linewidth, valign (t, c or b), image, subcaption
+% \end{subfigs}
+% Reference later with \figref{label}.
+\RenewDocumentEnvironment{subfigs}{O{htbp} m m o}{%
+    \begin{figure}[#1]%
+    \centering%
+}{%
+    \caption[#3]{%
+        #3%
+        \IfValueT{#4}{ % deliberate space before the subfig titles
+            \begin{enumerate*}[label=\textbf{(\alph*)}]%
+                #4%
+            \end{enumerate*}%
+        }%
+    }%
+    \label{fig:#2}%
+    \end{figure}%
+}

+\NewDocumentCommand{\subfigsheight}{m}{%
+    \setkeys{Gin}{height=#1}%
+}
+
+\RenewDocumentCommand{\subfig}{o O{b} m O{}}{%
+    \hfill%
+    \subfloat[#4\label{fig:#3}]{%
+        \IfValueTF{#1}{%
+            \includegraphics[width=#1\linewidth, valign=#2, keepaspectratio]{#3}%
+        }{%
+            \includegraphics[valign=#2, keepaspectratio]{#3}%
+        }%
+    }%
+    \hfill%
+}
+
+\captionsetup{%
+    labelfont={sf,bf,up}, % sans-serif, bold, upright
+}
\ No newline at end of file
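
Below is a minimal usage sketch of the overridden commands, mirroring the figures converted above; the label `example' and the image paths `example/panel-a' and `example/panel-b' are placeholders, not files from the thesis:

    % Sketch only: label and panel image paths are hypothetical.
    \begin{subfigs}{example}{Main caption of the figure.}[%
        \item Description of the first panel. %
        \item Description of the second panel.%
    ]
        \subfig[0.48]{example/panel-a}%
        \subfig[0.48][t]{example/panel-b}% second optional argument: vertical alignment (t, c or b)
    \end{subfigs}

The main caption is typeset as "Main caption of the figure. (a) Description of the first panel. (b) Description of the second panel.", and \subfloat letters each panel to match; \hidesubcaption (defined elsewhere in the thesis) appears to suppress the printed panel labels when only the main caption should carry them. The whole figure is referenced with \figref{example} and a single panel with \figref{example/panel-a}.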