diff --git "a/valset.json" "b/valset.json" --- "a/valset.json" +++ "b/valset.json" @@ -15,9 +15,9 @@ "Conditional Discriminator", "Transformer Encoders" ], - "figure_path": "./MISSQA/figures/0_2410.20359v2_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2410.20359v2_figure_2.png", "paperid": "2410.20359v2", - "paper_path": "./MISSQA/papers/2410.20359v2.json", + "paper_path": "./MISS-QA/papers/2410.20359v2.json", "figure_id": "2410.20359v2_figure_2.png", "caption": "Figure 2: Network architecture. During training, we introduce a GAN structure based on conditional denoising diffusion to capture the complex distribution of gesture sequences in a multi-step process, enabling larger sampling step sizes. During inference, we use large step sizes and fewer steps for sampling, according to the input audio control signal, to achieve fast, high-quality gesture sequences, thus supporting real-time tasks.", "qtype": "Design_Rationale" @@ -37,9 +37,9 @@ "masked_elements": [ "Posterior Sampling" ], - "figure_path": "./MISSQA/figures/1_2410.20359v2_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2410.20359v2_figure_2.png", "paperid": "2410.20359v2", - "paper_path": "./MISSQA/papers/2410.20359v2.json", + "paper_path": "./MISS-QA/papers/2410.20359v2.json", "figure_id": "2410.20359v2_figure_2.png", "caption": "Figure 2: Network architecture. During training, we introduce a GAN structure based on conditional denoising diffusion to capture the complex distribution of gesture sequences in a multi-step process, enabling larger sampling step sizes. During inference, we use large step sizes and fewer steps for sampling, according to the input audio control signal, to achieve fast, high-quality gesture sequences, thus supporting real-time tasks.", "qtype": "Design_Rationale" @@ -64,9 +64,9 @@ "masked_elements": [ "pre-trained LLM" ], - "figure_path": "./MISSQA/figures/0_2408.11330v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2408.11330v1_figure_1.png", "paperid": "2408.11330v1", - "paper_path": "./MISSQA/papers/2408.11330v1.json", + "paper_path": "./MISS-QA/papers/2408.11330v1.json", "figure_id": "2408.11330v1_figure_1.png", "caption": "Figure 1: Overview of the proposed LAPT. This framework consists of two stages. In the learning stage of design principles, LLM is driven by specific prompts to learn general design principles from a set of architectures. In the architecture search stage, the learned principles are applied to initialize the search space for each new task. Then, architectures found in the refined search space are used to update these principles, aiming to build the task-specific search space.", "qtype": "Design_Rationale" @@ -92,9 +92,9 @@ "masked_elements": [ "principle adaptation" ], - "figure_path": "./MISSQA/figures/1_2408.11330v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2408.11330v1_figure_1.png", "paperid": "2408.11330v1", - "paper_path": "./MISSQA/papers/2408.11330v1.json", + "paper_path": "./MISS-QA/papers/2408.11330v1.json", "figure_id": "2408.11330v1_figure_1.png", "caption": "Figure 1: Overview of the proposed LAPT. This framework consists of two stages. In the learning stage of design principles, LLM is driven by specific prompts to learn general design principles from a set of architectures. In the architecture search stage, the learned principles are applied to initialize the search space for each new task. 
Then, architectures found in the refined search space are used to update these principles, aiming to build the task-specific search space.", "qtype": "Design_Rationale" @@ -119,9 +119,9 @@ "masked_elements": [ "Prompt Architecture Implementation" ], - "figure_path": "./MISSQA/figures/2_2408.11330v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2408.11330v1_figure_1.png", "paperid": "2408.11330v1", - "paper_path": "./MISSQA/papers/2408.11330v1.json", + "paper_path": "./MISS-QA/papers/2408.11330v1.json", "figure_id": "2408.11330v1_figure_1.png", "caption": "Figure 1: Overview of the proposed LAPT. This framework consists of two stages. In the learning stage of design principles, LLM is driven by specific prompts to learn general design principles from a set of architectures. In the architecture search stage, the learned principles are applied to initialize the search space for each new task. Then, architectures found in the refined search space are used to update these principles, aiming to build the task-specific search space.", "qtype": "Implementation_Details" @@ -146,9 +146,9 @@ "masked_elements": [ "Principle Adaptation" ], - "figure_path": "./MISSQA/figures/3_2408.11330v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2408.11330v1_figure_1.png", "paperid": "2408.11330v1", - "paper_path": "./MISSQA/papers/2408.11330v1.json", + "paper_path": "./MISS-QA/papers/2408.11330v1.json", "figure_id": "2408.11330v1_figure_1.png", "caption": "Figure 1: Overview of the proposed LAPT. This framework consists of two stages. In the learning stage of design principles, LLM is driven by specific prompts to learn general design principles from a set of architectures. In the architecture search stage, the learned principles are applied to initialize the search space for each new task. Then, architectures found in the refined search space are used to update these principles, aiming to build the task-specific search space.", "qtype": "Implementation_Details" @@ -176,9 +176,9 @@ "Ground-aware Depth Assumption", "Virtual Camera Projection" ], - "figure_path": "./MISSQA/figures/0_2410.13864v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2410.13864v1_figure_2.png", "paperid": "2410.13864v1", - "paper_path": "./MISSQA/papers/2410.13864v1.json", + "paper_path": "./MISS-QA/papers/2410.13864v1.json", "figure_id": "2410.13864v1_figure_2.png", "caption": "Figure 2: Overview of UniDrive framework. We transform the input images into a unified virtual camera space to achieve universal driving perception. To estimate the depth of pixels in the virtual view for projection, we propose a ground-aware depth assumption strategy. To obtain the most effective virtual camera space for multiple real camera configurations, we propose a data-driven CMA-ES (Hansen, 2016) based optimization strategy. To evaluate the efficacy of our framework, we propose an automatic data generation platform in CARLA (Dosovitskiy et al., 2017).", "qtype": "Implementation_Details" @@ -207,9 +207,9 @@ "masked_elements": [ "Iterative Optimization" ], - "figure_path": "./MISSQA/figures/1_2410.13864v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2410.13864v1_figure_2.png", "paperid": "2410.13864v1", - "paper_path": "./MISSQA/papers/2410.13864v1.json", + "paper_path": "./MISS-QA/papers/2410.13864v1.json", "figure_id": "2410.13864v1_figure_2.png", "caption": "Figure 2: Overview of UniDrive framework. We transform the input images into a unified virtual camera space to achieve universal driving perception. 
To estimate the depth of pixels in the virtual view for projection, we propose a ground-aware depth assumption strategy. To obtain the most effective virtual camera space for multiple real camera configurations, we propose a data-driven CMA-ES (Hansen, 2016) based optimization strategy. To evaluate the efficacy of our framework, we propose an automatic data generation platform in CARLA (Dosovitskiy et al., 2017).", "qtype": "Implementation_Details" @@ -238,9 +238,9 @@ "Ground-aware Depth Assumption", "Minimizing Projection Error" ], - "figure_path": "./MISSQA/figures/2_2410.13864v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2410.13864v1_figure_2.png", "paperid": "2410.13864v1", - "paper_path": "./MISSQA/papers/2410.13864v1.json", + "paper_path": "./MISS-QA/papers/2410.13864v1.json", "figure_id": "2410.13864v1_figure_2.png", "caption": "Figure 2: Overview of UniDrive framework. We transform the input images into a unified virtual camera space to achieve universal driving perception. To estimate the depth of pixels in the virtual view for projection, we propose a ground-aware depth assumption strategy. To obtain the most effective virtual camera space for multiple real camera configurations, we propose a data-driven CMA-ES (Hansen, 2016) based optimization strategy. To evaluate the efficacy of our framework, we propose an automatic data generation platform in CARLA (Dosovitskiy et al., 2017).", "qtype": "Literature_Background" @@ -267,9 +267,9 @@ "masked_elements": [ "Adaptive Feature Refiner" ], - "figure_path": "./MISSQA/figures/0_2408.13712v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2408.13712v1_figure_2.png", "paperid": "2408.13712v1", - "paper_path": "./MISSQA/papers/2408.13712v1.json", + "paper_path": "./MISS-QA/papers/2408.13712v1.json", "figure_id": "2408.13712v1_figure_2.png", "caption": "Figure 2: The overall architecture of RMARN proposed in this article. The Global Pooling Similarity module directly calculates the cosine distance between the text feature sequence and the point cloud feature sequence after pooling, while the Riemann Local Similarity module considers the point cloud sequence and the text sequence as two fields on a manifold and calculates the similarity between any two token pairs. Among them, Tμ(Pi)superscriptsubscript𝑇𝜇subscript𝑃𝑖T_{\\mu}^{(P_{i})}italic_T start_POSTSUBSCRIPT italic_μ end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ( italic_P start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT ) end_POSTSUPERSCRIPT and Pμ(Qi)superscriptsubscript𝑃𝜇subscript𝑄𝑖P_{\\mu}^{(Q_{i})}italic_P start_POSTSUBSCRIPT italic_μ end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ( italic_Q start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT ) end_POSTSUPERSCRIPT represent the i-th token of the text and point cloud feature sequence, respectively.", "qtype": "Literature_Background" @@ -295,9 +295,9 @@ "masked_elements": [ "Riemann Local Similarity" ], - "figure_path": "./MISSQA/figures/1_2408.13712v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2408.13712v1_figure_2.png", "paperid": "2408.13712v1", - "paper_path": "./MISSQA/papers/2408.13712v1.json", + "paper_path": "./MISS-QA/papers/2408.13712v1.json", "figure_id": "2408.13712v1_figure_2.png", "caption": "Figure 2: The overall architecture of RMARN proposed in this article. 
The Global Pooling Similarity module directly calculates the cosine distance between the text feature sequence and the point cloud feature sequence after pooling, while the Riemann Local Similarity module considers the point cloud sequence and the text sequence as two fields on a manifold and calculates the similarity between any two token pairs. Among them, Tμ(Pi)superscriptsubscript𝑇𝜇subscript𝑃𝑖T_{\\mu}^{(P_{i})}italic_T start_POSTSUBSCRIPT italic_μ end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ( italic_P start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT ) end_POSTSUPERSCRIPT and Pμ(Qi)superscriptsubscript𝑃𝜇subscript𝑄𝑖P_{\\mu}^{(Q_{i})}italic_P start_POSTSUBSCRIPT italic_μ end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ( italic_Q start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT ) end_POSTSUPERSCRIPT represent the i-th token of the text and point cloud feature sequence, respectively.", "qtype": "Literature_Background" @@ -324,9 +324,9 @@ "masked_elements": [ "Low Rank Filter" ], - "figure_path": "./MISSQA/figures/2_2408.13712v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2408.13712v1_figure_2.png", "paperid": "2408.13712v1", - "paper_path": "./MISSQA/papers/2408.13712v1.json", + "paper_path": "./MISS-QA/papers/2408.13712v1.json", "figure_id": "2408.13712v1_figure_2.png", "caption": "Figure 2: The overall architecture of RMARN proposed in this article. The Global Pooling Similarity module directly calculates the cosine distance between the text feature sequence and the point cloud feature sequence after pooling, while the Riemann Local Similarity module considers the point cloud sequence and the text sequence as two fields on a manifold and calculates the similarity between any two token pairs. Among them, Tμ(Pi)superscriptsubscript𝑇𝜇subscript𝑃𝑖T_{\\mu}^{(P_{i})}italic_T start_POSTSUBSCRIPT italic_μ end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ( italic_P start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT ) end_POSTSUPERSCRIPT and Pμ(Qi)superscriptsubscript𝑃𝜇subscript𝑄𝑖P_{\\mu}^{(Q_{i})}italic_P start_POSTSUBSCRIPT italic_μ end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ( italic_Q start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT ) end_POSTSUPERSCRIPT represent the i-th token of the text and point cloud feature sequence, respectively.", "qtype": "Experimental_Results" @@ -352,9 +352,9 @@ "masked_elements": [ "Adaptive Feature Refiner" ], - "figure_path": "./MISSQA/figures/3_2408.13712v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2408.13712v1_figure_2.png", "paperid": "2408.13712v1", - "paper_path": "./MISSQA/papers/2408.13712v1.json", + "paper_path": "./MISS-QA/papers/2408.13712v1.json", "figure_id": "2408.13712v1_figure_2.png", "caption": "Figure 2: The overall architecture of RMARN proposed in this article. The Global Pooling Similarity module directly calculates the cosine distance between the text feature sequence and the point cloud feature sequence after pooling, while the Riemann Local Similarity module considers the point cloud sequence and the text sequence as two fields on a manifold and calculates the similarity between any two token pairs. 
Among them, Tμ(Pi)superscriptsubscript𝑇𝜇subscript𝑃𝑖T_{\\mu}^{(P_{i})}italic_T start_POSTSUBSCRIPT italic_μ end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ( italic_P start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT ) end_POSTSUPERSCRIPT and Pμ(Qi)superscriptsubscript𝑃𝜇subscript𝑄𝑖P_{\\mu}^{(Q_{i})}italic_P start_POSTSUBSCRIPT italic_μ end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ( italic_Q start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT ) end_POSTSUPERSCRIPT represent the i-th token of the text and point cloud feature sequence, respectively.", "qtype": "Experimental_Results" @@ -381,9 +381,9 @@ "masked_elements": [ "Execution-based Representative Sample Selection" ], - "figure_path": "./MISSQA/figures/0_2408.12159v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2408.12159v1_figure_1.png", "paperid": "2408.12159v1", - "paper_path": "./MISSQA/papers/2408.12159v1.json", + "paper_path": "./MISS-QA/papers/2408.12159v1.json", "figure_id": "2408.12159v1_figure_1.png", "caption": "Figure 1: The overview of SBLLM.", "qtype": "Experimental_Results" @@ -402,9 +402,9 @@ "masked_elements": [ "Execution-based Representative Sample Selection" ], - "figure_path": "./MISSQA/figures/1_2408.12159v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2408.12159v1_figure_1.png", "paperid": "2408.12159v1", - "paper_path": "./MISSQA/papers/2408.12159v1.json", + "paper_path": "./MISS-QA/papers/2408.12159v1.json", "figure_id": "2408.12159v1_figure_1.png", "caption": "Figure 1: The overview of SBLLM.", "qtype": "Others" @@ -423,9 +423,9 @@ "masked_elements": [ "Genetic Operator-inspired Chain-of-thought Prompting" ], - "figure_path": "./MISSQA/figures/2_2408.12159v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2408.12159v1_figure_1.png", "paperid": "2408.12159v1", - "paper_path": "./MISSQA/papers/2408.12159v1.json", + "paper_path": "./MISS-QA/papers/2408.12159v1.json", "figure_id": "2408.12159v1_figure_1.png", "caption": "Figure 1: The overview of SBLLM.", "qtype": "Others" @@ -450,9 +450,9 @@ "Foldseek", "protein 3Di sequences" ], - "figure_path": "./MISSQA/figures/0_2411.15215v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.15215v1_figure_1.png", "paperid": "2411.15215v1", - "paper_path": "./MISSQA/papers/2411.15215v1.json", + "paper_path": "./MISS-QA/papers/2411.15215v1.json", "figure_id": "2411.15215v1_figure_1.png", "caption": "Figure 1: Overview of the proposed hierarchical pre-training paradigm containing two stages. a, In stage I, S2ALM aims at general sequence-structure learning with protein sequences and structures. In stage II, S2ALM learns antibody specific multi-level knowledge using antibody sequences and structures. b, Masked Language Modeling (MLM) reconstructs the masked tokens based on the contextualized information. c, Sequence-Structure Matching (SSM) identifies the matching relationships between 1D and 3Di sequences. d, Cross-Level Reconstruction (CLR) reconstructs the corrupted tokens based on hybrid information from both 1D and 3Di sequences.", "qtype": "Others" @@ -472,9 +472,9 @@ "masked_elements": [ "IgFold" ], - "figure_path": "./MISSQA/figures/1_2411.15215v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.15215v1_figure_1.png", "paperid": "2411.15215v1", - "paper_path": "./MISSQA/papers/2411.15215v1.json", + "paper_path": "./MISS-QA/papers/2411.15215v1.json", "figure_id": "2411.15215v1_figure_1.png", "caption": "Figure 1: Overview of the proposed hierarchical pre-training paradigm containing two stages. 
a, In stage I, S2ALM aims at general sequence-structure learning with protein sequences and structures. In stage II, S2ALM learns antibody specific multi-level knowledge using antibody sequences and structures. b, Masked Language Modeling (MLM) reconstructs the masked tokens based on the contextualized information. c, Sequence-Structure Matching (SSM) identifies the matching relationships between 1D and 3Di sequences. d, Cross-Level Reconstruction (CLR) reconstructs the corrupted tokens based on hybrid information from both 1D and 3Di sequences.", "qtype": "Others" @@ -502,9 +502,9 @@ "1D sequences", "3Di sequences" ], - "figure_path": "./MISSQA/figures/2_2411.15215v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.15215v1_figure_1.png", "paperid": "2411.15215v1", - "paper_path": "./MISSQA/papers/2411.15215v1.json", + "paper_path": "./MISS-QA/papers/2411.15215v1.json", "figure_id": "2411.15215v1_figure_1.png", "caption": "Figure 1: Overview of the proposed hierarchical pre-training paradigm containing two stages. a, In stage I, S2ALM aims at general sequence-structure learning with protein sequences and structures. In stage II, S2ALM learns antibody specific multi-level knowledge using antibody sequences and structures. b, Masked Language Modeling (MLM) reconstructs the masked tokens based on the contextualized information. c, Sequence-Structure Matching (SSM) identifies the matching relationships between 1D and 3Di sequences. d, Cross-Level Reconstruction (CLR) reconstructs the corrupted tokens based on hybrid information from both 1D and 3Di sequences.", "qtype": "Design_Rationale" @@ -532,9 +532,9 @@ "masked_elements": [ "Sequence-Structure Matching" ], - "figure_path": "./MISSQA/figures/3_2411.15215v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2411.15215v1_figure_1.png", "paperid": "2411.15215v1", - "paper_path": "./MISSQA/papers/2411.15215v1.json", + "paper_path": "./MISS-QA/papers/2411.15215v1.json", "figure_id": "2411.15215v1_figure_1.png", "caption": "Figure 1: Overview of the proposed hierarchical pre-training paradigm containing two stages. a, In stage I, S2ALM aims at general sequence-structure learning with protein sequences and structures. In stage II, S2ALM learns antibody specific multi-level knowledge using antibody sequences and structures. b, Masked Language Modeling (MLM) reconstructs the masked tokens based on the contextualized information. c, Sequence-Structure Matching (SSM) identifies the matching relationships between 1D and 3Di sequences. d, Cross-Level Reconstruction (CLR) reconstructs the corrupted tokens based on hybrid information from both 1D and 3Di sequences.", "qtype": "Design_Rationale" @@ -562,9 +562,9 @@ "masked_elements": [ "Low-rank Approx." ], - "figure_path": "./MISSQA/figures/0_2411.17150v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.17150v1_figure_1.png", "paperid": "2411.17150v1", - "paper_path": "./MISSQA/papers/2411.17150v1.json", + "paper_path": "./MISS-QA/papers/2411.17150v1.json", "figure_id": "2411.17150v1_figure_1.png", "caption": "Figure 1: \nWe present CASS, object-level Context-Aware training-free open-vocabulary Semantic Segmentation model. 
(a) Overview:\nOur method distills the vision foundation model’s (VFM) object-level contextual spectral graph into CLIP’s attention and refines query text embeddings towards object-specific semantics.\n(b) Object-Level Context:\nThis result illustrates how incorporating object-level context improves segmentation accuracy by unifying object-wise components into a user-defined object class; for example, (top row) our approach precisely segments the truck’s body, wheels, and cargo area and (bottom row) accurately groups elements into a single object, such as the sheep, whereas baselines [16, 25] often fail to achieve this unified segmentation.", "qtype": "Design_Rationale" @@ -591,9 +591,9 @@ "Text Embeddings", "Adjusted Text Embeddings" ], - "figure_path": "./MISSQA/figures/1_2411.17150v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.17150v1_figure_1.png", "paperid": "2411.17150v1", - "paper_path": "./MISSQA/papers/2411.17150v1.json", + "paper_path": "./MISS-QA/papers/2411.17150v1.json", "figure_id": "2411.17150v1_figure_1.png", "caption": "Figure 1: \nWe present CASS, object-level Context-Aware training-free open-vocabulary Semantic Segmentation model. (a) Overview:\nOur method distills the vision foundation model’s (VFM) object-level contextual spectral graph into CLIP’s attention and refines query text embeddings towards object-specific semantics.\n(b) Object-Level Context:\nThis result illustrates how incorporating object-level context improves segmentation accuracy by unifying object-wise components into a user-defined object class; for example, (top row) our approach precisely segments the truck’s body, wheels, and cargo area and (bottom row) accurately groups elements into a single object, such as the sheep, whereas baselines [16, 25] often fail to achieve this unified segmentation.", "qtype": "Design_Rationale" @@ -618,9 +618,9 @@ "masked_elements": [ "Graph Matching" ], - "figure_path": "./MISSQA/figures/2_2411.17150v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.17150v1_figure_1.png", "paperid": "2411.17150v1", - "paper_path": "./MISSQA/papers/2411.17150v1.json", + "paper_path": "./MISS-QA/papers/2411.17150v1.json", "figure_id": "2411.17150v1_figure_1.png", "caption": "Figure 1: \nWe present CASS, object-level Context-Aware training-free open-vocabulary Semantic Segmentation model. 
(a) Overview:\nOur method distills the vision foundation model’s (VFM) object-level contextual spectral graph into CLIP’s attention and refines query text embeddings towards object-specific semantics.\n(b) Object-Level Context:\nThis result illustrates how incorporating object-level context improves segmentation accuracy by unifying object-wise components into a user-defined object class; for example, (top row) our approach precisely segments the truck’s body, wheels, and cargo area and (bottom row) accurately groups elements into a single object, such as the sheep, whereas baselines [16, 25] often fail to achieve this unified segmentation.", "qtype": "Implementation_Details" @@ -639,9 +639,9 @@ "masked_elements": [ "Corrupt Module" ], - "figure_path": "./MISSQA/figures/0_2409.12730v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2409.12730v1_figure_2.png", "paperid": "2409.12730v1", - "paper_path": "./MISSQA/papers/2409.12730v1.json", + "paper_path": "./MISS-QA/papers/2409.12730v1.json", "figure_id": "2409.12730v1_figure_2.png", "caption": "Figure 2: An overview of proposed AEL.", "qtype": "Implementation_Details" @@ -667,9 +667,9 @@ "Sparse Gating Network", "Parent-AEs" ], - "figure_path": "./MISSQA/figures/1_2409.12730v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2409.12730v1_figure_2.png", "paperid": "2409.12730v1", - "paper_path": "./MISSQA/papers/2409.12730v1.json", + "paper_path": "./MISS-QA/papers/2409.12730v1.json", "figure_id": "2409.12730v1_figure_2.png", "caption": "Figure 2: An overview of proposed AEL.", "qtype": "Implementation_Details" @@ -694,9 +694,9 @@ "masked_elements": [ "Corrupt Module" ], - "figure_path": "./MISSQA/figures/2_2409.12730v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2409.12730v1_figure_2.png", "paperid": "2409.12730v1", - "paper_path": "./MISSQA/papers/2409.12730v1.json", + "paper_path": "./MISS-QA/papers/2409.12730v1.json", "figure_id": "2409.12730v1_figure_2.png", "caption": "Figure 2: An overview of proposed AEL.", "qtype": "Literature_Background" @@ -723,9 +723,9 @@ "Sparse Gating Network", "Parent-AEs" ], - "figure_path": "./MISSQA/figures/3_2409.12730v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2409.12730v1_figure_2.png", "paperid": "2409.12730v1", - "paper_path": "./MISSQA/papers/2409.12730v1.json", + "paper_path": "./MISS-QA/papers/2409.12730v1.json", "figure_id": "2409.12730v1_figure_2.png", "caption": "Figure 2: An overview of proposed AEL.", "qtype": "Literature_Background" @@ -751,9 +751,9 @@ "masked_elements": [ "Gated Fusion" ], - "figure_path": "./MISSQA/figures/0_2408.08709v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2408.08709v1_figure_2.png", "paperid": "2408.08709v1", - "paper_path": "./MISSQA/papers/2408.08709v1.json", + "paper_path": "./MISS-QA/papers/2408.08709v1.json", "figure_id": "2408.08709v1_figure_2.png", "caption": "Figure 2: The overall query-based entity-object transformer architecture.", "qtype": "Literature_Background" @@ -778,9 +778,9 @@ "masked_elements": [ "Transformer Decoder" ], - "figure_path": "./MISSQA/figures/1_2408.08709v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2408.08709v1_figure_2.png", "paperid": "2408.08709v1", - "paper_path": "./MISSQA/papers/2408.08709v1.json", + "paper_path": "./MISS-QA/papers/2408.08709v1.json", "figure_id": "2408.08709v1_figure_2.png", "caption": "Figure 2: The overall query-based entity-object transformer architecture.", "qtype": "Literature_Background" @@ -807,9 +807,9 @@ "gated fusion", "selective attention" ], - 
"figure_path": "./MISSQA/figures/2_2408.08709v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2408.08709v1_figure_2.png", "paperid": "2408.08709v1", - "paper_path": "./MISSQA/papers/2408.08709v1.json", + "paper_path": "./MISS-QA/papers/2408.08709v1.json", "figure_id": "2408.08709v1_figure_2.png", "caption": "Figure 2: The overall query-based entity-object transformer architecture.", "qtype": "Experimental_Results" @@ -830,9 +830,9 @@ "Camera Movement Perception", "Shot Size Perception" ], - "figure_path": "./MISSQA/figures/0_2411.10979v3_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.10979v3_figure_2.png", "paperid": "2411.10979v3", - "paper_path": "./MISSQA/papers/2411.10979v3.json", + "paper_path": "./MISS-QA/papers/2411.10979v3.json", "figure_id": "2411.10979v3_figure_2.png", "caption": "Figure 2: VidComposition comprises 15 categories of high-quality QA pairs, focusing on five aspects of compositions in compiled videos: cinematography, character, narrative, scene, and making. The correct answers are highlighted.", "qtype": "Experimental_Results" @@ -851,9 +851,9 @@ "masked_elements": [ "Network's Graph Representation" ], - "figure_path": "./MISSQA/figures/0_2407.08479v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.08479v1_figure_1.png", "paperid": "2407.08479v1", - "paper_path": "./MISSQA/papers/2407.08479v1.json", + "paper_path": "./MISS-QA/papers/2407.08479v1.json", "figure_id": "2407.08479v1_figure_1.png", "caption": "Figure 1. \nRobustGANTT generates schedules for backscatter networks using a GNN-based Transformer model.\nStep 1: collect MAC and routing protocol information. Step 2: build the IoT network s graph representation, only including edges strong enough for carrier provisioning (e.g., -75 dBm). Step 3: generate the schedule through iterative one-shot node classification. Step 4: disseminate the schedule using existing network flooding mechanisms and append it to the IoT device s normal schedule.", "qtype": "Others" @@ -874,9 +874,9 @@ "Cloud/Edge Server", "RobustGANTT" ], - "figure_path": "./MISSQA/figures/1_2407.08479v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.08479v1_figure_1.png", "paperid": "2407.08479v1", - "paper_path": "./MISSQA/papers/2407.08479v1.json", + "paper_path": "./MISS-QA/papers/2407.08479v1.json", "figure_id": "2407.08479v1_figure_1.png", "caption": "Figure 1. \nRobustGANTT generates schedules for backscatter networks using a GNN-based Transformer model.\nStep 1: collect MAC and routing protocol information. Step 2: build the IoT network s graph representation, only including edges strong enough for carrier provisioning (e.g., -75 dBm). Step 3: generate the schedule through iterative one-shot node classification. 
Step 4: disseminate the schedule using existing network flooding mechanisms and append it to the IoT device s normal schedule.", "qtype": "Others" @@ -897,9 +897,9 @@ "masked_elements": [ "Verifier" ], - "figure_path": "./MISSQA/figures/0_2407.06552v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.06552v1_figure_1.png", "paperid": "2407.06552v1", - "paper_path": "./MISSQA/papers/2407.06552v1.json", + "paper_path": "./MISS-QA/papers/2407.06552v1.json", "figure_id": "2407.06552v1_figure_1.png", "caption": "Figure 1: Overview of the proposed D L O V E𝐷𝐿𝑂𝑉𝐸DLOVEitalic_D italic_L italic_O italic_V italic_E attack leveraging Adversarial Machine Learning to a create well-crafted perturbation to overwrite the original watermark with the target watermark.", "qtype": "Design_Rationale" @@ -920,9 +920,9 @@ "masked_elements": [ "Unknown Encoder" ], - "figure_path": "./MISSQA/figures/1_2407.06552v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.06552v1_figure_1.png", "paperid": "2407.06552v1", - "paper_path": "./MISSQA/papers/2407.06552v1.json", + "paper_path": "./MISS-QA/papers/2407.06552v1.json", "figure_id": "2407.06552v1_figure_1.png", "caption": "Figure 1: Overview of the proposed D L O V E𝐷𝐿𝑂𝑉𝐸DLOVEitalic_D italic_L italic_O italic_V italic_E attack leveraging Adversarial Machine Learning to a create well-crafted perturbation to overwrite the original watermark with the target watermark.", "qtype": "Implementation_Details" @@ -950,9 +950,9 @@ "EVE", "Well-Crafted Perturbations" ], - "figure_path": "./MISSQA/figures/2_2407.06552v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2407.06552v1_figure_1.png", "paperid": "2407.06552v1", - "paper_path": "./MISSQA/papers/2407.06552v1.json", + "paper_path": "./MISS-QA/papers/2407.06552v1.json", "figure_id": "2407.06552v1_figure_1.png", "caption": "Figure 1: Overview of the proposed D L O V E𝐷𝐿𝑂𝑉𝐸DLOVEitalic_D italic_L italic_O italic_V italic_E attack leveraging Adversarial Machine Learning to a create well-crafted perturbation to overwrite the original watermark with the target watermark.", "qtype": "Implementation_Details" @@ -979,9 +979,9 @@ "Expert-guided", "Prompts For Finetuning" ], - "figure_path": "./MISSQA/figures/0_2409.03946v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2409.03946v1_figure_1.png", "paperid": "2409.03946v1", - "paper_path": "./MISSQA/papers/2409.03946v1.json", + "paper_path": "./MISS-QA/papers/2409.03946v1.json", "figure_id": "2409.03946v1_figure_1.png", "caption": "Figure 1: An overview of our approach for LLM-based tabular data generation. Our contributions include designing new prompt construction strategies and investigating their role in improving the quality of synthesized samples.", "qtype": "Implementation_Details" @@ -1006,9 +1006,9 @@ "masked_elements": [ "Novel-mapping" ], - "figure_path": "./MISSQA/figures/1_2409.03946v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2409.03946v1_figure_1.png", "paperid": "2409.03946v1", - "paper_path": "./MISSQA/papers/2409.03946v1.json", + "paper_path": "./MISS-QA/papers/2409.03946v1.json", "figure_id": "2409.03946v1_figure_1.png", "caption": "Figure 1: An overview of our approach for LLM-based tabular data generation. 
Our contributions include designing new prompt construction strategies and investigating their role in improving the quality of synthesized samples.", "qtype": "Implementation_Details" @@ -1034,9 +1034,9 @@ "LLM-guided", "Expert-guided" ], - "figure_path": "./MISSQA/figures/2_2409.03946v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2409.03946v1_figure_1.png", "paperid": "2409.03946v1", - "paper_path": "./MISSQA/papers/2409.03946v1.json", + "paper_path": "./MISS-QA/papers/2409.03946v1.json", "figure_id": "2409.03946v1_figure_1.png", "caption": "Figure 1: An overview of our approach for LLM-based tabular data generation. Our contributions include designing new prompt construction strategies and investigating their role in improving the quality of synthesized samples.", "qtype": "Literature_Background" @@ -1057,9 +1057,9 @@ "LLM", "Random Forest/Decision Tree Models" ], - "figure_path": "./MISSQA/figures/3_2409.03946v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2409.03946v1_figure_1.png", "paperid": "2409.03946v1", - "paper_path": "./MISSQA/papers/2409.03946v1.json", + "paper_path": "./MISS-QA/papers/2409.03946v1.json", "figure_id": "2409.03946v1_figure_1.png", "caption": "Figure 1: An overview of our approach for LLM-based tabular data generation. Our contributions include designing new prompt construction strategies and investigating their role in improving the quality of synthesized samples.", "qtype": "Literature_Background" @@ -1080,9 +1080,9 @@ "Reference Selector", "Offline MCP" ], - "figure_path": "./MISSQA/figures/0_2410.13817v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2410.13817v1_figure_2.png", "paperid": "2410.13817v1", - "paper_path": "./MISSQA/papers/2410.13817v1.json", + "paper_path": "./MISS-QA/papers/2410.13817v1.json", "figure_id": "2410.13817v1_figure_2.png", "caption": "Figure 2: The loco-manipulation planner [10] generates references in the form of multi-modal plans consisting of continuous trajectories 𝐗∗superscript𝐗\\bf{X}^{*}bold_X start_POSTSUPERSCRIPT ∗ end_POSTSUPERSCRIPT and manipulation schedules 𝐌∗superscript𝐌\\bf{M}^{*}bold_M start_POSTSUPERSCRIPT ∗ end_POSTSUPERSCRIPT. These are used by the demonstration-guided controller to select 𝐱∗superscript𝐱\\bf{x}^{*}bold_x start_POSTSUPERSCRIPT ∗ end_POSTSUPERSCRIPT and 𝐦∗superscript𝐦\\bf{m}^{*}bold_m start_POSTSUPERSCRIPT ∗ end_POSTSUPERSCRIPT adaptively based on the task phase ϕitalic-ϕ\\phiitalic_ϕ and track them robustly.The controller receives full-state feedback and sends joint position commands to the robot.", "qtype": "Literature_Background" @@ -1109,9 +1109,9 @@ "Adaptive Phase Dynamics", "Policy" ], - "figure_path": "./MISSQA/figures/1_2410.13817v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2410.13817v1_figure_2.png", "paperid": "2410.13817v1", - "paper_path": "./MISSQA/papers/2410.13817v1.json", + "paper_path": "./MISS-QA/papers/2410.13817v1.json", "figure_id": "2410.13817v1_figure_2.png", "caption": "Figure 2: The loco-manipulation planner [10] generates references in the form of multi-modal plans consisting of continuous trajectories 𝐗∗superscript𝐗\\bf{X}^{*}bold_X start_POSTSUPERSCRIPT ∗ end_POSTSUPERSCRIPT and manipulation schedules 𝐌∗superscript𝐌\\bf{M}^{*}bold_M start_POSTSUPERSCRIPT ∗ end_POSTSUPERSCRIPT. 
These are used by the demonstration-guided controller to select 𝐱∗superscript𝐱\\bf{x}^{*}bold_x start_POSTSUPERSCRIPT ∗ end_POSTSUPERSCRIPT and 𝐦∗superscript𝐦\\bf{m}^{*}bold_m start_POSTSUPERSCRIPT ∗ end_POSTSUPERSCRIPT adaptively based on the task phase ϕitalic-ϕ\\phiitalic_ϕ and track them robustly.The controller receives full-state feedback and sends joint position commands to the robot.", "qtype": "Literature_Background" @@ -1138,9 +1138,9 @@ "Adaptive Phase Dynamics", "Policy" ], - "figure_path": "./MISSQA/figures/2_2410.13817v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2410.13817v1_figure_2.png", "paperid": "2410.13817v1", - "paper_path": "./MISSQA/papers/2410.13817v1.json", + "paper_path": "./MISS-QA/papers/2410.13817v1.json", "figure_id": "2410.13817v1_figure_2.png", "caption": "Figure 2: The loco-manipulation planner [10] generates references in the form of multi-modal plans consisting of continuous trajectories 𝐗∗superscript𝐗\\bf{X}^{*}bold_X start_POSTSUPERSCRIPT ∗ end_POSTSUPERSCRIPT and manipulation schedules 𝐌∗superscript𝐌\\bf{M}^{*}bold_M start_POSTSUPERSCRIPT ∗ end_POSTSUPERSCRIPT. These are used by the demonstration-guided controller to select 𝐱∗superscript𝐱\\bf{x}^{*}bold_x start_POSTSUPERSCRIPT ∗ end_POSTSUPERSCRIPT and 𝐦∗superscript𝐦\\bf{m}^{*}bold_m start_POSTSUPERSCRIPT ∗ end_POSTSUPERSCRIPT adaptively based on the task phase ϕitalic-ϕ\\phiitalic_ϕ and track them robustly.The controller receives full-state feedback and sends joint position commands to the robot.", "qtype": "Literature_Background" @@ -1166,9 +1166,9 @@ "masked_elements": [ "Policy" ], - "figure_path": "./MISSQA/figures/3_2410.13817v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2410.13817v1_figure_2.png", "paperid": "2410.13817v1", - "paper_path": "./MISSQA/papers/2410.13817v1.json", + "paper_path": "./MISS-QA/papers/2410.13817v1.json", "figure_id": "2410.13817v1_figure_2.png", "caption": "Figure 2: The loco-manipulation planner [10] generates references in the form of multi-modal plans consisting of continuous trajectories 𝐗∗superscript𝐗\\bf{X}^{*}bold_X start_POSTSUPERSCRIPT ∗ end_POSTSUPERSCRIPT and manipulation schedules 𝐌∗superscript𝐌\\bf{M}^{*}bold_M start_POSTSUPERSCRIPT ∗ end_POSTSUPERSCRIPT. 
These are used by the demonstration-guided controller to select 𝐱∗superscript𝐱\\bf{x}^{*}bold_x start_POSTSUPERSCRIPT ∗ end_POSTSUPERSCRIPT and 𝐦∗superscript𝐦\\bf{m}^{*}bold_m start_POSTSUPERSCRIPT ∗ end_POSTSUPERSCRIPT adaptively based on the task phase ϕitalic-ϕ\\phiitalic_ϕ and track them robustly.The controller receives full-state feedback and sends joint position commands to the robot.", "qtype": "Experimental_Results" @@ -1195,9 +1195,9 @@ "masked_elements": [ "Metric monitoring & logging" ], - "figure_path": "./MISSQA/figures/1_2407.12797v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.12797v1_figure_1.png", "paperid": "2407.12797v1", - "paper_path": "./MISSQA/papers/2407.12797v1.json", + "paper_path": "./MISS-QA/papers/2407.12797v1.json", "figure_id": "2407.12797v1_figure_1.png", "caption": "Figure 1: Workflow of benchmarking LLM pipelines using CEBench.", "qtype": "Experimental_Results" @@ -1217,9 +1217,9 @@ "masked_elements": [ "VectorDB" ], - "figure_path": "./MISSQA/figures/2_2407.12797v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2407.12797v1_figure_1.png", "paperid": "2407.12797v1", - "paper_path": "./MISSQA/papers/2407.12797v1.json", + "paper_path": "./MISS-QA/papers/2407.12797v1.json", "figure_id": "2407.12797v1_figure_1.png", "caption": "Figure 1: Workflow of benchmarking LLM pipelines using CEBench.", "qtype": "Others" @@ -1238,9 +1238,9 @@ "masked_elements": [ "Inference Interface" ], - "figure_path": "./MISSQA/figures/3_2407.12797v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2407.12797v1_figure_1.png", "paperid": "2407.12797v1", - "paper_path": "./MISSQA/papers/2407.12797v1.json", + "paper_path": "./MISS-QA/papers/2407.12797v1.json", "figure_id": "2407.12797v1_figure_1.png", "caption": "Figure 1: Workflow of benchmarking LLM pipelines using CEBench.", "qtype": "Others" @@ -1259,9 +1259,9 @@ "masked_elements": [ "Patch Localization" ], - "figure_path": "./MISSQA/figures/0_2409.09406v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2409.09406v1_figure_2.png", "paperid": "2409.09406v1", - "paper_path": "./MISSQA/papers/2409.09406v1.json", + "paper_path": "./MISS-QA/papers/2409.09406v1.json", "figure_id": "2409.09406v1_figure_2.png", "caption": "Figure 2: Pipeline of DIFFender.\nDIFFender utilizes a unified diffusion model to seamlessly coordinate the localization and restoration of adversarial patch attacks, integrating a prompt-tuning module to enable efficient and precise tuning.", "qtype": "Others" @@ -1288,9 +1288,9 @@ "masked_elements": [ "Prompt Tuning" ], - "figure_path": "./MISSQA/figures/1_2409.09406v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2409.09406v1_figure_2.png", "paperid": "2409.09406v1", - "paper_path": "./MISSQA/papers/2409.09406v1.json", + "paper_path": "./MISS-QA/papers/2409.09406v1.json", "figure_id": "2409.09406v1_figure_2.png", "caption": "Figure 2: Pipeline of DIFFender.\nDIFFender utilizes a unified diffusion model to seamlessly coordinate the localization and restoration of adversarial patch attacks, integrating a prompt-tuning module to enable efficient and precise tuning.", "qtype": "Others" @@ -1318,9 +1318,9 @@ "Patch Localization", "Patch Restoration" ], - "figure_path": "./MISSQA/figures/2_2409.09406v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2409.09406v1_figure_2.png", "paperid": "2409.09406v1", - "paper_path": "./MISSQA/papers/2409.09406v1.json", + "paper_path": "./MISS-QA/papers/2409.09406v1.json", "figure_id": "2409.09406v1_figure_2.png", "caption": "Figure 2: Pipeline of 
DIFFender.\nDIFFender utilizes a unified diffusion model to seamlessly coordinate the localization and restoration of adversarial patch attacks, integrating a prompt-tuning module to enable efficient and precise tuning.", "qtype": "Design_Rationale" @@ -1347,9 +1347,9 @@ "masked_elements": [ "IDC token" ], - "figure_path": "./MISSQA/figures/3_2409.09406v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2409.09406v1_figure_2.png", "paperid": "2409.09406v1", - "paper_path": "./MISSQA/papers/2409.09406v1.json", + "paper_path": "./MISS-QA/papers/2409.09406v1.json", "figure_id": "2409.09406v1_figure_2.png", "caption": "Figure 2: Pipeline of DIFFender.\nDIFFender utilizes a unified diffusion model to seamlessly coordinate the localization and restoration of adversarial patch attacks, integrating a prompt-tuning module to enable efficient and precise tuning.", "qtype": "Design_Rationale" @@ -1375,9 +1375,9 @@ "Global Retrieval", "Local Retrieval" ], - "figure_path": "./MISSQA/figures/0_2407.18614v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.18614v1_figure_1.png", "paperid": "2407.18614v1", - "paper_path": "./MISSQA/papers/2407.18614v1.json", + "paper_path": "./MISS-QA/papers/2407.18614v1.json", "figure_id": "2407.18614v1_figure_1.png", "caption": "Figure 1: Upper half illustrates pipeline of our proposed open framework for image-based fact verification: It comprises two phases: forgery identification and fact retrieval. The modules therein can be replaced with almost any open-source toolbox. Lower half shows examples of two specific forgery types.", "qtype": "Design_Rationale" @@ -1405,9 +1405,9 @@ "masked_elements": [ "Forgery Segmentation" ], - "figure_path": "./MISSQA/figures/1_2407.18614v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.18614v1_figure_1.png", "paperid": "2407.18614v1", - "paper_path": "./MISSQA/papers/2407.18614v1.json", + "paper_path": "./MISS-QA/papers/2407.18614v1.json", "figure_id": "2407.18614v1_figure_1.png", "caption": "Figure 1: Upper half illustrates pipeline of our proposed open framework for image-based fact verification: It comprises two phases: forgery identification and fact retrieval. The modules therein can be replaced with almost any open-source toolbox. Lower half shows examples of two specific forgery types.", "qtype": "Design_Rationale" @@ -1428,9 +1428,9 @@ "Forgery Detection", "Metadata" ], - "figure_path": "./MISSQA/figures/2_2407.18614v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2407.18614v1_figure_1.png", "paperid": "2407.18614v1", - "paper_path": "./MISSQA/papers/2407.18614v1.json", + "paper_path": "./MISS-QA/papers/2407.18614v1.json", "figure_id": "2407.18614v1_figure_1.png", "caption": "Figure 1: Upper half illustrates pipeline of our proposed open framework for image-based fact verification: It comprises two phases: forgery identification and fact retrieval. The modules therein can be replaced with almost any open-source toolbox. 
Lower half shows examples of two specific forgery types.", "qtype": "Implementation_Details" @@ -1457,9 +1457,9 @@ "Normal SDS", "Learned Jacobians" ], - "figure_path": "./MISSQA/figures/0_2409.07454v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2409.07454v1_figure_2.png", "paperid": "2409.07454v1", - "paper_path": "./MISSQA/papers/2409.07454v1.json", + "paper_path": "./MISS-QA/papers/2409.07454v1.json", "figure_id": "2409.07454v1_figure_2.png", "caption": "Figure 2: An overview of our DreamMesh that fully capitalizes on explicit 3D scene representation (triangle meshes) for text-to-3D generation in a coarse-to-fine scheme. In the first coarse stage, DreamMesh learns text-guided Jacobians matrices to deform a base mesh into the coarse mesh, and then textures it through a tuning-free process. In the second fine stage, both coarse mesh and texture are jointly optimized, yielding high-quality mesh with high-fidelity texture.", "qtype": "Implementation_Details" @@ -1485,9 +1485,9 @@ "Image-to-Image Refiner", "Differentiable Rendering Pipeline" ], - "figure_path": "./MISSQA/figures/1_2409.07454v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2409.07454v1_figure_2.png", "paperid": "2409.07454v1", - "paper_path": "./MISSQA/papers/2409.07454v1.json", + "paper_path": "./MISS-QA/papers/2409.07454v1.json", "figure_id": "2409.07454v1_figure_2.png", "caption": "Figure 2: An overview of our DreamMesh that fully capitalizes on explicit 3D scene representation (triangle meshes) for text-to-3D generation in a coarse-to-fine scheme. In the first coarse stage, DreamMesh learns text-guided Jacobians matrices to deform a base mesh into the coarse mesh, and then textures it through a tuning-free process. In the second fine stage, both coarse mesh and texture are jointly optimized, yielding high-quality mesh with high-fidelity texture.", "qtype": "Implementation_Details" @@ -1514,9 +1514,9 @@ "Normal SDS", "Learned Jacobians" ], - "figure_path": "./MISSQA/figures/2_2409.07454v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2409.07454v1_figure_2.png", "paperid": "2409.07454v1", - "paper_path": "./MISSQA/papers/2409.07454v1.json", + "paper_path": "./MISS-QA/papers/2409.07454v1.json", "figure_id": "2409.07454v1_figure_2.png", "caption": "Figure 2: An overview of our DreamMesh that fully capitalizes on explicit 3D scene representation (triangle meshes) for text-to-3D generation in a coarse-to-fine scheme. In the first coarse stage, DreamMesh learns text-guided Jacobians matrices to deform a base mesh into the coarse mesh, and then textures it through a tuning-free process. In the second fine stage, both coarse mesh and texture are jointly optimized, yielding high-quality mesh with high-fidelity texture.", "qtype": "Literature_Background" @@ -1545,9 +1545,9 @@ "Differentiable Rendering Pipeline", "Image-to-Image Refiner" ], - "figure_path": "./MISSQA/figures/3_2409.07454v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2409.07454v1_figure_2.png", "paperid": "2409.07454v1", - "paper_path": "./MISSQA/papers/2409.07454v1.json", + "paper_path": "./MISS-QA/papers/2409.07454v1.json", "figure_id": "2409.07454v1_figure_2.png", "caption": "Figure 2: An overview of our DreamMesh that fully capitalizes on explicit 3D scene representation (triangle meshes) for text-to-3D generation in a coarse-to-fine scheme. In the first coarse stage, DreamMesh learns text-guided Jacobians matrices to deform a base mesh into the coarse mesh, and then textures it through a tuning-free process. 
In the second fine stage, both coarse mesh and texture are jointly optimized, yielding high-quality mesh with high-fidelity texture.", "qtype": "Literature_Background" @@ -1576,9 +1576,9 @@ "masked_elements": [ "Pose Optimization" ], - "figure_path": "./MISSQA/figures/0_2407.14001v2_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2407.14001v2_figure_2.png", "paperid": "2407.14001v2", - "paper_path": "./MISSQA/papers/2407.14001v2.json", + "paper_path": "./MISS-QA/papers/2407.14001v2.json", "figure_id": "2407.14001v2_figure_2.png", "caption": "Figure 2: Overview of our proposed solution for the Craft Assembly Task. Given the RGB image of the target object, the visible parts are segmented and classified. Labeled template meshes of the detected class are retrieved from a prepared database and their pose are optimized through a differentiable renderer using the segmentation results as the target. The parts of the best aligned mesh are simplified to primitive shapes. Finally each part is matched with an object in the scene (input) using a search algorithm, generating the final Craft Proposal.", "qtype": "Literature_Background" @@ -1602,9 +1602,9 @@ "masked_elements": [ "Primitive shape proposal" ], - "figure_path": "./MISSQA/figures/1_2407.14001v2_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2407.14001v2_figure_2.png", "paperid": "2407.14001v2", - "paper_path": "./MISSQA/papers/2407.14001v2.json", + "paper_path": "./MISS-QA/papers/2407.14001v2.json", "figure_id": "2407.14001v2_figure_2.png", "caption": "Figure 2: Overview of our proposed solution for the Craft Assembly Task. Given the RGB image of the target object, the visible parts are segmented and classified. Labeled template meshes of the detected class are retrieved from a prepared database and their pose are optimized through a differentiable renderer using the segmentation results as the target. The parts of the best aligned mesh are simplified to primitive shapes. Finally each part is matched with an object in the scene (input) using a search algorithm, generating the final Craft Proposal.", "qtype": "Literature_Background" @@ -1625,9 +1625,9 @@ "Part Segmentation", "Pose Optimization" ], - "figure_path": "./MISSQA/figures/2_2407.14001v2_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2407.14001v2_figure_2.png", "paperid": "2407.14001v2", - "paper_path": "./MISSQA/papers/2407.14001v2.json", + "paper_path": "./MISS-QA/papers/2407.14001v2.json", "figure_id": "2407.14001v2_figure_2.png", "caption": "Figure 2: Overview of our proposed solution for the Craft Assembly Task. Given the RGB image of the target object, the visible parts are segmented and classified. Labeled template meshes of the detected class are retrieved from a prepared database and their pose are optimized through a differentiable renderer using the segmentation results as the target. The parts of the best aligned mesh are simplified to primitive shapes. 
Finally each part is matched with an object in the scene (input) using a search algorithm, generating the final Craft Proposal.", "qtype": "Experimental_Results" @@ -1647,9 +1647,9 @@ "masked_elements": [ "Scene Matching" ], - "figure_path": "./MISSQA/figures/3_2407.14001v2_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2407.14001v2_figure_2.png", "paperid": "2407.14001v2", - "paper_path": "./MISSQA/papers/2407.14001v2.json", + "paper_path": "./MISS-QA/papers/2407.14001v2.json", "figure_id": "2407.14001v2_figure_2.png", "caption": "Figure 2: Overview of our proposed solution for the Craft Assembly Task. Given the RGB image of the target object, the visible parts are segmented and classified. Labeled template meshes of the detected class are retrieved from a prepared database and their pose are optimized through a differentiable renderer using the segmentation results as the target. The parts of the best aligned mesh are simplified to primitive shapes. Finally each part is matched with an object in the scene (input) using a search algorithm, generating the final Craft Proposal.", "qtype": "Experimental_Results" @@ -1671,9 +1671,9 @@ "public data", "contrastive learning" ], - "figure_path": "./MISSQA/figures/0_2407.16289v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.16289v1_figure_1.png", "paperid": "2407.16289v1", - "paper_path": "./MISSQA/papers/2407.16289v1.json", + "paper_path": "./MISS-QA/papers/2407.16289v1.json", "figure_id": "2407.16289v1_figure_1.png", "caption": "Figure 1: Pipelines of federated learning-based face recognition methods including our proposed method. (a) The server collects class embedding of client c (e.g. FedFace). (b) Client c continuously downloads public data from the server (e.g. FedFR). (c) Our proposed method(FedFS), client c performs intra-subject self-supervised learning without any additional work.", "qtype": "Experimental_Results" @@ -1699,9 +1699,9 @@ "masked_elements": [ "intra-subject self-supervised learning" ], - "figure_path": "./MISSQA/figures/1_2407.16289v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.16289v1_figure_1.png", "paperid": "2407.16289v1", - "paper_path": "./MISSQA/papers/2407.16289v1.json", + "paper_path": "./MISS-QA/papers/2407.16289v1.json", "figure_id": "2407.16289v1_figure_1.png", "caption": "Figure 1: Pipelines of federated learning-based face recognition methods including our proposed method. (a) The server collects class embedding of client c (e.g. FedFace). (b) Client c continuously downloads public data from the server (e.g. FedFR). (c) Our proposed method(FedFS), client c performs intra-subject self-supervised learning without any additional work.", "qtype": "Experimental_Results" @@ -1728,9 +1728,9 @@ "masked_elements": [ "intra-subject self-supervised learning" ], - "figure_path": "./MISSQA/figures/2_2407.16289v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2407.16289v1_figure_1.png", "paperid": "2407.16289v1", - "paper_path": "./MISSQA/papers/2407.16289v1.json", + "paper_path": "./MISS-QA/papers/2407.16289v1.json", "figure_id": "2407.16289v1_figure_1.png", "caption": "Figure 1: Pipelines of federated learning-based face recognition methods including our proposed method. (a) The server collects class embedding of client c (e.g. FedFace). (b) Client c continuously downloads public data from the server (e.g. FedFR). 
(c) Our proposed method(FedFS), client c performs intra-subject self-supervised learning without any additional work.", "qtype": "Others" @@ -1755,9 +1755,9 @@ "masked_elements": [ "download public data" ], - "figure_path": "./MISSQA/figures/3_2407.16289v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2407.16289v1_figure_1.png", "paperid": "2407.16289v1", - "paper_path": "./MISSQA/papers/2407.16289v1.json", + "paper_path": "./MISS-QA/papers/2407.16289v1.json", "figure_id": "2407.16289v1_figure_1.png", "caption": "Figure 1: Pipelines of federated learning-based face recognition methods including our proposed method. (a) The server collects class embedding of client c (e.g. FedFace). (b) Client c continuously downloads public data from the server (e.g. FedFR). (c) Our proposed method(FedFS), client c performs intra-subject self-supervised learning without any additional work.", "qtype": "Others" @@ -1776,9 +1776,9 @@ "masked_elements": [ "EEG Diffusion Transformer" ], - "figure_path": "./MISSQA/figures/0_2407.20253v2_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.20253v2_figure_1.png", "paperid": "2407.20253v2", - "paper_path": "./MISSQA/papers/2407.20253v2.json", + "paper_path": "./MISS-QA/papers/2407.20253v2.json", "figure_id": "2407.20253v2_figure_1.png", "caption": "Figure 1: The Illustration of the Proposed Method.", "qtype": "Others" @@ -1798,9 +1798,9 @@ "masked_elements": [ "Label Smooth" ], - "figure_path": "./MISSQA/figures/1_2407.20253v2_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.20253v2_figure_1.png", "paperid": "2407.20253v2", - "paper_path": "./MISSQA/papers/2407.20253v2.json", + "paper_path": "./MISS-QA/papers/2407.20253v2.json", "figure_id": "2407.20253v2_figure_1.png", "caption": "Figure 1: The Illustration of the Proposed Method.", "qtype": "Others" @@ -1826,9 +1826,9 @@ "masked_elements": [ "Generated-Original Signals Reassemble" ], - "figure_path": "./MISSQA/figures/2_2407.20253v2_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2407.20253v2_figure_1.png", "paperid": "2407.20253v2", - "paper_path": "./MISSQA/papers/2407.20253v2.json", + "paper_path": "./MISS-QA/papers/2407.20253v2.json", "figure_id": "2407.20253v2_figure_1.png", "caption": "Figure 1: The Illustration of the Proposed Method.", "qtype": "Design_Rationale" @@ -1856,9 +1856,9 @@ "supervised prompt learning", "self-supervised prompt learning" ], - "figure_path": "./MISSQA/figures/0_2408.05775v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2408.05775v1_figure_1.png", "paperid": "2408.05775v1", - "paper_path": "./MISSQA/papers/2408.05775v1.json", + "paper_path": "./MISS-QA/papers/2408.05775v1.json", "figure_id": "2408.05775v1_figure_1.png", "caption": "Figure 1: TPT versus Self-TPT. (a) TPT learns prompts from source data (stage 1), then adapts them to individual samples for prediction (stages 2&3). (b) Self-TPT employs text-oriented self-supervised learning (SSL) for joint training (stage 1) and for new class adaptation (stage 2), followed by direct predictions for each image (stage 3). (c) We present the frame per second (FPS) and graphics memory usage for each method when applied to CLIP-B/16 using the same A100-80G GPU. 
The y-axis represents the average cross-dataset accuracy.", "qtype": "Design_Rationale" @@ -1882,9 +1882,9 @@ "supervised and self-supervised prompt learning", "source data training" ], - "figure_path": "./MISSQA/figures/1_2408.05775v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2408.05775v1_figure_1.png", "paperid": "2408.05775v1", - "paper_path": "./MISSQA/papers/2408.05775v1.json", + "paper_path": "./MISS-QA/papers/2408.05775v1.json", "figure_id": "2408.05775v1_figure_1.png", "caption": "Figure 1: TPT versus Self-TPT. (a) TPT learns prompts from source data (stage 1), then adapts them to individual samples for prediction (stages 2&3). (b) Self-TPT employs text-oriented self-supervised learning (SSL) for joint training (stage 1) and for new class adaptation (stage 2), followed by direct predictions for each image (stage 3). (c) We present the frame per second (FPS) and graphics memory usage for each method when applied to CLIP-B/16 using the same A100-80G GPU. The y-axis represents the average cross-dataset accuracy.", "qtype": "Implementation_Details" @@ -1910,9 +1910,9 @@ "masked_elements": [ "test-time prompt tuning on predefined class names" ], - "figure_path": "./MISSQA/figures/2_2408.05775v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2408.05775v1_figure_1.png", "paperid": "2408.05775v1", - "paper_path": "./MISSQA/papers/2408.05775v1.json", + "paper_path": "./MISS-QA/papers/2408.05775v1.json", "figure_id": "2408.05775v1_figure_1.png", "caption": "Figure 1: TPT versus Self-TPT. (a) TPT learns prompts from source data (stage 1), then adapts them to individual samples for prediction (stages 2&3). (b) Self-TPT employs text-oriented self-supervised learning (SSL) for joint training (stage 1) and for new class adaptation (stage 2), followed by direct predictions for each image (stage 3). (c) We present the frame per second (FPS) and graphics memory usage for each method when applied to CLIP-B/16 using the same A100-80G GPU. The y-axis represents the average cross-dataset accuracy.", "qtype": "Implementation_Details" @@ -1938,9 +1938,9 @@ "Semantic Unifier", "Semantic Features" ], - "figure_path": "./MISSQA/figures/0_2410.12609v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2410.12609v1_figure_2.png", "paperid": "2410.12609v1", - "paper_path": "./MISSQA/papers/2410.12609v1.json", + "paper_path": "./MISS-QA/papers/2410.12609v1.json", "figure_id": "2410.12609v1_figure_2.png", "caption": "Figure 2. The proposed graph foundation reasoning engine SCORE transforms diverse graph tasks into zero-shot reasoning on multi-relational knowledge graphs with the integration of semantic features.", "qtype": "Implementation_Details" @@ -1967,9 +1967,9 @@ "masked_elements": [ "SCMP" ], - "figure_path": "./MISSQA/figures/1_2410.12609v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2410.12609v1_figure_2.png", "paperid": "2410.12609v1", - "paper_path": "./MISSQA/papers/2410.12609v1.json", + "paper_path": "./MISS-QA/papers/2410.12609v1.json", "figure_id": "2410.12609v1_figure_2.png", "caption": "Figure 2. 
The proposed graph foundation reasoning engine SCORE transforms diverse graph tasks into zero-shot reasoning on multi-relational knowledge graphs with the integration of semantic features.", "qtype": "Implementation_Details" @@ -1996,9 +1996,9 @@ "masked_elements": [ "SCMP" ], - "figure_path": "./MISSQA/figures/2_2410.12609v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2410.12609v1_figure_2.png", "paperid": "2410.12609v1", - "paper_path": "./MISSQA/papers/2410.12609v1.json", + "paper_path": "./MISS-QA/papers/2410.12609v1.json", "figure_id": "2410.12609v1_figure_2.png", "caption": "Figure 2. The proposed graph foundation reasoning engine SCORE transforms diverse graph tasks into zero-shot reasoning on multi-relational knowledge graphs with the integration of semantic features.", "qtype": "Literature_Background" @@ -2025,9 +2025,9 @@ "Semantic Unifier", "Relation Graph" ], - "figure_path": "./MISSQA/figures/3_2410.12609v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2410.12609v1_figure_2.png", "paperid": "2410.12609v1", - "paper_path": "./MISSQA/papers/2410.12609v1.json", + "paper_path": "./MISS-QA/papers/2410.12609v1.json", "figure_id": "2410.12609v1_figure_2.png", "caption": "Figure 2. The proposed graph foundation reasoning engine SCORE transforms diverse graph tasks into zero-shot reasoning on multi-relational knowledge graphs with the integration of semantic features.", "qtype": "Literature_Background" @@ -2057,9 +2057,9 @@ "masked_elements": [ "CBAM" ], - "figure_path": "./MISSQA/figures/0_2411.09469v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.09469v1_figure_1.png", "paperid": "2411.09469v1", - "paper_path": "./MISSQA/papers/2411.09469v1.json", + "paper_path": "./MISS-QA/papers/2411.09469v1.json", "figure_id": "2411.09469v1_figure_1.png", "caption": "Figure 1: The proposed explainable Cervix-AID-Net model for high-risk and low-risk classification.", "qtype": "Literature_Background" @@ -2083,9 +2083,9 @@ "masked_elements": [ "CartoonX" ], - "figure_path": "./MISSQA/figures/1_2411.09469v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.09469v1_figure_1.png", "paperid": "2411.09469v1", - "paper_path": "./MISSQA/papers/2411.09469v1.json", + "paper_path": "./MISS-QA/papers/2411.09469v1.json", "figure_id": "2411.09469v1_figure_1.png", "caption": "Figure 1: The proposed explainable Cervix-AID-Net model for high-risk and low-risk classification.", "qtype": "Literature_Background" @@ -2113,9 +2113,9 @@ "masked_elements": [ "CBAM" ], - "figure_path": "./MISSQA/figures/2_2411.09469v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.09469v1_figure_1.png", "paperid": "2411.09469v1", - "paper_path": "./MISSQA/papers/2411.09469v1.json", + "paper_path": "./MISS-QA/papers/2411.09469v1.json", "figure_id": "2411.09469v1_figure_1.png", "caption": "Figure 1: The proposed explainable Cervix-AID-Net model for high-risk and low-risk classification.", "qtype": "Experimental_Results" @@ -2142,9 +2142,9 @@ "CartoonX", "pixel RDE" ], - "figure_path": "./MISSQA/figures/3_2411.09469v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2411.09469v1_figure_1.png", "paperid": "2411.09469v1", - "paper_path": "./MISSQA/papers/2411.09469v1.json", + "paper_path": "./MISS-QA/papers/2411.09469v1.json", "figure_id": "2411.09469v1_figure_1.png", "caption": "Figure 1: The proposed explainable Cervix-AID-Net model for high-risk and low-risk classification.", "qtype": "Experimental_Results" @@ -2172,9 +2172,9 @@ "DepRoBERTa", "Llama3-8B" ], - "figure_path": 
"./MISSQA/figures/0_2410.04501v3_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2410.04501v3_figure_1.png", "paperid": "2410.04501v3", - "paper_path": "./MISSQA/papers/2410.04501v3.json", + "paper_path": "./MISS-QA/papers/2410.04501v3.json", "figure_id": "2410.04501v3_figure_1.png", "caption": "Figure 1: An overview of our approach. (a) pseudo-labels generation for unlabeled data. We first use 500500500500 labeled posts to fine-tune DepRoBERTa [5] and Llama3-8B [4] for the classification task. Then, we combine these models with Qwen2-72B-Instruct via prompting to annotate 1,50015001{,}5001 , 500 posts in the unlabeled dataset. We keep only ≈900absent900\\approx 900≈ 900 posts for which all three models predict the same and combine these with the 500500500500 labeled posts to form a new training set (Section IV-A). (b) LLMs fine-tuning. We then fine-tune Llama3-8B, Llama3.1-8B, and Gemma2-9B on the newly formed dataset with Macro Double Soft F1 loss (Section IV-B). (c) Model Ensembling. These fine-tuned models are combined with prompting Qwen2-72B-Instruct to create an ensemble model for classifying new user posts\n(Section IV-C).", "qtype": "Experimental_Results" @@ -2198,9 +2198,9 @@ "masked_elements": [ "Llama3-8B" ], - "figure_path": "./MISSQA/figures/1_2410.04501v3_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2410.04501v3_figure_1.png", "paperid": "2410.04501v3", - "paper_path": "./MISSQA/papers/2410.04501v3.json", + "paper_path": "./MISS-QA/papers/2410.04501v3.json", "figure_id": "2410.04501v3_figure_1.png", "caption": "Figure 1: An overview of our approach. (a) pseudo-labels generation for unlabeled data. We first use 500500500500 labeled posts to fine-tune DepRoBERTa [5] and Llama3-8B [4] for the classification task. Then, we combine these models with Qwen2-72B-Instruct via prompting to annotate 1,50015001{,}5001 , 500 posts in the unlabeled dataset. We keep only ≈900absent900\\approx 900≈ 900 posts for which all three models predict the same and combine these with the 500500500500 labeled posts to form a new training set (Section IV-A). (b) LLMs fine-tuning. We then fine-tune Llama3-8B, Llama3.1-8B, and Gemma2-9B on the newly formed dataset with Macro Double Soft F1 loss (Section IV-B). (c) Model Ensembling. These fine-tuned models are combined with prompting Qwen2-72B-Instruct to create an ensemble model for classifying new user posts\n(Section IV-C).", "qtype": "Experimental_Results" @@ -2219,9 +2219,9 @@ "masked_elements": [ "Qwen2-72B-Instruct" ], - "figure_path": "./MISSQA/figures/2_2410.04501v3_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2410.04501v3_figure_1.png", "paperid": "2410.04501v3", - "paper_path": "./MISSQA/papers/2410.04501v3.json", + "paper_path": "./MISS-QA/papers/2410.04501v3.json", "figure_id": "2410.04501v3_figure_1.png", "caption": "Figure 1: An overview of our approach. (a) pseudo-labels generation for unlabeled data. We first use 500500500500 labeled posts to fine-tune DepRoBERTa [5] and Llama3-8B [4] for the classification task. Then, we combine these models with Qwen2-72B-Instruct via prompting to annotate 1,50015001{,}5001 , 500 posts in the unlabeled dataset. We keep only ≈900absent900\\approx 900≈ 900 posts for which all three models predict the same and combine these with the 500500500500 labeled posts to form a new training set (Section IV-A). (b) LLMs fine-tuning. We then fine-tune Llama3-8B, Llama3.1-8B, and Gemma2-9B on the newly formed dataset with Macro Double Soft F1 loss (Section IV-B). (c) Model Ensembling. 
These fine-tuned models are combined with prompting Qwen2-72B-Instruct to create an ensemble model for classifying new user posts\n(Section IV-C).", "qtype": "Others" @@ -2247,9 +2247,9 @@ "Llama3-8B", "Gemma2-9B" ], - "figure_path": "./MISSQA/figures/3_2410.04501v3_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2410.04501v3_figure_1.png", "paperid": "2410.04501v3", - "paper_path": "./MISSQA/papers/2410.04501v3.json", + "paper_path": "./MISS-QA/papers/2410.04501v3.json", "figure_id": "2410.04501v3_figure_1.png", "caption": "Figure 1: An overview of our approach. (a) pseudo-labels generation for unlabeled data. We first use 500500500500 labeled posts to fine-tune DepRoBERTa [5] and Llama3-8B [4] for the classification task. Then, we combine these models with Qwen2-72B-Instruct via prompting to annotate 1,50015001{,}5001 , 500 posts in the unlabeled dataset. We keep only ≈900absent900\\approx 900≈ 900 posts for which all three models predict the same and combine these with the 500500500500 labeled posts to form a new training set (Section IV-A). (b) LLMs fine-tuning. We then fine-tune Llama3-8B, Llama3.1-8B, and Gemma2-9B on the newly formed dataset with Macro Double Soft F1 loss (Section IV-B). (c) Model Ensembling. These fine-tuned models are combined with prompting Qwen2-72B-Instruct to create an ensemble model for classifying new user posts\n(Section IV-C).", "qtype": "Others" @@ -2275,9 +2275,9 @@ "Spatial codebook", "Temporal codebook" ], - "figure_path": "./MISSQA/figures/0_2411.16468v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.16468v1_figure_1.png", "paperid": "2411.16468v1", - "paper_path": "./MISSQA/papers/2411.16468v1.json", + "paper_path": "./MISS-QA/papers/2411.16468v1.json", "figure_id": "2411.16468v1_figure_1.png", "caption": "Figure 1: Network architecture of Stage I.\nStage I uses HQ face videos to train HQ 3D-VQGAN (Ehsubscript𝐸ℎE_{h}italic_E start_POSTSUBSCRIPT italic_h end_POSTSUBSCRIPT and Dhsubscript𝐷ℎD_{h}italic_D start_POSTSUBSCRIPT italic_h end_POSTSUBSCRIPT) and spatial and temporal codebooks (𝒞Ssubscript𝒞𝑆\\mathcal{C}_{S}caligraphic_C start_POSTSUBSCRIPT italic_S end_POSTSUBSCRIPT and 𝒞Tsubscript𝒞𝑇\\mathcal{C}_{T}caligraphic_C start_POSTSUBSCRIPT italic_T end_POSTSUBSCRIPT).\n(a) illustrates the quantization operation STLookUp through two codebooks in our proposed framework. 
(b) and (c) display the computation process of temporal attention and motion residual, respectively.\n(d) We leverage a pre-trained feature network DINOv2 and trainable multi-scale discriminator heads to construct a more powerful discriminator for stable training.", "qtype": "Others" @@ -2298,9 +2298,9 @@ "masked_elements": [ "Pretrained DINOv2", "Multi-scale discriminator heads" ], - "figure_path": "./MISSQA/figures/1_2411.16468v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.16468v1_figure_1.png", "paperid": "2411.16468v1", - "paper_path": "./MISSQA/papers/2411.16468v1.json", + "paper_path": "./MISS-QA/papers/2411.16468v1.json", "figure_id": "2411.16468v1_figure_1.png", "caption": "Figure 1: Network architecture of Stage I.\nStage I uses HQ face videos to train HQ 3D-VQGAN (E_{h} and D_{h}) and spatial and temporal codebooks (\\mathcal{C}_{S} and \\mathcal{C}_{T}).\n(a) illustrates the quantization operation STLookUp through two codebooks in our proposed framework. (b) and (c) display the computation process of temporal attention and motion residual, respectively.\n(d) We leverage a pre-trained feature network DINOv2 and trainable multi-scale discriminator heads to construct a more powerful discriminator for stable training.", "qtype": "Others" @@ -2329,9 +2329,9 @@ "masked_elements": [ "temporal attention" ], - "figure_path": "./MISSQA/figures/2_2411.16468v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.16468v1_figure_1.png", "paperid": "2411.16468v1", - "paper_path": "./MISSQA/papers/2411.16468v1.json", + "paper_path": "./MISS-QA/papers/2411.16468v1.json", "figure_id": "2411.16468v1_figure_1.png", "caption": "Figure 1: Network architecture of Stage I.\nStage I uses HQ face videos to train HQ 3D-VQGAN (E_{h} and D_{h}) and spatial and temporal codebooks (\\mathcal{C}_{S} and \\mathcal{C}_{T}).\n(a) illustrates the quantization operation STLookUp through two codebooks in our proposed framework. 
(b) and (c) display the computation process of temporal attention and motion residual, respectively.\n(d) We leverage a pre-trained feature network DINOv2 and trainable multi-scale discriminator heads to construct a more powerful discriminator for stable training.", "qtype": "Design_Rationale" @@ -2356,9 +2356,9 @@ "masked_elements": [ "DINOv2" ], - "figure_path": "./MISSQA/figures/3_2411.16468v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2411.16468v1_figure_1.png", "paperid": "2411.16468v1", - "paper_path": "./MISSQA/papers/2411.16468v1.json", + "paper_path": "./MISS-QA/papers/2411.16468v1.json", "figure_id": "2411.16468v1_figure_1.png", "caption": "Figure 1: Network architecture of Stage I.\nStage I uses HQ face videos to train HQ 3D-VQGAN (E_{h} and D_{h}) and spatial and temporal codebooks (\\mathcal{C}_{S} and \\mathcal{C}_{T}).\n(a) illustrates the quantization operation STLookUp through two codebooks in our proposed framework. (b) and (c) display the computation process of temporal attention and motion residual, respectively.\n(d) We leverage a pre-trained feature network DINOv2 and trainable multi-scale discriminator heads to construct a more powerful discriminator for stable training.", "qtype": "Design_Rationale" @@ -2389,9 +2389,9 @@ "masked_elements": [ "L1", "L2" ], - "figure_path": "./MISSQA/figures/0_2411.07527v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.07527v1_figure_2.png", "paperid": "2411.07527v1", - "paper_path": "./MISSQA/papers/2411.07527v1.json", + "paper_path": "./MISS-QA/papers/2411.07527v1.json", "figure_id": "2411.07527v1_figure_2.png", "caption": "Figure 2: Overview of Pen Framework", "qtype": "Design_Rationale" @@ -2417,9 +2417,9 @@ "masked_elements": [ "Roberta" ], - "figure_path": "./MISSQA/figures/1_2411.07527v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2411.07527v1_figure_2.png", "paperid": "2411.07527v1", - "paper_path": "./MISSQA/papers/2411.07527v1.json", + "paper_path": "./MISS-QA/papers/2411.07527v1.json", "figure_id": "2411.07527v1_figure_2.png", "caption": "Figure 2: Overview of Pen Framework", "qtype": "Implementation_Details" @@ -2444,9 +2444,9 @@ "masked_elements": [ "Option Order Sensitivity Test" ], - "figure_path": "./MISSQA/figures/0_2411.03823v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.03823v1_figure_1.png", "paperid": "2411.03823v1", - "paper_path": "./MISSQA/papers/2411.03823v1.json", + "paper_path": "./MISS-QA/papers/2411.03823v1.json", "figure_id": "2411.03823v1_figure_1.png", "caption": "Figure 1: A description of Multimodal Data Contamination (left) and the overview of proposed MM-Detect framework (right).", "qtype": "Implementation_Details" @@ -2473,9 +2473,9 @@ "masked_elements": [ "Slot Guessing for Perturbation Captions", "Mask Word Prediction" ], - "figure_path": "./MISSQA/figures/1_2411.03823v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.03823v1_figure_1.png", "paperid": "2411.03823v1", - "paper_path": "./MISSQA/papers/2411.03823v1.json", + "paper_path": "./MISS-QA/papers/2411.03823v1.json", "figure_id": "2411.03823v1_figure_1.png", "caption": "Figure 1: A description of Multimodal Data Contamination (left) and the overview of proposed MM-Detect framework (right).", "qtype": "Implementation_Details" @@ -2500,9 +2500,9 @@ 
"masked_elements": [ "Option Order Sensitivity Test" ], - "figure_path": "./MISSQA/figures/2_2411.03823v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.03823v1_figure_1.png", "paperid": "2411.03823v1", - "paper_path": "./MISSQA/papers/2411.03823v1.json", + "paper_path": "./MISS-QA/papers/2411.03823v1.json", "figure_id": "2411.03823v1_figure_1.png", "caption": "Figure 1: A description of Multimodal Data Contamination (left) and the overview of proposed MM-Detect framework (right).", "qtype": "Implementation_Details" @@ -2522,9 +2522,9 @@ "masked_elements": [ "Context Optimizer" ], - "figure_path": "./MISSQA/figures/0_2408.09434v2_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2408.09434v2_figure_1.png", "paperid": "2408.09434v2", - "paper_path": "./MISSQA/papers/2408.09434v2.json", + "paper_path": "./MISS-QA/papers/2408.09434v2.json", "figure_id": "2408.09434v2_figure_1.png", "caption": "Figure 1: Hysem Architecture diagram", "qtype": "Literature_Background" @@ -2551,9 +2551,9 @@ "masked_elements": [ "Syntax Corrector" ], - "figure_path": "./MISSQA/figures/1_2408.09434v2_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2408.09434v2_figure_1.png", "paperid": "2408.09434v2", - "paper_path": "./MISSQA/papers/2408.09434v2.json", + "paper_path": "./MISS-QA/papers/2408.09434v2.json", "figure_id": "2408.09434v2_figure_1.png", "caption": "Figure 1: Hysem Architecture diagram", "qtype": "Literature_Background" @@ -2580,9 +2580,9 @@ "masked_elements": [ "Token-based Encoding" ], - "figure_path": "./MISSQA/figures/2_2408.09434v2_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2408.09434v2_figure_1.png", "paperid": "2408.09434v2", - "paper_path": "./MISSQA/papers/2408.09434v2.json", + "paper_path": "./MISS-QA/papers/2408.09434v2.json", "figure_id": "2408.09434v2_figure_1.png", "caption": "Figure 1: Hysem Architecture diagram", "qtype": "Experimental_Results" @@ -2602,9 +2602,9 @@ "masked_elements": [ "Code Executor" ], - "figure_path": "./MISSQA/figures/3_2408.09434v2_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2408.09434v2_figure_1.png", "paperid": "2408.09434v2", - "paper_path": "./MISSQA/papers/2408.09434v2.json", + "paper_path": "./MISS-QA/papers/2408.09434v2.json", "figure_id": "2408.09434v2_figure_1.png", "caption": "Figure 1: Hysem Architecture diagram", "qtype": "Experimental_Results" @@ -2630,9 +2630,9 @@ "masked_elements": [ "global-local cooperation" ], - "figure_path": "./MISSQA/figures/0_2409.19610v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2409.19610v1_figure_1.png", "paperid": "2409.19610v1", - "paper_path": "./MISSQA/papers/2409.19610v1.json", + "paper_path": "./MISS-QA/papers/2409.19610v1.json", "figure_id": "2409.19610v1_figure_1.png", "caption": "Figure 1: The image demonstrates the framework of the PromptFolio algorithm. The algorithm updates the global prompt and local prompt while keeping the weights of the fixed vision-language pretrained model unchanged. Additionally, it aggregates the global prompts from each client. 
The right side of the image intuitively demonstrates the advantages of global-local cooperation for performance when global and local are treated as two assets.", "qtype": "Experimental_Results" @@ -2653,9 +2653,9 @@ "masked_elements": [ "image encoder" ], - "figure_path": "./MISSQA/figures/1_2409.19610v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2409.19610v1_figure_1.png", "paperid": "2409.19610v1", - "paper_path": "./MISSQA/papers/2409.19610v1.json", + "paper_path": "./MISS-QA/papers/2409.19610v1.json", "figure_id": "2409.19610v1_figure_1.png", "caption": "Figure 1: The image demonstrates the framework of the PromptFolio algorithm. The algorithm updates the global prompt and local prompt while keeping the weights of the fixed vision-language pretrained model unchanged. Additionally, it aggregates the global prompts from each client. The right side of the image intuitively demonstrates the advantages of global-local cooperation for performance when global and local are treated as two assets.", "qtype": "Others" @@ -2676,9 +2676,9 @@ "masked_elements": [ "Margin Loss", "Projection Head" ], - "figure_path": "./MISSQA/figures/0_2411.18290v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.18290v1_figure_2.png", "paperid": "2411.18290v1", - "paper_path": "./MISSQA/papers/2411.18290v1.json", + "paper_path": "./MISS-QA/papers/2411.18290v1.json", "figure_id": "2411.18290v1_figure_2.png", "caption": "Figure 2: (a) Our SATs model begins with the encoder-decoder module, which uses shared weights to process two input signals and encode them into a discriminative representation. This representation is then further processed through a non-linear projection module and a distance metric learning module to produce feature maps.\n(b) A graphical representation of our metric learning strategy.\nCircles indicate individual CT images, I, while red squares highlight the tumors. The tumors are composed of A and B, representing symmetrical and asymmetrical lesions relative to the central sagittal axis of symmetry, respectively.", "qtype": "Others" @@ -2705,9 +2705,9 @@ "masked_elements": [ "Projection Head" ], - "figure_path": "./MISSQA/figures/1_2411.18290v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2411.18290v1_figure_2.png", "paperid": "2411.18290v1", - "paper_path": "./MISSQA/papers/2411.18290v1.json", + "paper_path": "./MISS-QA/papers/2411.18290v1.json", "figure_id": "2411.18290v1_figure_2.png", "caption": "Figure 2: (a) Our SATs model begins with the encoder-decoder module, which uses shared weights to process two input signals and encode them into a discriminative representation. This representation is then further processed through a non-linear projection module and a distance metric learning module to produce feature maps.\n(b) A graphical representation of our metric learning strategy.\nCircles indicate individual CT images, I, while red squares highlight the tumors. The tumors are composed of A and B, representing symmetrical and asymmetrical lesions relative to the central sagittal axis of symmetry, respectively.", "qtype": "Design_Rationale" @@ -2733,9 +2733,9 @@ "masked_elements": [ "accuracy loss" ], - "figure_path": "./MISSQA/figures/0_2411.05324v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.05324v1_figure_1.png", "paperid": "2411.05324v1", - "paper_path": "./MISSQA/papers/2411.05324v1.json", + "paper_path": "./MISS-QA/papers/2411.05324v1.json", "figure_id": "2411.05324v1_figure_1.png", "caption": "Fig. 
1: The SASWISE pipeline efficiently estimates uncertainty while maintaining or enhancing pre-trained model performance. It begins by training a supervised model to convergence, followed by creating multiple candidate blocks in each block position. These blocks are shuffled and recombined into new models. In the diversification stage, two unique models are selected from the pool and trained on the same data sample. This stage involves calculating and utilizing the accuracy loss between the model being updated and the ground truth, along with the consistency loss between the two models, to only update the model being refined. After enough diversification training epochs, the best models from the partial or complete model pool are used to generate results from a single input. The final results for tasks with continuous or discrete data types are determined using median or majority voting methods, respectively, with uncertainty maps produced using standard deviation or majority ratio.", "qtype": "Design_Rationale" @@ -2762,9 +2762,9 @@ "Median", "Result Pool of 128 Predictions" ], - "figure_path": "./MISSQA/figures/1_2411.05324v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.05324v1_figure_1.png", "paperid": "2411.05324v1", - "paper_path": "./MISSQA/papers/2411.05324v1.json", + "paper_path": "./MISS-QA/papers/2411.05324v1.json", "figure_id": "2411.05324v1_figure_1.png", "caption": "Fig. 1: The SASWISE pipeline efficiently estimates uncertainty while maintaining or enhancing pre-trained model performance. It begins by training a supervised model to convergence, followed by creating multiple candidate blocks in each block position. These blocks are shuffled and recombined into new models. In the diversification stage, two unique models are selected from the pool and trained on the same data sample. This stage involves calculating and utilizing the accuracy loss between the model being updated and the ground truth, along with the consistency loss between the two models, to only update the model being refined. After enough diversification training epochs, the best models from the partial or complete model pool are used to generate results from a single input. The final results for tasks with continuous or discrete data types are determined using median or majority voting methods, respectively, with uncertainty maps produced using standard deviation or majority ratio.", "qtype": "Design_Rationale" @@ -2792,9 +2792,9 @@ "masked_elements": [ "accuracy loss" ], - "figure_path": "./MISSQA/figures/2_2411.05324v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.05324v1_figure_1.png", "paperid": "2411.05324v1", - "paper_path": "./MISSQA/papers/2411.05324v1.json", + "paper_path": "./MISS-QA/papers/2411.05324v1.json", "figure_id": "2411.05324v1_figure_1.png", "caption": "Fig. 1: The SASWISE pipeline efficiently estimates uncertainty while maintaining or enhancing pre-trained model performance. It begins by training a supervised model to convergence, followed by creating multiple candidate blocks in each block position. These blocks are shuffled and recombined into new models. In the diversification stage, two unique models are selected from the pool and trained on the same data sample. This stage involves calculating and utilizing the accuracy loss between the model being updated and the ground truth, along with the consistency loss between the two models, to only update the model being refined. 
After enough diversification training epochs, the best models from the partial or complete model pool are used to generate results from a single input. The final results for tasks with continuous or discrete data types are determined using median or majority voting methods, respectively, with uncertainty maps produced using standard deviation or majority ratio.", "qtype": "Implementation_Details" @@ -2819,9 +2819,9 @@ "masked_elements": [ "Inter Cluster Sample" ], - "figure_path": "./MISSQA/figures/0_2408.03573v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2408.03573v1_figure_1.png", "paperid": "2408.03573v1", - "paper_path": "./MISSQA/papers/2408.03573v1.json", + "paper_path": "./MISS-QA/papers/2408.03573v1.json", "figure_id": "2408.03573v1_figure_1.png", "caption": "Figure 1: Overall Workflow of AcTracer.\n\\footnotesize1⃝: Internal and external information extraction from LLMs during inference.\n\\footnotesize2⃝: An automated search mechanism to identify a suitable number of clusters corresponding to a given LLM and test data. The geometry structure obtained through the clustering algorithm partitions the input test spaces for more efficient sampling.\n\\footnotesize3⃝: An intra-cluster and inter-cluster sampling strategy to actively select the next data point for labelling.", "qtype": "Implementation_Details" @@ -2848,9 +2848,9 @@ "Intra Cluster Sample", "LLM Confidence Distribution" ], - "figure_path": "./MISSQA/figures/1_2408.03573v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2408.03573v1_figure_1.png", "paperid": "2408.03573v1", - "paper_path": "./MISSQA/papers/2408.03573v1.json", + "paper_path": "./MISS-QA/papers/2408.03573v1.json", "figure_id": "2408.03573v1_figure_1.png", "caption": "Figure 1: Overall Workflow of AcTracer.\n\\footnotesize1⃝: Internal and external information extraction from LLMs during inference.\n\\footnotesize2⃝: An automated search mechanism to identify a suitable number of clusters corresponding to a given LLM and test data. The geometry structure obtained through the clustering algorithm partitions the input test spaces for more efficient sampling.\n\\footnotesize3⃝: An intra-cluster and inter-cluster sampling strategy to actively select the next data point for labelling.", "qtype": "Implementation_Details" @@ -2876,9 +2876,9 @@ "Cluster Search", "Adaptive Sampling" ], - "figure_path": "./MISSQA/figures/2_2408.03573v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2408.03573v1_figure_1.png", "paperid": "2408.03573v1", - "paper_path": "./MISSQA/papers/2408.03573v1.json", + "paper_path": "./MISS-QA/papers/2408.03573v1.json", "figure_id": "2408.03573v1_figure_1.png", "caption": "Figure 1: Overall Workflow of AcTracer.\n\\footnotesize1⃝: Internal and external information extraction from LLMs during inference.\n\\footnotesize2⃝: An automated search mechanism to identify a suitable number of clusters corresponding to a given LLM and test data. The geometry structure obtained through the clustering algorithm partitions the input test spaces for more efficient sampling.\n\\footnotesize3⃝: An intra-cluster and inter-cluster sampling strategy to actively select the next data point for labelling.", "qtype": "Literature_Background" @@ -2903,9 +2903,9 @@ "masked_elements": [ "Eq. 
Frame model" ], - "figure_path": "./MISSQA/figures/0_2408.06321v3_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2408.06321v3_figure_2.png", "paperid": "2408.06321v3", - "paper_path": "./MISSQA/papers/2408.06321v3.json", + "paper_path": "./MISS-QA/papers/2408.06321v3.json", "figure_id": "2408.06321v3_figure_2.png", "caption": "Figure 2: \nEqNIO (a) processes gravity-aligned IMU measurements,\n{(ai,ωi)}i=1nsuperscriptsubscriptsubscript𝑎𝑖subscript𝜔𝑖𝑖1𝑛\\{(a_{i},\\omega_{i})\\}_{i=1}^{n}{ ( italic_a start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT , italic_ω start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT ) } start_POSTSUBSCRIPT italic_i = 1 end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_n end_POSTSUPERSCRIPT.\nAn equivariant network (blue) predicts a canonical equivariant frame F𝐹Fitalic_F into which IMU measurements are mapped, i.e. canonicalized, yielding invariant inputs {(ai′,ωi′)}i=1nsuperscriptsubscriptsubscriptsuperscript𝑎′𝑖subscriptsuperscript𝜔′𝑖𝑖1𝑛\\{(a^{\\prime}_{i},\\omega^{\\prime}_{i})\\}_{i=1}^{n}{ ( italic_a start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT , italic_ω start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT ) } start_POSTSUBSCRIPT italic_i = 1 end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_n end_POSTSUPERSCRIPT.\nA conventional neural network then predicts invariant displacement (d′superscript𝑑′d^{\\prime}italic_d start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT) and covariance (Σ′superscriptΣ′\\Sigma^{\\prime}roman_Σ start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT) which are mapped back yielding equivariant displacement (d𝑑ditalic_d) and covariance (ΣΣ\\Sigmaroman_Σ).\nThe equivariant network (b) takes as input n×C0s𝑛superscriptsubscript𝐶0𝑠n\\times C_{0}^{s}italic_n × italic_C start_POSTSUBSCRIPT 0 end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_s end_POSTSUPERSCRIPT scalars, and n×C0v𝑛superscriptsubscript𝐶0𝑣n\\times C_{0}^{v}italic_n × italic_C start_POSTSUBSCRIPT 0 end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_v end_POSTSUPERSCRIPT vectors:\nVectors are processed by equivariant layers (Eq-L, Eq-Conv, Eq-LN), while scalars are separately processed with conventional layers.\nEq-L (green) uses two weights W1,W2subscript𝑊1subscript𝑊2W_{1},W_{2}italic_W start_POSTSUBSCRIPT 1 end_POSTSUBSCRIPT , italic_W start_POSTSUBSCRIPT 2 end_POSTSUBSCRIPT for SO(2) equivariance, and only W1subscript𝑊1W_{1}italic_W start_POSTSUBSCRIPT 1 end_POSTSUBSCRIPT for O(2) equivariance. Eq-Conv (pink) uses Eq-L to perform 1-D convolutions over time. 
The equivariant non-linear layer (orange) mixes vector and scalar features.", "qtype": "Literature_Background" @@ -2931,9 +2931,9 @@ "equivariant non-linear layer", "scalars and vectors" ], - "figure_path": "./MISSQA/figures/1_2408.06321v3_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2408.06321v3_figure_2.png", "paperid": "2408.06321v3", - "paper_path": "./MISSQA/papers/2408.06321v3.json", + "paper_path": "./MISS-QA/papers/2408.06321v3.json", "figure_id": "2408.06321v3_figure_2.png", "caption": "Figure 2: \nEqNIO (a) processes gravity-aligned IMU measurements,\n{(ai,ωi)}i=1nsuperscriptsubscriptsubscript𝑎𝑖subscript𝜔𝑖𝑖1𝑛\\{(a_{i},\\omega_{i})\\}_{i=1}^{n}{ ( italic_a start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT , italic_ω start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT ) } start_POSTSUBSCRIPT italic_i = 1 end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_n end_POSTSUPERSCRIPT.\nAn equivariant network (blue) predicts a canonical equivariant frame F𝐹Fitalic_F into which IMU measurements are mapped, i.e. canonicalized, yielding invariant inputs {(ai′,ωi′)}i=1nsuperscriptsubscriptsubscriptsuperscript𝑎′𝑖subscriptsuperscript𝜔′𝑖𝑖1𝑛\\{(a^{\\prime}_{i},\\omega^{\\prime}_{i})\\}_{i=1}^{n}{ ( italic_a start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT , italic_ω start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT ) } start_POSTSUBSCRIPT italic_i = 1 end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_n end_POSTSUPERSCRIPT.\nA conventional neural network then predicts invariant displacement (d′superscript𝑑′d^{\\prime}italic_d start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT) and covariance (Σ′superscriptΣ′\\Sigma^{\\prime}roman_Σ start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT) which are mapped back yielding equivariant displacement (d𝑑ditalic_d) and covariance (ΣΣ\\Sigmaroman_Σ).\nThe equivariant network (b) takes as input n×C0s𝑛superscriptsubscript𝐶0𝑠n\\times C_{0}^{s}italic_n × italic_C start_POSTSUBSCRIPT 0 end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_s end_POSTSUPERSCRIPT scalars, and n×C0v𝑛superscriptsubscript𝐶0𝑣n\\times C_{0}^{v}italic_n × italic_C start_POSTSUBSCRIPT 0 end_POSTSUBSCRIPT start_POSTSUPERSCRIPT italic_v end_POSTSUPERSCRIPT vectors:\nVectors are processed by equivariant layers (Eq-L, Eq-Conv, Eq-LN), while scalars are separately processed with conventional layers.\nEq-L (green) uses two weights W1,W2subscript𝑊1subscript𝑊2W_{1},W_{2}italic_W start_POSTSUBSCRIPT 1 end_POSTSUBSCRIPT , italic_W start_POSTSUBSCRIPT 2 end_POSTSUBSCRIPT for SO(2) equivariance, and only W1subscript𝑊1W_{1}italic_W start_POSTSUBSCRIPT 1 end_POSTSUBSCRIPT for O(2) equivariance. Eq-Conv (pink) uses Eq-L to perform 1-D convolutions over time. The equivariant non-linear layer (orange) mixes vector and scalar features.", "qtype": "Experimental_Results" @@ -2960,9 +2960,9 @@ "incremental view addition", "PnP pose init probe" ], - "figure_path": "./MISSQA/figures/0_2408.16690v2_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2408.16690v2_figure_2.png", "paperid": "2408.16690v2", - "paper_path": "./MISSQA/papers/2408.16690v2.json", + "paper_path": "./MISS-QA/papers/2408.16690v2.json", "figure_id": "2408.16690v2_figure_2.png", "caption": "Figure 2: Method overview. We leverage generic objects in few-view input images as pose probes. The pose probe is automatically segmented by SAM with prompts, and initialized by a cube shape. 
The method does not introduce extra burden but successfully facilitates pose estimation in feature-sparse scenes.", "qtype": "Experimental_Results" @@ -2983,9 +2983,9 @@ "masked_elements": [ "cube init." ], - "figure_path": "./MISSQA/figures/1_2408.16690v2_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2408.16690v2_figure_2.png", "paperid": "2408.16690v2", - "paper_path": "./MISSQA/papers/2408.16690v2.json", + "paper_path": "./MISS-QA/papers/2408.16690v2.json", "figure_id": "2408.16690v2_figure_2.png", "caption": "Figure 2: Method overview. We leverage generic objects in few-view input images as pose probes. The pose probe is automatically segmented by SAM with prompts, and initialized by a cube shape. The method does not introduce extra burden but successfully facilitates pose estimation in feature-sparse scenes.", "qtype": "Others" @@ -3004,9 +3004,9 @@ "masked_elements": [ "pose probes" ], - "figure_path": "./MISSQA/figures/2_2408.16690v2_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2408.16690v2_figure_2.png", "paperid": "2408.16690v2", - "paper_path": "./MISSQA/papers/2408.16690v2.json", + "paper_path": "./MISS-QA/papers/2408.16690v2.json", "figure_id": "2408.16690v2_figure_2.png", "caption": "Figure 2: Method overview. We leverage generic objects in few-view input images as pose probes. The pose probe is automatically segmented by SAM with prompts, and initialized by a cube shape. The method does not introduce extra burden but successfully facilitates pose estimation in feature-sparse scenes.", "qtype": "Others" @@ -3025,9 +3025,9 @@ "masked_elements": [ "LLM-based Prompt" ], - "figure_path": "./MISSQA/figures/0_2408.05905v2_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2408.05905v2_figure_2.png", "paperid": "2408.05905v2", - "paper_path": "./MISSQA/papers/2408.05905v2.json", + "paper_path": "./MISS-QA/papers/2408.05905v2.json", "figure_id": "2408.05905v2_figure_2.png", "caption": "Figure 2. The pipeline of our proposed STPrompt.", "qtype": "Others" @@ -3046,9 +3046,9 @@ "masked_elements": [ "TopK" ], - "figure_path": "./MISSQA/figures/1_2408.05905v2_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2408.05905v2_figure_2.png", "paperid": "2408.05905v2", - "paper_path": "./MISSQA/papers/2408.05905v2.json", + "paper_path": "./MISS-QA/papers/2408.05905v2.json", "figure_id": "2408.05905v2_figure_2.png", "caption": "Figure 2. The pipeline of our proposed STPrompt.", "qtype": "Others" @@ -3074,9 +3074,9 @@ "Spatial Attention Aggregation", "Temporal Adapter" ], - "figure_path": "./MISSQA/figures/2_2408.05905v2_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2408.05905v2_figure_2.png", "paperid": "2408.05905v2", - "paper_path": "./MISSQA/papers/2408.05905v2.json", + "paper_path": "./MISS-QA/papers/2408.05905v2.json", "figure_id": "2408.05905v2_figure_2.png", "caption": "Figure 2. The pipeline of our proposed STPrompt.", "qtype": "Design_Rationale" @@ -3102,9 +3102,9 @@ "Binary Classifier", "MIL-Align" ], - "figure_path": "./MISSQA/figures/3_2408.05905v2_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2408.05905v2_figure_2.png", "paperid": "2408.05905v2", - "paper_path": "./MISSQA/papers/2408.05905v2.json", + "paper_path": "./MISS-QA/papers/2408.05905v2.json", "figure_id": "2408.05905v2_figure_2.png", "caption": "Figure 2. 
The pipeline of our proposed STPrompt.", "qtype": "Design_Rationale" @@ -3131,9 +3131,9 @@ "Joint Sampling Set", "Independent Component-wise Sampling" ], - "figure_path": "./MISSQA/figures/0_2411.07591v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.07591v1_figure_2.png", "paperid": "2411.07591v1", - "paper_path": "./MISSQA/papers/2411.07591v1.json", + "paper_path": "./MISS-QA/papers/2411.07591v1.json", "figure_id": "2411.07591v1_figure_2.png", "caption": "Figure 2: Synchronous Sampling with Exclusive Scopes.", "qtype": "Design_Rationale" @@ -3159,9 +3159,9 @@ "Independent Component-wise Sampling", "Sampling Set for Component 1" ], - "figure_path": "./MISSQA/figures/1_2411.07591v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2411.07591v1_figure_2.png", "paperid": "2411.07591v1", - "paper_path": "./MISSQA/papers/2411.07591v1.json", + "paper_path": "./MISS-QA/papers/2411.07591v1.json", "figure_id": "2411.07591v1_figure_2.png", "caption": "Figure 2: Synchronous Sampling with Exclusive Scopes.", "qtype": "Implementation_Details" @@ -3187,9 +3187,9 @@ "Synchronous Component-wise Sampling", "Joint Sampling Set for Components 1&2" ], - "figure_path": "./MISSQA/figures/2_2411.07591v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2411.07591v1_figure_2.png", "paperid": "2411.07591v1", - "paper_path": "./MISSQA/papers/2411.07591v1.json", + "paper_path": "./MISS-QA/papers/2411.07591v1.json", "figure_id": "2411.07591v1_figure_2.png", "caption": "Figure 2: Synchronous Sampling with Exclusive Scopes.", "qtype": "Implementation_Details" @@ -3214,9 +3214,9 @@ "masked_elements": [ "MTB" ], - "figure_path": "./MISSQA/figures/0_2409.09588v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2409.09588v1_figure_2.png", "paperid": "2409.09588v1", - "paper_path": "./MISSQA/papers/2409.09588v1.json", + "paper_path": "./MISS-QA/papers/2409.09588v1.json", "figure_id": "2409.09588v1_figure_2.png", "caption": "Figure 2: Overall architecture of our GLCONet method. We use ResNet-50/Swin Transformer/PVT as the encoder and propose a collaborative optimization strategy (COS) that contains a global perception module (GPM), a local refinement module (LRM) and a group-wise hybrid interaction module (GHIM) to simultaneously model long-range dependencies and local details. In addition, we design an adjacent reverse decoder (ARD) to integrate the complementary information with different layers through cross-layer aggregation and reverse optimization.", "qtype": "Implementation_Details" @@ -3247,9 +3247,9 @@ "GPM", "LRM" ], - "figure_path": "./MISSQA/figures/1_2409.09588v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2409.09588v1_figure_2.png", "paperid": "2409.09588v1", - "paper_path": "./MISSQA/papers/2409.09588v1.json", + "paper_path": "./MISS-QA/papers/2409.09588v1.json", "figure_id": "2409.09588v1_figure_2.png", "caption": "Figure 2: Overall architecture of our GLCONet method. We use ResNet-50/Swin Transformer/PVT as the encoder and propose a collaborative optimization strategy (COS) that contains a global perception module (GPM), a local refinement module (LRM) and a group-wise hybrid interaction module (GHIM) to simultaneously model long-range dependencies and local details. 
In addition, we design an adjacent reverse decoder (ARD) to integrate the complementary information with different layers through cross-layer aggregation and reverse optimization.", "qtype": "Literature_Background" @@ -3276,9 +3276,9 @@ "GHIM", "ARD" ], - "figure_path": "./MISSQA/figures/2_2409.09588v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2409.09588v1_figure_2.png", "paperid": "2409.09588v1", - "paper_path": "./MISSQA/papers/2409.09588v1.json", + "paper_path": "./MISS-QA/papers/2409.09588v1.json", "figure_id": "2409.09588v1_figure_2.png", "caption": "Figure 2: Overall architecture of our GLCONet method. We use ResNet-50/Swin Transformer/PVT as the encoder and propose a collaborative optimization strategy (COS) that contains a global perception module (GPM), a local refinement module (LRM) and a group-wise hybrid interaction module (GHIM) to simultaneously model long-range dependencies and local details. In addition, we design an adjacent reverse decoder (ARD) to integrate the complementary information with different layers through cross-layer aggregation and reverse optimization.", "qtype": "Literature_Background" @@ -3298,9 +3298,9 @@ "masked_elements": [ "LLM" ], - "figure_path": "./MISSQA/figures/0_2410.13523v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2410.13523v1_figure_1.png", "paperid": "2410.13523v1", - "paper_path": "./MISSQA/papers/2410.13523v1.json", + "paper_path": "./MISS-QA/papers/2410.13523v1.json", "figure_id": "2410.13523v1_figure_1.png", "caption": "Figure 1: \nComparison of real image-text datasets and synthetic datasets.\n(a): The real image-text dataset, MIMIC-CXR (Johnson et al., 2019b), while authentic, often contains imperfections such as long-tailed data distribution, unpaired images and text, and low-quality CXR images, which limit the performance of MedVLP models pretrained on this dataset.\n(b): The synthetic dataset generation process uses clinical entities as prompts to an LLM (e.g., Llama3.1 (AI@Meta, 2024)) to generate synthetic reports. These reports are then used to create synthetic images through RoentGen (Bluethgen et al., 2024). We propose an automated pipeline to control the dataset distribution, ensuring it is balanced and includes paired image-text samples.", "qtype": "Literature_Background" @@ -3326,9 +3326,9 @@ "masked_elements": [ "Balanced Dataset" ], - "figure_path": "./MISSQA/figures/1_2410.13523v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2410.13523v1_figure_1.png", "paperid": "2410.13523v1", - "paper_path": "./MISSQA/papers/2410.13523v1.json", + "paper_path": "./MISS-QA/papers/2410.13523v1.json", "figure_id": "2410.13523v1_figure_1.png", "caption": "Figure 1: \nComparison of real image-text datasets and synthetic datasets.\n(a): The real image-text dataset, MIMIC-CXR (Johnson et al., 2019b), while authentic, often contains imperfections such as long-tailed data distribution, unpaired images and text, and low-quality CXR images, which limit the performance of MedVLP models pretrained on this dataset.\n(b): The synthetic dataset generation process uses clinical entities as prompts to an LLM (e.g., Llama3.1 (AI@Meta, 2024)) to generate synthetic reports. These reports are then used to create synthetic images through RoentGen (Bluethgen et al., 2024). 
We propose an automated pipeline to control the dataset distribution, ensuring it is balanced and includes paired image-text samples.", "qtype": "Literature_Background" @@ -3357,9 +3357,9 @@ "Clinical Entities", "LLM" ], - "figure_path": "./MISSQA/figures/2_2410.13523v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2410.13523v1_figure_1.png", "paperid": "2410.13523v1", - "paper_path": "./MISSQA/papers/2410.13523v1.json", + "paper_path": "./MISS-QA/papers/2410.13523v1.json", "figure_id": "2410.13523v1_figure_1.png", "caption": "Figure 1: \nComparison of real image-text datasets and synthetic datasets.\n(a): The real image-text dataset, MIMIC-CXR (Johnson et al., 2019b), while authentic, often contains imperfections such as long-tailed data distribution, unpaired images and text, and low-quality CXR images, which limit the performance of MedVLP models pretrained on this dataset.\n(b): The synthetic dataset generation process uses clinical entities as prompts to an LLM (e.g., Llama3.1 (AI@Meta, 2024)) to generate synthetic reports. These reports are then used to create synthetic images through RoentGen (Bluethgen et al., 2024). We propose an automated pipeline to control the dataset distribution, ensuring it is balanced and includes paired image-text samples.", "qtype": "Experimental_Results" @@ -3385,9 +3385,9 @@ "Text2Image Model", "High Fidelity Image" ], - "figure_path": "./MISSQA/figures/3_2410.13523v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2410.13523v1_figure_1.png", "paperid": "2410.13523v1", - "paper_path": "./MISSQA/papers/2410.13523v1.json", + "paper_path": "./MISS-QA/papers/2410.13523v1.json", "figure_id": "2410.13523v1_figure_1.png", "caption": "Figure 1: \nComparison of real image-text datasets and synthetic datasets.\n(a): The real image-text dataset, MIMIC-CXR (Johnson et al., 2019b), while authentic, often contains imperfections such as long-tailed data distribution, unpaired images and text, and low-quality CXR images, which limit the performance of MedVLP models pretrained on this dataset.\n(b): The synthetic dataset generation process uses clinical entities as prompts to an LLM (e.g., Llama3.1 (AI@Meta, 2024)) to generate synthetic reports. These reports are then used to create synthetic images through RoentGen (Bluethgen et al., 2024). 
We propose an automated pipeline to control the dataset distribution, ensuring it is balanced and includes paired image-text samples.", "qtype": "Experimental_Results" @@ -3418,9 +3418,9 @@ "masked_elements": [ "GMM" ], - "figure_path": "./MISSQA/figures/0_2407.05358v3_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.05358v3_figure_1.png", "paperid": "2407.05358v3", - "paper_path": "./MISSQA/papers/2407.05358v3.json", + "paper_path": "./MISS-QA/papers/2407.05358v3.json", "figure_id": "2407.05358v3_figure_1.png", "caption": "Figure 1: Comparing conventional AVS methods [7, 13] with our CPM approach, CPM inherits the class-agnostic query from transformer-based methods and integrates class-conditional prompts sampled from the learned joint-modal data distribution to achieve three objectives: 1) learn disentangled audio partitioning, 2) facilitate semantic-guided object identification, and 3) promote more explicit audio-visual contrastive learning.", "qtype": "Experimental_Results" @@ -3440,9 +3440,9 @@ "masked_elements": [ "GMM" ], - "figure_path": "./MISSQA/figures/1_2407.05358v3_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.05358v3_figure_1.png", "paperid": "2407.05358v3", - "paper_path": "./MISSQA/papers/2407.05358v3.json", + "paper_path": "./MISS-QA/papers/2407.05358v3.json", "figure_id": "2407.05358v3_figure_1.png", "caption": "Figure 1: Comparing conventional AVS methods [7, 13] with our CPM approach, CPM inherits the class-agnostic query from transformer-based methods and integrates class-conditional prompts sampled from the learned joint-modal data distribution to achieve three objectives: 1) learn disentangled audio partitioning, 2) facilitate semantic-guided object identification, and 3) promote more explicit audio-visual contrastive learning.", "qtype": "Others" @@ -3462,9 +3462,9 @@ "masked_elements": [ "Bipartite Matching to GT" ], - "figure_path": "./MISSQA/figures/2_2407.05358v3_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2407.05358v3_figure_1.png", "paperid": "2407.05358v3", - "paper_path": "./MISSQA/papers/2407.05358v3.json", + "paper_path": "./MISS-QA/papers/2407.05358v3.json", "figure_id": "2407.05358v3_figure_1.png", "caption": "Figure 1: Comparing conventional AVS methods [7, 13] with our CPM approach, CPM inherits the class-agnostic query from transformer-based methods and integrates class-conditional prompts sampled from the learned joint-modal data distribution to achieve three objectives: 1) learn disentangled audio partitioning, 2) facilitate semantic-guided object identification, and 3) promote more explicit audio-visual contrastive learning.", "qtype": "Others" @@ -3484,9 +3484,9 @@ "masked_elements": [ "Self-Validating Filtering" ], - "figure_path": "./MISSQA/figures/0_2410.12761v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2410.12761v1_figure_2.png", "paperid": "2410.12761v1", - "paper_path": "./MISSQA/papers/2410.12761v1.json", + "paper_path": "./MISS-QA/papers/2410.12761v1.json", "figure_id": "2410.12761v1_figure_2.png", "caption": "Figure 2: Illustration of SAFREE framework. Based on proximity analysis between the masked token embeddings and the toxic subspace \\mathcal{C}, we detect unsafe tokens and project them into orthogonal to the toxic concept (in red), but still be in the input space \\mathcal{I} (in green). SAFREE adaptively controls the filtering strength in an input-dependent manner, which also regulates a latent-level re-attention mechanism. 
Note that our approach can be broadly applied to various image and video diffusion backbones.", "qtype": "Others" @@ -3506,9 +3506,9 @@ "masked_elements": [ "Selective Orthogonal Projection" ], - "figure_path": "./MISSQA/figures/1_2410.12761v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2410.12761v1_figure_2.png", "paperid": "2410.12761v1", - "paper_path": "./MISSQA/papers/2410.12761v1.json", + "paper_path": "./MISS-QA/papers/2410.12761v1.json", "figure_id": "2410.12761v1_figure_2.png", "caption": "Figure 2: Illustration of SAFREE framework. Based on proximity analysis between the masked token embeddings and the toxic subspace \\mathcal{C}, we detect unsafe tokens and project them into orthogonal to the toxic concept (in red), but still be in the input space \\mathcal{I} (in green). SAFREE adaptively controls the filtering strength in an input-dependent manner, which also regulates a latent-level re-attention mechanism. Note that our approach can be broadly applied to various image and video diffusion backbones.", "qtype": "Others" @@ -3534,9 +3534,9 @@ "masked_elements": [ "Self-Validating Filtering", "denoising steps" ], - "figure_path": "./MISSQA/figures/2_2410.12761v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2410.12761v1_figure_2.png", "paperid": "2410.12761v1", - "paper_path": "./MISSQA/papers/2410.12761v1.json", + "paper_path": "./MISS-QA/papers/2410.12761v1.json", "figure_id": "2410.12761v1_figure_2.png", "caption": "Figure 2: Illustration of SAFREE framework. Based on proximity analysis between the masked token embeddings and the toxic subspace \\mathcal{C}, we detect unsafe tokens and project them into orthogonal to the toxic concept (in red), but still be in the input space \\mathcal{I} (in green). SAFREE adaptively controls the filtering strength in an input-dependent manner, which also regulates a latent-level re-attention mechanism. Note that our approach can be broadly applied to various image and video diffusion backbones.", "qtype": "Design_Rationale" @@ -3562,9 +3562,9 @@ "masked_elements": [ "Selective Orthogonal Projection", "Toxic Token Detection" ], - "figure_path": "./MISSQA/figures/3_2410.12761v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2410.12761v1_figure_2.png", "paperid": "2410.12761v1", - "paper_path": "./MISSQA/papers/2410.12761v1.json", + "paper_path": "./MISS-QA/papers/2410.12761v1.json", "figure_id": "2410.12761v1_figure_2.png", "caption": "Figure 2: Illustration of SAFREE framework. Based on proximity analysis between the masked token embeddings and the toxic subspace \\mathcal{C}, we detect unsafe tokens and project them into orthogonal to the toxic concept (in red), but still be in the input space \\mathcal{I} (in green). SAFREE adaptively controls the filtering strength in an input-dependent manner, which also regulates a latent-level re-attention mechanism. Note that our approach can be broadly applied to various image and video diffusion backbones.", "qtype": "Design_Rationale" @@ -3592,9 +3592,9 @@ "masked_elements": [ "Gram Matrix", "MLP" ], - "figure_path": "./MISSQA/figures/0_2411.17226v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.17226v1_figure_2.png", "paperid": "2411.17226v1", - "paper_path": "./MISSQA/papers/2411.17226v1.json", + "paper_path": "./MISS-QA/papers/2411.17226v1.json", "figure_id": "2411.17226v1_figure_2.png", "caption": "Figure 2: The architecture of MWFormer. 
The main image processing network consists of a Transformer encoder, a Transformer decoder, and convolution tails.\n(a) A feature extraction network learns to generate some of the parameters of the Transformer blocks and intra-patch Transformer blocks in the main network, thereby partially controlling the production of intermediate feature maps. (b) The Transformer block in the encoder of the main network, which is guided by the feature vector. (c) Transformer decoder of the main network, whose queries are learnable parameters.", "qtype": "Design_Rationale" @@ -3620,9 +3620,9 @@ "masked_elements": [ "Depthwise Convolution" ], - "figure_path": "./MISSQA/figures/1_2411.17226v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2411.17226v1_figure_2.png", "paperid": "2411.17226v1", - "paper_path": "./MISSQA/papers/2411.17226v1.json", + "paper_path": "./MISS-QA/papers/2411.17226v1.json", "figure_id": "2411.17226v1_figure_2.png", "caption": "Figure 2: The architecture of MWFormer. The main image processing network consists of a Transformer encoder, a Transformer decoder, and convolution tails.\n(a) A feature extraction network learns to generate some of the parameters of the Transformer blocks and intra-patch Transformer blocks in the main network, thereby partially controlling the production of intermediate feature maps. (b) The Transformer block in the encoder of the main network, which is guided by the feature vector. (c) Transformer decoder of the main network, whose queries are learnable parameters.", "qtype": "Design_Rationale" @@ -3648,9 +3648,9 @@ "HyperMLP", "Depthwise Conv" ], - "figure_path": "./MISSQA/figures/2_2411.17226v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2411.17226v1_figure_2.png", "paperid": "2411.17226v1", - "paper_path": "./MISSQA/papers/2411.17226v1.json", + "paper_path": "./MISS-QA/papers/2411.17226v1.json", "figure_id": "2411.17226v1_figure_2.png", "caption": "Figure 2: The architecture of MWFormer. The main image processing network consists of a Transformer encoder, a Transformer decoder, and convolution tails.\n(a) A feature extraction network learns to generate some of the parameters of the Transformer blocks and intra-patch Transformer blocks in the main network, thereby partially controlling the production of intermediate feature maps. (b) The Transformer block in the encoder of the main network, which is guided by the feature vector. (c) Transformer decoder of the main network, whose queries are learnable parameters.", "qtype": "Implementation_Details" @@ -3677,9 +3677,9 @@ "Gram Matrix", "MLP" ], - "figure_path": "./MISSQA/figures/3_2411.17226v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2411.17226v1_figure_2.png", "paperid": "2411.17226v1", - "paper_path": "./MISSQA/papers/2411.17226v1.json", + "paper_path": "./MISS-QA/papers/2411.17226v1.json", "figure_id": "2411.17226v1_figure_2.png", "caption": "Figure 2: The architecture of MWFormer. The main image processing network consists of a Transformer encoder, a Transformer decoder, and convolution tails.\n(a) A feature extraction network learns to generate some of the parameters of the Transformer blocks and intra-patch Transformer blocks in the main network, thereby partially controlling the production of intermediate feature maps. (b) The Transformer block in the encoder of the main network, which is guided by the feature vector. 
(c) Transformer decoder of the main network, whose queries are learnable parameters.", "qtype": "Implementation_Details" @@ -3708,9 +3708,9 @@ "masked_elements": [ "Validator" ], - "figure_path": "./MISSQA/figures/0_2407.07921v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.07921v1_figure_1.png", "paperid": "2407.07921v1", - "paper_path": "./MISSQA/papers/2407.07921v1.json", + "paper_path": "./MISS-QA/papers/2407.07921v1.json", "figure_id": "2407.07921v1_figure_1.png", "caption": "Figure 1: DFLoc system overview. Clients handle tasks including model training, verification, and block mining, constituting a blockchain that can aggregate and distribute the global model, replacing the central server.", "qtype": "Implementation_Details" @@ -3736,9 +3736,9 @@ "Miner", "PoS Consensus" ], - "figure_path": "./MISSQA/figures/1_2407.07921v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.07921v1_figure_1.png", "paperid": "2407.07921v1", - "paper_path": "./MISSQA/papers/2407.07921v1.json", + "paper_path": "./MISS-QA/papers/2407.07921v1.json", "figure_id": "2407.07921v1_figure_1.png", "caption": "Figure 1: DFLoc system overview. Clients handle tasks including model training, verification, and block mining, constituting a blockchain that can aggregate and distribute the global model, replacing the central server.", "qtype": "Implementation_Details" @@ -3767,9 +3767,9 @@ "Miner", "Local Model" ], - "figure_path": "./MISSQA/figures/2_2407.07921v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2407.07921v1_figure_1.png", "paperid": "2407.07921v1", - "paper_path": "./MISSQA/papers/2407.07921v1.json", + "paper_path": "./MISS-QA/papers/2407.07921v1.json", "figure_id": "2407.07921v1_figure_1.png", "caption": "Figure 1: DFLoc system overview. Clients handle tasks including model training, verification, and block mining, constituting a blockchain that can aggregate and distribute the global model, replacing the central server.", "qtype": "Literature_Background" @@ -3796,9 +3796,9 @@ "Adaptive Frequency Weight Matrix", "Frequency Distance Matrix" ], - "figure_path": "./MISSQA/figures/0_2408.13716v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2408.13716v1_figure_2.png", "paperid": "2408.13716v1", - "paper_path": "./MISSQA/papers/2408.13716v1.json", + "paper_path": "./MISS-QA/papers/2408.13716v1.json", "figure_id": "2408.13716v1_figure_2.png", "caption": "Figure 2: Overview of FreqINR. The inference process for INR-based models (light blue) is guided by our core component, Adaptive DCT Frequency Loss (dark blue), which leverages the Frequency Distance Matrix (light green) and the Adaptive Frequency Weight Matrix (dark green) to dynamically enhance fine detail learning.", "qtype": "Literature_Background" @@ -3827,9 +3827,9 @@ "Adaptive DCT Frequency Loss", "Spatial Loss" ], - "figure_path": "./MISSQA/figures/1_2408.13716v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2408.13716v1_figure_2.png", "paperid": "2408.13716v1", - "paper_path": "./MISSQA/papers/2408.13716v1.json", + "paper_path": "./MISS-QA/papers/2408.13716v1.json", "figure_id": "2408.13716v1_figure_2.png", "caption": "Figure 2: Overview of FreqINR. 
The inference process for INR-based models (light blue) is guided by our core component, Adaptive DCT Frequency Loss (dark blue), which leverages the Frequency Distance Matrix (light green) and the Adaptive Frequency Weight Matrix (dark green) to dynamically enhance fine detail learning.", "qtype": "Literature_Background" @@ -3859,9 +3859,9 @@ "Frequency Distance Matrix", "Adaptive Frequency Weight Matrix" ], - "figure_path": "./MISSQA/figures/2_2408.13716v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2408.13716v1_figure_2.png", "paperid": "2408.13716v1", - "paper_path": "./MISSQA/papers/2408.13716v1.json", + "paper_path": "./MISS-QA/papers/2408.13716v1.json", "figure_id": "2408.13716v1_figure_2.png", "caption": "Figure 2: Overview of FreqINR. The inference process for INR-based models (light blue) is guided by our core component, Adaptive DCT Frequency Loss (dark blue), which leverages the Frequency Distance Matrix (light green) and the Adaptive Frequency Weight Matrix (dark green) to dynamically enhance fine detail learning.", "qtype": "Experimental_Results" @@ -3890,9 +3890,9 @@ "Spatial Loss", "Adaptive DCT Frequency Loss" ], - "figure_path": "./MISSQA/figures/3_2408.13716v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2408.13716v1_figure_2.png", "paperid": "2408.13716v1", - "paper_path": "./MISSQA/papers/2408.13716v1.json", + "paper_path": "./MISS-QA/papers/2408.13716v1.json", "figure_id": "2408.13716v1_figure_2.png", "caption": "Figure 2: Overview of FreqINR. The inference process for INR-based models (light blue) is guided by our core component, Adaptive DCT Frequency Loss (dark blue), which leverages the Frequency Distance Matrix (light green) and the Adaptive Frequency Weight Matrix (dark green) to dynamically enhance fine detail learning.", "qtype": "Experimental_Results" @@ -3919,9 +3919,9 @@ "masked_elements": [ "Dataset construction" ], - "figure_path": "./MISSQA/figures/0_2410.23843v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2410.23843v1_figure_1.png", "paperid": "2410.23843v1", - "paper_path": "./MISSQA/papers/2410.23843v1.json", + "paper_path": "./MISS-QA/papers/2410.23843v1.json", "figure_id": "2410.23843v1_figure_1.png", "caption": "Fig. 1: This framework outlines the comprehensive approach to understanding the performance decline of edited models. On the left, traditional knowledge editing tasks are categorized into different types, each with distinct editing objectives: yes/no, a/b/c/d, and entity/event. On the right, our experiments are structured from both data and model perspectives. From the data perspective, we conduct three experiments: (a) a comprehensive performance evaluation of the model, (b) the construction of a Multi-Question Dataset (MQD), and (c) an assessment of the impact of editing different target outputs on model performance. 
From the model perspective, we design four experiments: (d) an evaluation of the edited model’s forgetting ability, (e) an identification of the current knowledge editing method’s bottleneck and an exploration of the correlation between editing probability values and parameter layer norms, and (f) a proposal of a sequence editing method, which effectively enhances the performance of the edited model.", "qtype": "Experimental_Results" @@ -3949,9 +3949,9 @@ "Dump for sequence", "Edit sequence" ], - "figure_path": "./MISSQA/figures/1_2410.23843v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2410.23843v1_figure_1.png", "paperid": "2410.23843v1", - "paper_path": "./MISSQA/papers/2410.23843v1.json", + "paper_path": "./MISS-QA/papers/2410.23843v1.json", "figure_id": "2410.23843v1_figure_1.png", "caption": "Fig. 1: This framework outlines the comprehensive approach to understanding the performance decline of edited models. On the left, traditional knowledge editing tasks are categorized into different types, each with distinct editing objectives: yes/no, a/b/c/d, and entity/event. On the right, our experiments are structured from both data and model perspectives. From the data perspective, we conduct three experiments: (a) a comprehensive performance evaluation of the model, (b) the construction of a Multi-Question Dataset (MQD), and (c) an assessment of the impact of editing different target outputs on model performance. From the model perspective, we design four experiments: (d) an evaluation of the edited model’s forgetting ability, (e) an identification of the current knowledge editing method’s bottleneck and an exploration of the correlation between editing probability values and parameter layer norms, and (f) a proposal of a sequence editing method, which effectively enhances the performance of the edited model.", "qtype": "Experimental_Results" @@ -3972,9 +3972,9 @@ "Dump for sequence", "Knowledge editing" ], - "figure_path": "./MISSQA/figures/2_2410.23843v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2410.23843v1_figure_1.png", "paperid": "2410.23843v1", - "paper_path": "./MISSQA/papers/2410.23843v1.json", + "paper_path": "./MISS-QA/papers/2410.23843v1.json", "figure_id": "2410.23843v1_figure_1.png", "caption": "Fig. 1: This framework outlines the comprehensive approach to understanding the performance decline of edited models. On the left, traditional knowledge editing tasks are categorized into different types, each with distinct editing objectives: yes/no, a/b/c/d, and entity/event. On the right, our experiments are structured from both data and model perspectives. From the data perspective, we conduct three experiments: (a) a comprehensive performance evaluation of the model, (b) the construction of a Multi-Question Dataset (MQD), and (c) an assessment of the impact of editing different target outputs on model performance. 
From the model perspective, we design four experiments: (d) an evaluation of the edited model’s forgetting ability, (e) an identification of the current knowledge editing method’s bottleneck and an exploration of the correlation between editing probability values and parameter layer norms, and (f) a proposal of a sequence editing method, which effectively enhances the performance of the edited model.", "qtype": "Others" @@ -3995,9 +3995,9 @@ "Symmetry Hypotheses", "Symmetry Decoder" ], - "figure_path": "./MISSQA/figures/0_2411.17763v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.17763v1_figure_2.png", "paperid": "2411.17763v1", - "paper_path": "./MISSQA/papers/2411.17763v1.json", + "paper_path": "./MISS-QA/papers/2411.17763v1.json", "figure_id": "2411.17763v1_figure_2.png", "caption": "Figure 2: Overview of Reflect3D, our zero-shot single-image symmetry detector. Top: Our transformer-based feed-forward symmetry detector (Section 3.2) predicts symmetry planes from a single RGB image. Bottom: Our multi-view symmetry enhancement pipeline (Section 3.3) leverages multi-view diffusion to resolve the inherent single-view ambiguity in symmetry detection. Aggregating symmetry predictions from multiple synthesized views results in more precise and comprehensive symmetry predictions.", "qtype": "Others" @@ -4018,9 +4018,9 @@ "Multi-view Diffusion", "Aggregation" ], - "figure_path": "./MISSQA/figures/1_2411.17763v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2411.17763v1_figure_2.png", "paperid": "2411.17763v1", - "paper_path": "./MISSQA/papers/2411.17763v1.json", + "paper_path": "./MISS-QA/papers/2411.17763v1.json", "figure_id": "2411.17763v1_figure_2.png", "caption": "Figure 2: Overview of Reflect3D, our zero-shot single-image symmetry detector. Top: Our transformer-based feed-forward symmetry detector (Section 3.2) predicts symmetry planes from a single RGB image. Bottom: Our multi-view symmetry enhancement pipeline (Section 3.3) leverages multi-view diffusion to resolve the inherent single-view ambiguity in symmetry detection. Aggregating symmetry predictions from multiple synthesized views results in more precise and comprehensive symmetry predictions.", "qtype": "Others" @@ -4046,9 +4046,9 @@ "masked_elements": [ "Symmetry Hypotheses" ], - "figure_path": "./MISSQA/figures/2_2411.17763v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2411.17763v1_figure_2.png", "paperid": "2411.17763v1", - "paper_path": "./MISSQA/papers/2411.17763v1.json", + "paper_path": "./MISS-QA/papers/2411.17763v1.json", "figure_id": "2411.17763v1_figure_2.png", "caption": "Figure 2: Overview of Reflect3D, our zero-shot single-image symmetry detector. Top: Our transformer-based feed-forward symmetry detector (Section 3.2) predicts symmetry planes from a single RGB image. Bottom: Our multi-view symmetry enhancement pipeline (Section 3.3) leverages multi-view diffusion to resolve the inherent single-view ambiguity in symmetry detection. 
Aggregating symmetry predictions from multiple synthesized views results in more precise and comprehensive symmetry predictions.", "qtype": "Design_Rationale" @@ -4075,9 +4075,9 @@ "masked_elements": [ "Multi-view Diffusion" ], - "figure_path": "./MISSQA/figures/3_2411.17763v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2411.17763v1_figure_2.png", "paperid": "2411.17763v1", - "paper_path": "./MISSQA/papers/2411.17763v1.json", + "paper_path": "./MISS-QA/papers/2411.17763v1.json", "figure_id": "2411.17763v1_figure_2.png", "caption": "Figure 2: Overview of Reflect3D, our zero-shot single-image symmetry detector. Top: Our transformer-based feed-forward symmetry detector (Section 3.2) predicts symmetry planes from a single RGB image. Bottom: Our multi-view symmetry enhancement pipeline (Section 3.3) leverages multi-view diffusion to resolve the inherent single-view ambiguity in symmetry detection. Aggregating symmetry predictions from multiple synthesized views results in more precise and comprehensive symmetry predictions.", "qtype": "Design_Rationale" @@ -4109,9 +4109,9 @@ "FCE module", "P5" ], - "figure_path": "./MISSQA/figures/0_2410.01031v2_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2410.01031v2_figure_2.png", "paperid": "2410.01031v2", - "paper_path": "./MISSQA/papers/2410.01031v2.json", + "paper_path": "./MISS-QA/papers/2410.01031v2.json", "figure_id": "2410.01031v2_figure_2.png", "caption": "Figure 2: Network architecture of improved method-1 (M1): adding one FCE module to the Backbone component of YOLOv8.", "qtype": "Design_Rationale" @@ -4132,9 +4132,9 @@ "masked_elements": [ "Concat" ], - "figure_path": "./MISSQA/figures/1_2410.01031v2_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2410.01031v2_figure_2.png", "paperid": "2410.01031v2", - "paper_path": "./MISSQA/papers/2410.01031v2.json", + "paper_path": "./MISS-QA/papers/2410.01031v2.json", "figure_id": "2410.01031v2_figure_2.png", "caption": "Figure 2: Network architecture of improved method-1 (M1): adding one FCE module to the Backbone component of YOLOv8.", "qtype": "Design_Rationale" @@ -4156,9 +4156,9 @@ "Upsample", "FCE Module" ], - "figure_path": "./MISSQA/figures/2_2410.01031v2_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2410.01031v2_figure_2.png", "paperid": "2410.01031v2", - "paper_path": "./MISSQA/papers/2410.01031v2.json", + "paper_path": "./MISS-QA/papers/2410.01031v2.json", "figure_id": "2410.01031v2_figure_2.png", "caption": "Figure 2: Network architecture of improved method-1 (M1): adding one FCE module to the Backbone component of YOLOv8.", "qtype": "Implementation_Details" @@ -4179,9 +4179,9 @@ "masked_elements": [ "C2f" ], - "figure_path": "./MISSQA/figures/3_2410.01031v2_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2410.01031v2_figure_2.png", "paperid": "2410.01031v2", - "paper_path": "./MISSQA/papers/2410.01031v2.json", + "paper_path": "./MISS-QA/papers/2410.01031v2.json", "figure_id": "2410.01031v2_figure_2.png", "caption": "Figure 2: Network architecture of improved method-1 (M1): adding one FCE module to the Backbone component of YOLOv8.", "qtype": "Implementation_Details" @@ -4202,9 +4202,9 @@ "masked_elements": [ "Cross Modality Alignment / Translation Module" ], - "figure_path": "./MISSQA/figures/0_2407.05036v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.05036v1_figure_1.png", "paperid": "2407.05036v1", - "paper_path": "./MISSQA/papers/2407.05036v1.json", + "paper_path": "./MISS-QA/papers/2407.05036v1.json", "figure_id": "2407.05036v1_figure_1.png", 
"caption": "Figure 1: Text-centric multimodal alignment, which converts different modalities into text to serve as input prompts for LLMs, is a common method for aligning large multimodal language models when pairwise multimodal data is limited.", "qtype": "Implementation_Details" @@ -4225,9 +4225,9 @@ "LLM", "Text Token Rep." ], - "figure_path": "./MISSQA/figures/1_2407.05036v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.05036v1_figure_1.png", "paperid": "2407.05036v1", - "paper_path": "./MISSQA/papers/2407.05036v1.json", + "paper_path": "./MISS-QA/papers/2407.05036v1.json", "figure_id": "2407.05036v1_figure_1.png", "caption": "Figure 1: Text-centric multimodal alignment, which converts different modalities into text to serve as input prompts for LLMs, is a common method for aligning large multimodal language models when pairwise multimodal data is limited.", "qtype": "Implementation_Details" @@ -4247,9 +4247,9 @@ "masked_elements": [ "Image Encoder" ], - "figure_path": "./MISSQA/figures/2_2407.05036v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2407.05036v1_figure_1.png", "paperid": "2407.05036v1", - "paper_path": "./MISSQA/papers/2407.05036v1.json", + "paper_path": "./MISS-QA/papers/2407.05036v1.json", "figure_id": "2407.05036v1_figure_1.png", "caption": "Figure 1: Text-centric multimodal alignment, which converts different modalities into text to serve as input prompts for LLMs, is a common method for aligning large multimodal language models when pairwise multimodal data is limited.", "qtype": "Literature_Background" @@ -4269,9 +4269,9 @@ "masked_elements": [ "LLM" ], - "figure_path": "./MISSQA/figures/3_2407.05036v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2407.05036v1_figure_1.png", "paperid": "2407.05036v1", - "paper_path": "./MISSQA/papers/2407.05036v1.json", + "paper_path": "./MISS-QA/papers/2407.05036v1.json", "figure_id": "2407.05036v1_figure_1.png", "caption": "Figure 1: Text-centric multimodal alignment, which converts different modalities into text to serve as input prompts for LLMs, is a common method for aligning large multimodal language models when pairwise multimodal data is limited.", "qtype": "Literature_Background" @@ -4298,9 +4298,9 @@ "masked_elements": [ "Aggregation" ], - "figure_path": "./MISSQA/figures/0_2411.01825v2_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.01825v2_figure_2.png", "paperid": "2411.01825v2", - "paper_path": "./MISSQA/papers/2411.01825v2.json", + "paper_path": "./MISS-QA/papers/2411.01825v2.json", "figure_id": "2411.01825v2_figure_2.png", "caption": "Figure 2: Workflow of FedReMa. (1) Local training. (2) Upload model to server. (3) Aggregate global feature extractors. (4) Determine whether the current period is a critical learning period, if so, go to (5), otherwise go to (8). (5) Measure the task relevance 𝒮𝒮\\mathcal{S}caligraphic_S using similarity of their logits, then perform MDS to filter out irrelevant clients. At the same time, the server will record the client’s historical dependencies 𝒢𝒢\\mathcal{G}caligraphic_G. (6) Update CCP status. (7) Aggregate personalized classifiers by 𝒜k(t)superscriptsubscript𝒜𝑘𝑡\\mathcal{A}_{k}^{(t)}caligraphic_A start_POSTSUBSCRIPT italic_k end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ( italic_t ) end_POSTSUPERSCRIPT. (8) Aggregate personalized classifiers based on historical dependencies 𝒢𝒢\\mathcal{G}caligraphic_G. 
(9) Server communicates personalized models to all clients.", "qtype": "Literature_Background" @@ -4328,9 +4328,9 @@ "CCP", "MDS" ], - "figure_path": "./MISSQA/figures/1_2411.01825v2_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2411.01825v2_figure_2.png", "paperid": "2411.01825v2", - "paper_path": "./MISSQA/papers/2411.01825v2.json", + "paper_path": "./MISS-QA/papers/2411.01825v2.json", "figure_id": "2411.01825v2_figure_2.png", "caption": "Figure 2: Workflow of FedReMa. (1) Local training. (2) Upload model to server. (3) Aggregate global feature extractors. (4) Determine whether the current period is a critical learning period, if so, go to (5), otherwise go to (8). (5) Measure the task relevance 𝒮𝒮\\mathcal{S}caligraphic_S using similarity of their logits, then perform MDS to filter out irrelevant clients. At the same time, the server will record the client’s historical dependencies 𝒢𝒢\\mathcal{G}caligraphic_G. (6) Update CCP status. (7) Aggregate personalized classifiers by 𝒜k(t)superscriptsubscript𝒜𝑘𝑡\\mathcal{A}_{k}^{(t)}caligraphic_A start_POSTSUBSCRIPT italic_k end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ( italic_t ) end_POSTSUPERSCRIPT. (8) Aggregate personalized classifiers based on historical dependencies 𝒢𝒢\\mathcal{G}caligraphic_G. (9) Server communicates personalized models to all clients.", "qtype": "Experimental_Results" @@ -4357,9 +4357,9 @@ "feature extractor aggregation", "personalized classifier aggregation" ], - "figure_path": "./MISSQA/figures/2_2411.01825v2_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2411.01825v2_figure_2.png", "paperid": "2411.01825v2", - "paper_path": "./MISSQA/papers/2411.01825v2.json", + "paper_path": "./MISS-QA/papers/2411.01825v2.json", "figure_id": "2411.01825v2_figure_2.png", "caption": "Figure 2: Workflow of FedReMa. (1) Local training. (2) Upload model to server. (3) Aggregate global feature extractors. (4) Determine whether the current period is a critical learning period, if so, go to (5), otherwise go to (8). (5) Measure the task relevance 𝒮𝒮\\mathcal{S}caligraphic_S using similarity of their logits, then perform MDS to filter out irrelevant clients. At the same time, the server will record the client’s historical dependencies 𝒢𝒢\\mathcal{G}caligraphic_G. (6) Update CCP status. (7) Aggregate personalized classifiers by 𝒜k(t)superscriptsubscript𝒜𝑘𝑡\\mathcal{A}_{k}^{(t)}caligraphic_A start_POSTSUBSCRIPT italic_k end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ( italic_t ) end_POSTSUPERSCRIPT. (8) Aggregate personalized classifiers based on historical dependencies 𝒢𝒢\\mathcal{G}caligraphic_G. (9) Server communicates personalized models to all clients.", "qtype": "Experimental_Results" @@ -4386,9 +4386,9 @@ "masked_elements": [ "Update" ], - "figure_path": "./MISSQA/figures/0_2411.17788v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.17788v1_figure_1.png", "paperid": "2411.17788v1", - "paper_path": "./MISSQA/papers/2411.17788v1.json", + "paper_path": "./MISS-QA/papers/2411.17788v1.json", "figure_id": "2411.17788v1_figure_1.png", "caption": "Figure 1: Overview of our model architecture. Given the point clouds of each part, we first use a feature extractor to generate part features and pairwise features. These features, along with the initial poses, are updated in a stack of geometric point attention modules. 
The predicted poses and positions are recycled for the next round of predictions in the geometric recycling module.", "qtype": "Experimental_Results" @@ -4416,9 +4416,9 @@ "Part Attention", "Pair Attention" ], - "figure_path": "./MISSQA/figures/1_2411.17788v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.17788v1_figure_1.png", "paperid": "2411.17788v1", - "paper_path": "./MISSQA/papers/2411.17788v1.json", + "paper_path": "./MISS-QA/papers/2411.17788v1.json", "figure_id": "2411.17788v1_figure_1.png", "caption": "Figure 1: Overview of our model architecture. Given the point clouds of each part, we first use a feature extractor to generate part features and pairwise features. These features, along with the initial poses, are updated in a stack of geometric point attention modules. The predicted poses and positions are recycled for the next round of predictions in the geometric recycling module.", "qtype": "Experimental_Results" @@ -4437,9 +4437,9 @@ "masked_elements": [ "Feature Extractor" ], - "figure_path": "./MISSQA/figures/2_2411.17788v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.17788v1_figure_1.png", "paperid": "2411.17788v1", - "paper_path": "./MISSQA/papers/2411.17788v1.json", + "paper_path": "./MISS-QA/papers/2411.17788v1.json", "figure_id": "2411.17788v1_figure_1.png", "caption": "Figure 1: Overview of our model architecture. Given the point clouds of each part, we first use a feature extractor to generate part features and pairwise features. These features, along with the initial poses, are updated in a stack of geometric point attention modules. The predicted poses and positions are recycled for the next round of predictions in the geometric recycling module.", "qtype": "Others" @@ -4470,9 +4470,9 @@ "masked_elements": [ "Negative Sampler" ], - "figure_path": "./MISSQA/figures/0_2409.07237v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2409.07237v1_figure_1.png", "paperid": "2409.07237v1", - "paper_path": "./MISSQA/papers/2409.07237v1.json", + "paper_path": "./MISS-QA/papers/2409.07237v1.json", "figure_id": "2409.07237v1_figure_1.png", "caption": "Figure 1. Illustration of the training stage of a personalized recommender system.", "qtype": "Others" @@ -4492,9 +4492,9 @@ "masked_elements": [ "BPR Loss" ], - "figure_path": "./MISSQA/figures/1_2409.07237v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2409.07237v1_figure_1.png", "paperid": "2409.07237v1", - "paper_path": "./MISSQA/papers/2409.07237v1.json", + "paper_path": "./MISS-QA/papers/2409.07237v1.json", "figure_id": "2409.07237v1_figure_1.png", "caption": "Figure 1. Illustration of the training stage of a personalized recommender system.", "qtype": "Others" @@ -4525,9 +4525,9 @@ "masked_elements": [ "Negative Sampler" ], - "figure_path": "./MISSQA/figures/2_2409.07237v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2409.07237v1_figure_1.png", "paperid": "2409.07237v1", - "paper_path": "./MISSQA/papers/2409.07237v1.json", + "paper_path": "./MISS-QA/papers/2409.07237v1.json", "figure_id": "2409.07237v1_figure_1.png", "caption": "Figure 1. 
Illustration of the training stage of a personalized recommender system.", "qtype": "Design_Rationale" @@ -4547,9 +4547,9 @@ "masked_elements": [ "Recommender" ], - "figure_path": "./MISSQA/figures/3_2409.07237v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2409.07237v1_figure_1.png", "paperid": "2409.07237v1", - "paper_path": "./MISSQA/papers/2409.07237v1.json", + "paper_path": "./MISS-QA/papers/2409.07237v1.json", "figure_id": "2409.07237v1_figure_1.png", "caption": "Figure 1. Illustration of the training stage of a personalized recommender system.", "qtype": "Design_Rationale" @@ -4576,9 +4576,9 @@ "Local Hard Negative Sampling", "single-label classification" ], - "figure_path": "./MISSQA/figures/0_2408.05786v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2408.05786v1_figure_2.png", "paperid": "2408.05786v1", - "paper_path": "./MISSQA/papers/2408.05786v1.json", + "paper_path": "./MISS-QA/papers/2408.05786v1.json", "figure_id": "2408.05786v1_figure_2.png", "caption": "Figure 2: Illustration of HiLight. Given an input text, HiLight infers label probabilities by a text encoder and a multi-label classification head. With the inferred label probabilities and positive labels, HiLight conducts label space learning with MLC and HiLCL task. HiLCL is our proposed method and it divides the multi-label classification learning into multiple single-label classification learning. Then, HiLCL improves contrastive learning on each single-label classification learning with Local Hard Negative Sampling, which introduces negative labels from sibling and descendant label set of the positive label. Those negative labels outside the sibling and descendant label set are masked out during learning. HiLCL schedules learning with Hierarchical Learning strategy, which adopts a fine-to-coarse learning strategy to improve the discrimination of finest-grained labels.", "qtype": "Design_Rationale" @@ -4605,9 +4605,9 @@ "masked_elements": [ "Hierarchical Learning" ], - "figure_path": "./MISSQA/figures/1_2408.05786v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2408.05786v1_figure_2.png", "paperid": "2408.05786v1", - "paper_path": "./MISSQA/papers/2408.05786v1.json", + "paper_path": "./MISS-QA/papers/2408.05786v1.json", "figure_id": "2408.05786v1_figure_2.png", "caption": "Figure 2: Illustration of HiLight. Given an input text, HiLight infers label probabilities by a text encoder and a multi-label classification head. With the inferred label probabilities and positive labels, HiLight conducts label space learning with MLC and HiLCL task. HiLCL is our proposed method and it divides the multi-label classification learning into multiple single-label classification learning. Then, HiLCL improves contrastive learning on each single-label classification learning with Local Hard Negative Sampling, which introduces negative labels from sibling and descendant label set of the positive label. Those negative labels outside the sibling and descendant label set are masked out during learning. 
HiLCL schedules learning with Hierarchical Learning strategy, which adopts a fine-to-coarse learning strategy to improve the discrimination of finest-grained labels.", "qtype": "Design_Rationale" @@ -4631,9 +4631,9 @@ "masked_elements": [ "Local Hard Negative Sampling" ], - "figure_path": "./MISSQA/figures/2_2408.05786v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2408.05786v1_figure_2.png", "paperid": "2408.05786v1", - "paper_path": "./MISSQA/papers/2408.05786v1.json", + "paper_path": "./MISS-QA/papers/2408.05786v1.json", "figure_id": "2408.05786v1_figure_2.png", "caption": "Figure 2: Illustration of HiLight. Given an input text, HiLight infers label probabilities by a text encoder and a multi-label classification head. With the inferred label probabilities and positive labels, HiLight conducts label space learning with MLC and HiLCL task. HiLCL is our proposed method and it divides the multi-label classification learning into multiple single-label classification learning. Then, HiLCL improves contrastive learning on each single-label classification learning with Local Hard Negative Sampling, which introduces negative labels from sibling and descendant label set of the positive label. Those negative labels outside the sibling and descendant label set are masked out during learning. HiLCL schedules learning with Hierarchical Learning strategy, which adopts a fine-to-coarse learning strategy to improve the discrimination of finest-grained labels.", "qtype": "Implementation_Details" @@ -4660,9 +4660,9 @@ "Hierarchical Learning", "HiLCL Task" ], - "figure_path": "./MISSQA/figures/3_2408.05786v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2408.05786v1_figure_2.png", "paperid": "2408.05786v1", - "paper_path": "./MISSQA/papers/2408.05786v1.json", + "paper_path": "./MISS-QA/papers/2408.05786v1.json", "figure_id": "2408.05786v1_figure_2.png", "caption": "Figure 2: Illustration of HiLight. Given an input text, HiLight infers label probabilities by a text encoder and a multi-label classification head. With the inferred label probabilities and positive labels, HiLight conducts label space learning with MLC and HiLCL task. HiLCL is our proposed method and it divides the multi-label classification learning into multiple single-label classification learning. Then, HiLCL improves contrastive learning on each single-label classification learning with Local Hard Negative Sampling, which introduces negative labels from sibling and descendant label set of the positive label. Those negative labels outside the sibling and descendant label set are masked out during learning. 
HiLCL schedules learning with Hierarchical Learning strategy, which adopts a fine-to-coarse learning strategy to improve the discrimination of finest-grained labels.", "qtype": "Implementation_Details" @@ -4689,9 +4689,9 @@ "Projection according to position x,y", "Adding perturbation" ], - "figure_path": "./MISSQA/figures/0_2408.12727v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2408.12727v1_figure_2.png", "paperid": "2408.12727v1", - "paper_path": "./MISSQA/papers/2408.12727v1.json", + "paper_path": "./MISS-QA/papers/2408.12727v1.json", "figure_id": "2408.12727v1_figure_2.png", "caption": "Figure 2: Generating perturbed features by iterating ①–⑥ to induce ID switch between two objects in BankTweak: a focus on the feature extractor during the association phase.", "qtype": "Implementation_Details" @@ -4711,9 +4711,9 @@ "masked_elements": [ "Feature extractor E(·)" ], - "figure_path": "./MISSQA/figures/1_2408.12727v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2408.12727v1_figure_2.png", "paperid": "2408.12727v1", - "paper_path": "./MISSQA/papers/2408.12727v1.json", + "paper_path": "./MISS-QA/papers/2408.12727v1.json", "figure_id": "2408.12727v1_figure_2.png", "caption": "Figure 2: Generating perturbed features by iterating ①–⑥ to induce ID switch between two objects in BankTweak: a focus on the feature extractor during the association phase.", "qtype": "Literature_Background" @@ -4740,9 +4740,9 @@ "Detector D(·)", "Adding perturbation" ], - "figure_path": "./MISSQA/figures/2_2408.12727v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2408.12727v1_figure_2.png", "paperid": "2408.12727v1", - "paper_path": "./MISSQA/papers/2408.12727v1.json", + "paper_path": "./MISS-QA/papers/2408.12727v1.json", "figure_id": "2408.12727v1_figure_2.png", "caption": "Figure 2: Generating perturbed features by iterating ①–⑥ to induce ID switch between two objects in BankTweak: a focus on the feature extractor during the association phase.", "qtype": "Literature_Background" @@ -4767,9 +4767,9 @@ "masked_elements": [ "sketch attention" ], - "figure_path": "./MISSQA/figures/0_2411.16199v2_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.16199v2_figure_2.png", "paperid": "2411.16199v2", - "paper_path": "./MISSQA/papers/2411.16199v2.json", + "paper_path": "./MISS-QA/papers/2411.16199v2.json", "figure_id": "2411.16199v2_figure_2.png", "caption": "Figure 2: The pipeline of VIRES model.\n(a) The input video clip and corresponding text descriptions are encoded using independent encoders.\nNoise is selectively added to the latent code according to the instance mask.\n(b) This noised latent code is fed into the denoising network, composed of staked transformer blocks with spatial and temporal self-attention modules, trained to reverse the diffusion process.\n(c) The Sequential ControlNet extracts structure layouts and injects them into the denoising network with the standardized self-scaling.\n(d) The sketch attention is implemented as a parallel branch within the spatial self-attention module, injecting structure semantics into subsequent transformer blocks.\n(e) A sketch-aware encoder additionally provides multi-level texture features during decoding, generating the repainted video clip from the denoised latent code.", "qtype": "Literature_Background" @@ -4795,9 +4795,9 @@ "masked_elements": [ "standardized self-scaling" ], - "figure_path": "./MISSQA/figures/1_2411.16199v2_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2411.16199v2_figure_2.png", "paperid": "2411.16199v2", - 
"paper_path": "./MISSQA/papers/2411.16199v2.json", + "paper_path": "./MISS-QA/papers/2411.16199v2.json", "figure_id": "2411.16199v2_figure_2.png", "caption": "Figure 2: The pipeline of VIRES model.\n(a) The input video clip and corresponding text descriptions are encoded using independent encoders.\nNoise is selectively added to the latent code according to the instance mask.\n(b) This noised latent code is fed into the denoising network, composed of staked transformer blocks with spatial and temporal self-attention modules, trained to reverse the diffusion process.\n(c) The Sequential ControlNet extracts structure layouts and injects them into the denoising network with the standardized self-scaling.\n(d) The sketch attention is implemented as a parallel branch within the spatial self-attention module, injecting structure semantics into subsequent transformer blocks.\n(e) A sketch-aware encoder additionally provides multi-level texture features during decoding, generating the repainted video clip from the denoised latent code.", "qtype": "Literature_Background" @@ -4823,9 +4823,9 @@ "masked_elements": [ "standardized self-scaling" ], - "figure_path": "./MISSQA/figures/2_2411.16199v2_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2411.16199v2_figure_2.png", "paperid": "2411.16199v2", - "paper_path": "./MISSQA/papers/2411.16199v2.json", + "paper_path": "./MISS-QA/papers/2411.16199v2.json", "figure_id": "2411.16199v2_figure_2.png", "caption": "Figure 2: The pipeline of VIRES model.\n(a) The input video clip and corresponding text descriptions are encoded using independent encoders.\nNoise is selectively added to the latent code according to the instance mask.\n(b) This noised latent code is fed into the denoising network, composed of staked transformer blocks with spatial and temporal self-attention modules, trained to reverse the diffusion process.\n(c) The Sequential ControlNet extracts structure layouts and injects them into the denoising network with the standardized self-scaling.\n(d) The sketch attention is implemented as a parallel branch within the spatial self-attention module, injecting structure semantics into subsequent transformer blocks.\n(e) A sketch-aware encoder additionally provides multi-level texture features during decoding, generating the repainted video clip from the denoised latent code.", "qtype": "Literature_Background" @@ -4850,9 +4850,9 @@ "masked_elements": [ "sketch attention" ], - "figure_path": "./MISSQA/figures/3_2411.16199v2_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2411.16199v2_figure_2.png", "paperid": "2411.16199v2", - "paper_path": "./MISSQA/papers/2411.16199v2.json", + "paper_path": "./MISS-QA/papers/2411.16199v2.json", "figure_id": "2411.16199v2_figure_2.png", "caption": "Figure 2: The pipeline of VIRES model.\n(a) The input video clip and corresponding text descriptions are encoded using independent encoders.\nNoise is selectively added to the latent code according to the instance mask.\n(b) This noised latent code is fed into the denoising network, composed of staked transformer blocks with spatial and temporal self-attention modules, trained to reverse the diffusion process.\n(c) The Sequential ControlNet extracts structure layouts and injects them into the denoising network with the standardized self-scaling.\n(d) The sketch attention is implemented as a parallel branch within the spatial self-attention module, injecting structure semantics into subsequent transformer blocks.\n(e) A sketch-aware encoder additionally provides 
multi-level texture features during decoding, generating the repainted video clip from the denoised latent code.", "qtype": "Literature_Background" @@ -4877,9 +4877,9 @@ "masked_elements": [ "Reverse Distillation" ], - "figure_path": "./MISSQA/figures/0_2407.08239v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2407.08239v1_figure_2.png", "paperid": "2407.08239v1", - "paper_path": "./MISSQA/papers/2407.08239v1.json", + "paper_path": "./MISS-QA/papers/2407.08239v1.json", "figure_id": "2407.08239v1_figure_2.png", "caption": "Figure 2: The workflow of SDF: On the left is our process of training diverse experts through reverse knowledge distillation. On the right is the process of mining the most informative samples in the target domain using diverse experts. Below is our process for generating labels for these most informative samples, which ultimately participate in the training.", "qtype": "Experimental_Results" @@ -4899,9 +4899,9 @@ "masked_elements": [ "multi-perspective sampling" ], - "figure_path": "./MISSQA/figures/0_2407.08223v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.08223v1_figure_1.png", "paperid": "2407.08223v1", - "paper_path": "./MISSQA/papers/2407.08223v1.json", + "paper_path": "./MISS-QA/papers/2407.08223v1.json", "figure_id": "2407.08223v1_figure_1.png", "caption": "Figure 1: Illustration of different RAG approaches. Given a knowledge-intensive query Q𝑄Qitalic_Q and retrieved documents, (a) Standard RAG incorporates all documents into the prompt, increasing input length and slowing inference; (b) Self-Reflective RAG (Asai et al., 2023) requires specialized instruction-tuning of the general-purpose language model (LM) to generate specific tags for self-reflection; (c) Corrective RAG (Yan et al., 2024) employs an external retrieval evaluator to refine document quality, focusing solely on contextual information without enhancing reasoning capabilities; (d) In contrast, our proposed Speculative RAG leverages a larger generalist LM to efficiently verify multiple RAG drafts produced in parallel by a smaller, specialized LM. Each draft is generated from a distinct subset of retrieved documents, providing diverse perspectives on the evidence while minimizing the number of input tokens per draft.", "qtype": "Others" @@ -4927,9 +4927,9 @@ "masked_elements": [ "specialist RAG drafter" ], - "figure_path": "./MISSQA/figures/1_2407.08223v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.08223v1_figure_1.png", "paperid": "2407.08223v1", - "paper_path": "./MISSQA/papers/2407.08223v1.json", + "paper_path": "./MISS-QA/papers/2407.08223v1.json", "figure_id": "2407.08223v1_figure_1.png", "caption": "Figure 1: Illustration of different RAG approaches. Given a knowledge-intensive query Q𝑄Qitalic_Q and retrieved documents, (a) Standard RAG incorporates all documents into the prompt, increasing input length and slowing inference; (b) Self-Reflective RAG (Asai et al., 2023) requires specialized instruction-tuning of the general-purpose language model (LM) to generate specific tags for self-reflection; (c) Corrective RAG (Yan et al., 2024) employs an external retrieval evaluator to refine document quality, focusing solely on contextual information without enhancing reasoning capabilities; (d) In contrast, our proposed Speculative RAG leverages a larger generalist LM to efficiently verify multiple RAG drafts produced in parallel by a smaller, specialized LM. 
Each draft is generated from a distinct subset of retrieved documents, providing diverse perspectives on the evidence while minimizing the number of input tokens per draft.", "qtype": "Design_Rationale" @@ -4954,9 +4954,9 @@ "masked_elements": [ "generalist LM" ], - "figure_path": "./MISSQA/figures/2_2407.08223v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2407.08223v1_figure_1.png", "paperid": "2407.08223v1", - "paper_path": "./MISSQA/papers/2407.08223v1.json", + "paper_path": "./MISS-QA/papers/2407.08223v1.json", "figure_id": "2407.08223v1_figure_1.png", "caption": "Figure 1: Illustration of different RAG approaches. Given a knowledge-intensive query Q𝑄Qitalic_Q and retrieved documents, (a) Standard RAG incorporates all documents into the prompt, increasing input length and slowing inference; (b) Self-Reflective RAG (Asai et al., 2023) requires specialized instruction-tuning of the general-purpose language model (LM) to generate specific tags for self-reflection; (c) Corrective RAG (Yan et al., 2024) employs an external retrieval evaluator to refine document quality, focusing solely on contextual information without enhancing reasoning capabilities; (d) In contrast, our proposed Speculative RAG leverages a larger generalist LM to efficiently verify multiple RAG drafts produced in parallel by a smaller, specialized LM. Each draft is generated from a distinct subset of retrieved documents, providing diverse perspectives on the evidence while minimizing the number of input tokens per draft.", "qtype": "Design_Rationale" @@ -4983,9 +4983,9 @@ "masked_elements": [ "optimized weight clustering" ], - "figure_path": "./MISSQA/figures/0_2409.10918v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2409.10918v1_figure_1.png", "paperid": "2409.10918v1", - "paper_path": "./MISSQA/papers/2409.10918v1.json", + "paper_path": "./MISS-QA/papers/2409.10918v1.json", "figure_id": "2409.10918v1_figure_1.png", "caption": "Figure 1: Overview of conventional Few-shot learning pipeline with multilayer perceptron (MLP) search and proposed FSL-HDnn pipeline.", "qtype": "Implementation_Details" @@ -5010,9 +5010,9 @@ "masked_elements": [ "HV encoder" ], - "figure_path": "./MISSQA/figures/1_2409.10918v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2409.10918v1_figure_1.png", "paperid": "2409.10918v1", - "paper_path": "./MISSQA/papers/2409.10918v1.json", + "paper_path": "./MISS-QA/papers/2409.10918v1.json", "figure_id": "2409.10918v1_figure_1.png", "caption": "Figure 1: Overview of conventional Few-shot learning pipeline with multilayer perceptron (MLP) search and proposed FSL-HDnn pipeline.", "qtype": "Implementation_Details" @@ -5033,9 +5033,9 @@ "Semi-supervised Learning", "Discriminator" ], - "figure_path": "./MISSQA/figures/0_2407.03640v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.03640v1_figure_1.png", "paperid": "2407.03640v1", - "paper_path": "./MISSQA/papers/2407.03640v1.json", + "paper_path": "./MISS-QA/papers/2407.03640v1.json", "figure_id": "2407.03640v1_figure_1.png", "caption": "Figure 1: Schematic Diagram of Generation technology for Emotion Recognition. 
444\n\nFacial image is sourced from https://zenodo.org/records/1188976.\nFeature extraction module is adapted from is from https://medium.com/clique-org/how-to-create-a-face-recognition-model-using-facenet-keras-fd65c0b092f1.\nDimensional emotion models can be found at [33].\nData augmentation schematic is from https://www.baeldung.com/cs/ml-gan-data-augmentation.\nSemi-supervised learning schematic is provided by [34].\nCross-domain image is from [35].\nSchematics of each of the four generative models are taken from https://www.compthree.com/blog/autoencoder/, https://www.javatpoint.com/generative-adversarial-network,\nhttps://towardsdatascience.com/diffusion-models-made-easy-8414298ce4da,\nhttps://engineeringprompts.substack.com/p/frameworks-to-build-llm-applications", "qtype": "Implementation_Details" @@ -5063,9 +5063,9 @@ "GAN", "Semi-supervised Learning" ], - "figure_path": "./MISSQA/figures/1_2407.03640v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.03640v1_figure_1.png", "paperid": "2407.03640v1", - "paper_path": "./MISSQA/papers/2407.03640v1.json", + "paper_path": "./MISS-QA/papers/2407.03640v1.json", "figure_id": "2407.03640v1_figure_1.png", "caption": "Figure 1: Schematic Diagram of Generation technology for Emotion Recognition. 444\n\nFacial image is sourced from https://zenodo.org/records/1188976.\nFeature extraction module is adapted from is from https://medium.com/clique-org/how-to-create-a-face-recognition-model-using-facenet-keras-fd65c0b092f1.\nDimensional emotion models can be found at [33].\nData augmentation schematic is from https://www.baeldung.com/cs/ml-gan-data-augmentation.\nSemi-supervised learning schematic is provided by [34].\nCross-domain image is from [35].\nSchematics of each of the four generative models are taken from https://www.compthree.com/blog/autoencoder/, https://www.javatpoint.com/generative-adversarial-network,\nhttps://towardsdatascience.com/diffusion-models-made-easy-8414298ce4da,\nhttps://engineeringprompts.substack.com/p/frameworks-to-build-llm-applications", "qtype": "Literature_Background" @@ -5085,9 +5085,9 @@ "masked_elements": [ "Diffusion Model" ], - "figure_path": "./MISSQA/figures/2_2407.03640v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2407.03640v1_figure_1.png", "paperid": "2407.03640v1", - "paper_path": "./MISSQA/papers/2407.03640v1.json", + "paper_path": "./MISS-QA/papers/2407.03640v1.json", "figure_id": "2407.03640v1_figure_1.png", "caption": "Figure 1: Schematic Diagram of Generation technology for Emotion Recognition. 
444\n\nFacial image is sourced from https://zenodo.org/records/1188976.\nFeature extraction module is adapted from is from https://medium.com/clique-org/how-to-create-a-face-recognition-model-using-facenet-keras-fd65c0b092f1.\nDimensional emotion models can be found at [33].\nData augmentation schematic is from https://www.baeldung.com/cs/ml-gan-data-augmentation.\nSemi-supervised learning schematic is provided by [34].\nCross-domain image is from [35].\nSchematics of each of the four generative models are taken from https://www.compthree.com/blog/autoencoder/, https://www.javatpoint.com/generative-adversarial-network,\nhttps://towardsdatascience.com/diffusion-models-made-easy-8414298ce4da,\nhttps://engineeringprompts.substack.com/p/frameworks-to-build-llm-applications", "qtype": "Literature_Background" @@ -5112,9 +5112,9 @@ "masked_elements": [ "depth-aware motion segmentation" ], - "figure_path": "./MISSQA/figures/0_2411.13291v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.13291v1_figure_2.png", "paperid": "2411.13291v1", - "paper_path": "./MISSQA/papers/2411.13291v1.json", + "paper_path": "./MISS-QA/papers/2411.13291v1.json", "figure_id": "2411.13291v1_figure_2.png", "caption": "Figure 2: DATAP-SfM pipeline. Given monocular videos as input with consistent video depth (optional), DATAP can simultaneously estimate long-term point tracking with visible and dynamic characteristics. Incorporating with DATAP, we formalize a concise, elegant, and robust pipeline of structure from motion by performing global bundle adjustment for point tracking classified as static and visible.", "qtype": "Literature_Background" @@ -5137,9 +5137,9 @@ "masked_elements": [ "Global BA" ], - "figure_path": "./MISSQA/figures/1_2411.13291v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2411.13291v1_figure_2.png", "paperid": "2411.13291v1", - "paper_path": "./MISSQA/papers/2411.13291v1.json", + "paper_path": "./MISS-QA/papers/2411.13291v1.json", "figure_id": "2411.13291v1_figure_2.png", "caption": "Figure 2: DATAP-SfM pipeline. Given monocular videos as input with consistent video depth (optional), DATAP can simultaneously estimate long-term point tracking with visible and dynamic characteristics. Incorporating with DATAP, we formalize a concise, elegant, and robust pipeline of structure from motion by performing global bundle adjustment for point tracking classified as static and visible.", "qtype": "Literature_Background" @@ -5169,9 +5169,9 @@ "consistent video depth", "depth-aware motion segmentation" ], - "figure_path": "./MISSQA/figures/2_2411.13291v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2411.13291v1_figure_2.png", "paperid": "2411.13291v1", - "paper_path": "./MISSQA/papers/2411.13291v1.json", + "paper_path": "./MISS-QA/papers/2411.13291v1.json", "figure_id": "2411.13291v1_figure_2.png", "caption": "Figure 2: DATAP-SfM pipeline. Given monocular videos as input with consistent video depth (optional), DATAP can simultaneously estimate long-term point tracking with visible and dynamic characteristics. 
Incorporating with DATAP, we formalize a concise, elegant, and robust pipeline of structure from motion by performing global bundle adjustment for point tracking classified as static and visible.", "qtype": "Experimental_Results" @@ -5199,9 +5199,9 @@ "masked_elements": [ "iterative transformer updates" ], - "figure_path": "./MISSQA/figures/3_2411.13291v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2411.13291v1_figure_2.png", "paperid": "2411.13291v1", - "paper_path": "./MISSQA/papers/2411.13291v1.json", + "paper_path": "./MISS-QA/papers/2411.13291v1.json", "figure_id": "2411.13291v1_figure_2.png", "caption": "Figure 2: DATAP-SfM pipeline. Given monocular videos as input with consistent video depth (optional), DATAP can simultaneously estimate long-term point tracking with visible and dynamic characteristics. Incorporating with DATAP, we formalize a concise, elegant, and robust pipeline of structure from motion by performing global bundle adjustment for point tracking classified as static and visible.", "qtype": "Experimental_Results" @@ -5226,9 +5226,9 @@ "masked_elements": [ "Interface" ], - "figure_path": "./MISSQA/figures/0_2409.07486v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2409.07486v1_figure_1.png", "paperid": "2409.07486v1", - "paper_path": "./MISSQA/papers/2409.07486v1.json", + "paper_path": "./MISS-QA/papers/2409.07486v1.json", "figure_id": "2409.07486v1_figure_1.png", "caption": "Figure 1: High-Level Overview of MarS.MarS is powered by a generative foundation model (LMM) trained on order-level historical financial market data. During real-time simulation, LMM dynamically generates order series in response to various conditions, including user-submitted interactive orders, vague target scenario descriptions, and current/recent market data. These generated order series, combined with user interactive orders, are matched in a simulated clearing house in real-time, producing fine-grained simulated market trajectories. The flexibility of LMM s order generation enables MarS to support various downstream applications, such as forecasting, detection/warning systems, analysis platforms, and agent training environments.", "qtype": "Experimental_Results" @@ -5256,9 +5256,9 @@ "Ensemble Model", "order model" ], - "figure_path": "./MISSQA/figures/1_2409.07486v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2409.07486v1_figure_1.png", "paperid": "2409.07486v1", - "paper_path": "./MISSQA/papers/2409.07486v1.json", + "paper_path": "./MISS-QA/papers/2409.07486v1.json", "figure_id": "2409.07486v1_figure_1.png", "caption": "Figure 1: High-Level Overview of MarS.MarS is powered by a generative foundation model (LMM) trained on order-level historical financial market data. During real-time simulation, LMM dynamically generates order series in response to various conditions, including user-submitted interactive orders, vague target scenario descriptions, and current/recent market data. These generated order series, combined with user interactive orders, are matched in a simulated clearing house in real-time, producing fine-grained simulated market trajectories. 
The flexibility of LMM s order generation enables MarS to support various downstream applications, such as forecasting, detection/warning systems, analysis platforms, and agent training environments.", "qtype": "Experimental_Results" @@ -5277,9 +5277,9 @@ "masked_elements": [ "Order-level Historical Market Data" ], - "figure_path": "./MISSQA/figures/2_2409.07486v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2409.07486v1_figure_1.png", "paperid": "2409.07486v1", - "paper_path": "./MISSQA/papers/2409.07486v1.json", + "paper_path": "./MISS-QA/papers/2409.07486v1.json", "figure_id": "2409.07486v1_figure_1.png", "caption": "Figure 1: High-Level Overview of MarS.MarS is powered by a generative foundation model (LMM) trained on order-level historical financial market data. During real-time simulation, LMM dynamically generates order series in response to various conditions, including user-submitted interactive orders, vague target scenario descriptions, and current/recent market data. These generated order series, combined with user interactive orders, are matched in a simulated clearing house in real-time, producing fine-grained simulated market trajectories. The flexibility of LMM s order generation enables MarS to support various downstream applications, such as forecasting, detection/warning systems, analysis platforms, and agent training environments.", "qtype": "Others" @@ -5299,9 +5299,9 @@ "masked_elements": [ "Simulated Clearing House" ], - "figure_path": "./MISSQA/figures/3_2409.07486v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2409.07486v1_figure_1.png", "paperid": "2409.07486v1", - "paper_path": "./MISSQA/papers/2409.07486v1.json", + "paper_path": "./MISS-QA/papers/2409.07486v1.json", "figure_id": "2409.07486v1_figure_1.png", "caption": "Figure 1: High-Level Overview of MarS.MarS is powered by a generative foundation model (LMM) trained on order-level historical financial market data. During real-time simulation, LMM dynamically generates order series in response to various conditions, including user-submitted interactive orders, vague target scenario descriptions, and current/recent market data. These generated order series, combined with user interactive orders, are matched in a simulated clearing house in real-time, producing fine-grained simulated market trajectories. The flexibility of LMM s order generation enables MarS to support various downstream applications, such as forecasting, detection/warning systems, analysis platforms, and agent training environments.", "qtype": "Others" @@ -5320,9 +5320,9 @@ "masked_elements": [ "Orthogonal Attention (OA)" ], - "figure_path": "./MISSQA/figures/0_2410.07658v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2410.07658v1_figure_2.png", "paperid": "2410.07658v1", - "paper_path": "./MISSQA/papers/2410.07658v1.json", + "paper_path": "./MISS-QA/papers/2410.07658v1.json", "figure_id": "2410.07658v1_figure_2.png", "caption": "Figure 2: The overall framework of SeMv-3D. SeMv-3D consists of two components: 1) Triplane Prior Learner (TPL) that learns a triplane prior to capture consistent 3D visual details and 2) Semantic-aligned View Synthesizer (SVS) that enhances the alignment between the semantic with 3D content and enables single-step generation of arbitrary views. 
Here, Orthogonal Attention (OA) focuses on the orthogonal correspondences within the triplane, maintaining triplane consistency and extracting fine-grained features.", "qtype": "Others" @@ -5341,9 +5341,9 @@ "masked_elements": [ "Semantic-aligned View Synthesizer" ], - "figure_path": "./MISSQA/figures/1_2410.07658v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2410.07658v1_figure_2.png", "paperid": "2410.07658v1", - "paper_path": "./MISSQA/papers/2410.07658v1.json", + "paper_path": "./MISS-QA/papers/2410.07658v1.json", "figure_id": "2410.07658v1_figure_2.png", "caption": "Figure 2: The overall framework of SeMv-3D. SeMv-3D consists of two components: 1) Triplane Prior Learner (TPL) that learns a triplane prior to capture consistent 3D visual details and 2) Semantic-aligned View Synthesizer (SVS) that enhances the alignment between the semantic with 3D content and enables single-step generation of arbitrary views. Here, Orthogonal Attention (OA) focuses on the orthogonal correspondences within the triplane, maintaining triplane consistency and extracting fine-grained features.", "qtype": "Others" @@ -5370,9 +5370,9 @@ "Object Retention", "Triplane Orthogonalization" ], - "figure_path": "./MISSQA/figures/2_2410.07658v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2410.07658v1_figure_2.png", "paperid": "2410.07658v1", - "paper_path": "./MISSQA/papers/2410.07658v1.json", + "paper_path": "./MISS-QA/papers/2410.07658v1.json", "figure_id": "2410.07658v1_figure_2.png", "caption": "Figure 2: The overall framework of SeMv-3D. SeMv-3D consists of two components: 1) Triplane Prior Learner (TPL) that learns a triplane prior to capture consistent 3D visual details and 2) Semantic-aligned View Synthesizer (SVS) that enhances the alignment between the semantic with 3D content and enables single-step generation of arbitrary views. Here, Orthogonal Attention (OA) focuses on the orthogonal correspondences within the triplane, maintaining triplane consistency and extracting fine-grained features.", "qtype": "Design_Rationale" @@ -5398,9 +5398,9 @@ "masked_elements": [ "Cross Attn" ], - "figure_path": "./MISSQA/figures/3_2410.07658v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2410.07658v1_figure_2.png", "paperid": "2410.07658v1", - "paper_path": "./MISSQA/papers/2410.07658v1.json", + "paper_path": "./MISS-QA/papers/2410.07658v1.json", "figure_id": "2410.07658v1_figure_2.png", "caption": "Figure 2: The overall framework of SeMv-3D. SeMv-3D consists of two components: 1) Triplane Prior Learner (TPL) that learns a triplane prior to capture consistent 3D visual details and 2) Semantic-aligned View Synthesizer (SVS) that enhances the alignment between the semantic with 3D content and enables single-step generation of arbitrary views. Here, Orthogonal Attention (OA) focuses on the orthogonal correspondences within the triplane, maintaining triplane consistency and extracting fine-grained features.", "qtype": "Design_Rationale" @@ -5426,9 +5426,9 @@ "Spatial Calibration", "Depth Estimation" ], - "figure_path": "./MISSQA/figures/0_2411.00527v2_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.00527v2_figure_1.png", "paperid": "2411.00527v2", - "paper_path": "./MISSQA/papers/2411.00527v2.json", + "paper_path": "./MISS-QA/papers/2411.00527v2.json", "figure_id": "2411.00527v2_figure_1.png", "caption": "Figure 1. 
\nRecent developments for near-field imaging radars enabled the acquisition of high-resolution depth images, and the sensors are now increasingly gaining attention as complementary modalities to optical depth sensing. Direct comparisons from our MAROON dataset, however, highlight significant differences between radar and optical reconstructions. This work employs the collected multimodal data of four depth imagers, depicted on the left, to systematically characterize these fundamental differences together with sensor-specific findings in a joint evaluation framework.", "qtype": "Design_Rationale" @@ -5449,9 +5449,9 @@ "Joint Alignment", "Depth Deviation Measurement" ], - "figure_path": "./MISSQA/figures/1_2411.00527v2_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.00527v2_figure_1.png", "paperid": "2411.00527v2", - "paper_path": "./MISSQA/papers/2411.00527v2.json", + "paper_path": "./MISS-QA/papers/2411.00527v2.json", "figure_id": "2411.00527v2_figure_1.png", "caption": "Figure 1. \nRecent developments for near-field imaging radars enabled the acquisition of high-resolution depth images, and the sensors are now increasingly gaining attention as complementary modalities to optical depth sensing. Direct comparisons from our MAROON dataset, however, highlight significant differences between radar and optical reconstructions. This work employs the collected multimodal data of four depth imagers, depicted on the left, to systematically characterize these fundamental differences together with sensor-specific findings in a joint evaluation framework.", "qtype": "Design_Rationale" @@ -5470,9 +5470,9 @@ "masked_elements": [ "Spatial Calibration" ], - "figure_path": "./MISSQA/figures/2_2411.00527v2_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.00527v2_figure_1.png", "paperid": "2411.00527v2", - "paper_path": "./MISSQA/papers/2411.00527v2.json", + "paper_path": "./MISS-QA/papers/2411.00527v2.json", "figure_id": "2411.00527v2_figure_1.png", "caption": "Figure 1. \nRecent developments for near-field imaging radars enabled the acquisition of high-resolution depth images, and the sensors are now increasingly gaining attention as complementary modalities to optical depth sensing. Direct comparisons from our MAROON dataset, however, highlight significant differences between radar and optical reconstructions. This work employs the collected multimodal data of four depth imagers, depicted on the left, to systematically characterize these fundamental differences together with sensor-specific findings in a joint evaluation framework.", "qtype": "Implementation_Details" @@ -5493,9 +5493,9 @@ "Depth Estimation", "Optical Segmentation" ], - "figure_path": "./MISSQA/figures/3_2411.00527v2_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2411.00527v2_figure_1.png", "paperid": "2411.00527v2", - "paper_path": "./MISSQA/papers/2411.00527v2.json", + "paper_path": "./MISS-QA/papers/2411.00527v2.json", "figure_id": "2411.00527v2_figure_1.png", "caption": "Figure 1. \nRecent developments for near-field imaging radars enabled the acquisition of high-resolution depth images, and the sensors are now increasingly gaining attention as complementary modalities to optical depth sensing. Direct comparisons from our MAROON dataset, however, highlight significant differences between radar and optical reconstructions. 
This work employs the collected multimodal data of four depth imagers, depicted on the left, to systematically characterize these fundamental differences together with sensor-specific findings in a joint evaluation framework.", "qtype": "Implementation_Details" @@ -5521,9 +5521,9 @@ "masked_elements": [ "loss surface" ], - "figure_path": "./MISSQA/figures/0_2411.05663v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.05663v1_figure_1.png", "paperid": "2411.05663v1", - "paper_path": "./MISSQA/papers/2411.05663v1.json", + "paper_path": "./MISS-QA/papers/2411.05663v1.json", "figure_id": "2411.05663v1_figure_1.png", "caption": "Figure 1: The overview of Online-LoRA. As the data is continuously streamed (a), a new pair of trainable LoRA parameters (A4,B4subscript𝐴4subscript𝐵4A_{4},B_{4}italic_A start_POSTSUBSCRIPT 4 end_POSTSUBSCRIPT , italic_B start_POSTSUBSCRIPT 4 end_POSTSUBSCRIPT) is added (b) every time the loss surface encounters a plateau (c). Subsequently, the previous LoRA parameters (A1,B1;A2,B2;A3,B3subscript𝐴1subscript𝐵1subscript𝐴2subscript𝐵2subscript𝐴3subscript𝐵3A_{1},B_{1};A_{2},B_{2};A_{3},B_{3}italic_A start_POSTSUBSCRIPT 1 end_POSTSUBSCRIPT , italic_B start_POSTSUBSCRIPT 1 end_POSTSUBSCRIPT ; italic_A start_POSTSUBSCRIPT 2 end_POSTSUBSCRIPT , italic_B start_POSTSUBSCRIPT 2 end_POSTSUBSCRIPT ; italic_A start_POSTSUBSCRIPT 3 end_POSTSUBSCRIPT , italic_B start_POSTSUBSCRIPT 3 end_POSTSUBSCRIPT) are frozen (the lock sign in (b)) and merged to the weights of the pre-trained ViT model.", "qtype": "Implementation_Details" @@ -5551,9 +5551,9 @@ "A4", "B4" ], - "figure_path": "./MISSQA/figures/1_2411.05663v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.05663v1_figure_1.png", "paperid": "2411.05663v1", - "paper_path": "./MISSQA/papers/2411.05663v1.json", + "paper_path": "./MISS-QA/papers/2411.05663v1.json", "figure_id": "2411.05663v1_figure_1.png", "caption": "Figure 1: The overview of Online-LoRA. As the data is continuously streamed (a), a new pair of trainable LoRA parameters (A4,B4subscript𝐴4subscript𝐵4A_{4},B_{4}italic_A start_POSTSUBSCRIPT 4 end_POSTSUBSCRIPT , italic_B start_POSTSUBSCRIPT 4 end_POSTSUBSCRIPT) is added (b) every time the loss surface encounters a plateau (c). Subsequently, the previous LoRA parameters (A1,B1;A2,B2;A3,B3subscript𝐴1subscript𝐵1subscript𝐴2subscript𝐵2subscript𝐴3subscript𝐵3A_{1},B_{1};A_{2},B_{2};A_{3},B_{3}italic_A start_POSTSUBSCRIPT 1 end_POSTSUBSCRIPT , italic_B start_POSTSUBSCRIPT 1 end_POSTSUBSCRIPT ; italic_A start_POSTSUBSCRIPT 2 end_POSTSUBSCRIPT , italic_B start_POSTSUBSCRIPT 2 end_POSTSUBSCRIPT ; italic_A start_POSTSUBSCRIPT 3 end_POSTSUBSCRIPT , italic_B start_POSTSUBSCRIPT 3 end_POSTSUBSCRIPT) are frozen (the lock sign in (b)) and merged to the weights of the pre-trained ViT model.", "qtype": "Literature_Background" @@ -5581,9 +5581,9 @@ "frozen LoRA weights", "pre-trained model weights" ], - "figure_path": "./MISSQA/figures/2_2411.05663v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.05663v1_figure_1.png", "paperid": "2411.05663v1", - "paper_path": "./MISSQA/papers/2411.05663v1.json", + "paper_path": "./MISS-QA/papers/2411.05663v1.json", "figure_id": "2411.05663v1_figure_1.png", "caption": "Figure 1: The overview of Online-LoRA. 
As the data is continuously streamed (a), a new pair of trainable LoRA parameters (A4,B4subscript𝐴4subscript𝐵4A_{4},B_{4}italic_A start_POSTSUBSCRIPT 4 end_POSTSUBSCRIPT , italic_B start_POSTSUBSCRIPT 4 end_POSTSUBSCRIPT) is added (b) every time the loss surface encounters a plateau (c). Subsequently, the previous LoRA parameters (A1,B1;A2,B2;A3,B3subscript𝐴1subscript𝐵1subscript𝐴2subscript𝐵2subscript𝐴3subscript𝐵3A_{1},B_{1};A_{2},B_{2};A_{3},B_{3}italic_A start_POSTSUBSCRIPT 1 end_POSTSUBSCRIPT , italic_B start_POSTSUBSCRIPT 1 end_POSTSUBSCRIPT ; italic_A start_POSTSUBSCRIPT 2 end_POSTSUBSCRIPT , italic_B start_POSTSUBSCRIPT 2 end_POSTSUBSCRIPT ; italic_A start_POSTSUBSCRIPT 3 end_POSTSUBSCRIPT , italic_B start_POSTSUBSCRIPT 3 end_POSTSUBSCRIPT) are frozen (the lock sign in (b)) and merged to the weights of the pre-trained ViT model.", "qtype": "Literature_Background" @@ -5610,9 +5610,9 @@ "Importance Score Estimation", "Needle-in-a-Haystack tests" ], - "figure_path": "./MISSQA/figures/0_2410.19258v3_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2410.19258v3_figure_1.png", "paperid": "2410.19258v3", - "paper_path": "./MISSQA/papers/2410.19258v3.json", + "paper_path": "./MISS-QA/papers/2410.19258v3.json", "figure_id": "2410.19258v3_figure_1.png", "caption": "Figure 1: Our proposed head-level KV cache compression method consists of two steps: (1) Head-Level Importance Score Estimation (upper part): important heads that contribute to the\ncontextual reasoning ability are identified using Needle-in-a-Haystack tests. (2) Head-Level KV Cache Allocation (lower part): KV cache budgets for each head during the prefilling phase are allocated based on the importance score distribution identified in the first step.", "qtype": "Literature_Background" @@ -5637,9 +5637,9 @@ "masked_elements": [ "Basic Budget" ], - "figure_path": "./MISSQA/figures/1_2410.19258v3_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2410.19258v3_figure_1.png", "paperid": "2410.19258v3", - "paper_path": "./MISSQA/papers/2410.19258v3.json", + "paper_path": "./MISS-QA/papers/2410.19258v3.json", "figure_id": "2410.19258v3_figure_1.png", "caption": "Figure 1: Our proposed head-level KV cache compression method consists of two steps: (1) Head-Level Importance Score Estimation (upper part): important heads that contribute to the\ncontextual reasoning ability are identified using Needle-in-a-Haystack tests. (2) Head-Level KV Cache Allocation (lower part): KV cache budgets for each head during the prefilling phase are allocated based on the importance score distribution identified in the first step.", "qtype": "Literature_Background" @@ -5666,9 +5666,9 @@ "Importance Score Estimation", "Needle-in-a-Haystack tests" ], - "figure_path": "./MISSQA/figures/2_2410.19258v3_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2410.19258v3_figure_1.png", "paperid": "2410.19258v3", - "paper_path": "./MISSQA/papers/2410.19258v3.json", + "paper_path": "./MISS-QA/papers/2410.19258v3.json", "figure_id": "2410.19258v3_figure_1.png", "caption": "Figure 1: Our proposed head-level KV cache compression method consists of two steps: (1) Head-Level Importance Score Estimation (upper part): important heads that contribute to the\ncontextual reasoning ability are identified using Needle-in-a-Haystack tests. 
(2) Head-Level KV Cache Allocation (lower part): KV cache budgets for each head during the prefilling phase are allocated based on the importance score distribution identified in the first step.", "qtype": "Literature_Background" @@ -5695,9 +5695,9 @@ "masked_elements": [ "Head-Level Allocation" ], - "figure_path": "./MISSQA/figures/3_2410.19258v3_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2410.19258v3_figure_1.png", "paperid": "2410.19258v3", - "paper_path": "./MISSQA/papers/2410.19258v3.json", + "paper_path": "./MISS-QA/papers/2410.19258v3.json", "figure_id": "2410.19258v3_figure_1.png", "caption": "Figure 1: Our proposed head-level KV cache compression method consists of two steps: (1) Head-Level Importance Score Estimation (upper part): important heads that contribute to the\ncontextual reasoning ability are identified using Needle-in-a-Haystack tests. (2) Head-Level KV Cache Allocation (lower part): KV cache budgets for each head during the prefilling phase are allocated based on the importance score distribution identified in the first step.", "qtype": "Experimental_Results" @@ -5717,9 +5717,9 @@ "masked_elements": [ "Monitoring System" ], - "figure_path": "./MISSQA/figures/0_2407.14377v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.14377v1_figure_1.png", "paperid": "2407.14377v1", - "paper_path": "./MISSQA/papers/2407.14377v1.json", + "paper_path": "./MISS-QA/papers/2407.14377v1.json", "figure_id": "2407.14377v1_figure_1.png", "caption": "Figure 1: O-RAN architecture with probabilistic forecast as rApp.", "qtype": "Experimental_Results" @@ -5739,9 +5739,9 @@ "masked_elements": [ "Decision Engine" ], - "figure_path": "./MISSQA/figures/1_2407.14377v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.14377v1_figure_1.png", "paperid": "2407.14377v1", - "paper_path": "./MISSQA/papers/2407.14377v1.json", + "paper_path": "./MISS-QA/papers/2407.14377v1.json", "figure_id": "2407.14377v1_figure_1.png", "caption": "Figure 1: O-RAN architecture with probabilistic forecast as rApp.", "qtype": "Experimental_Results" @@ -5762,9 +5762,9 @@ "Non-Real Time RIC", "Decision Engine" ], - "figure_path": "./MISSQA/figures/2_2407.14377v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2407.14377v1_figure_1.png", "paperid": "2407.14377v1", - "paper_path": "./MISSQA/papers/2407.14377v1.json", + "paper_path": "./MISS-QA/papers/2407.14377v1.json", "figure_id": "2407.14377v1_figure_1.png", "caption": "Figure 1: O-RAN architecture with probabilistic forecast as rApp.", "qtype": "Others" @@ -5785,9 +5785,9 @@ "Analytic Engine", "Monitoring System" ], - "figure_path": "./MISSQA/figures/3_2407.14377v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2407.14377v1_figure_1.png", "paperid": "2407.14377v1", - "paper_path": "./MISSQA/papers/2407.14377v1.json", + "paper_path": "./MISS-QA/papers/2407.14377v1.json", "figure_id": "2407.14377v1_figure_1.png", "caption": "Figure 1: O-RAN architecture with probabilistic forecast as rApp.", "qtype": "Others" @@ -5814,9 +5814,9 @@ "masked_elements": [ "first-stage calibration" ], - "figure_path": "./MISSQA/figures/0_2411.10513v2_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.10513v2_figure_1.png", "paperid": "2411.10513v2", - "paper_path": "./MISSQA/papers/2411.10513v2.json", + "paper_path": "./MISS-QA/papers/2411.10513v2.json", "figure_id": "2411.10513v2_figure_1.png", "caption": "Figure 1: The Any2Any retrieval framework retrieves multimodal data with varying incomplete modalities across instances.\nWe employ a two-stage 
calibration process using conformal prediction to facilitate comparisons between query and reference instances, each has different incomplete modalities.\nAny2Any supports any number and combination of modalities, enabling it to adapt to any multimodal retrieval dataset.\nThis illustrative figure uses data from the KITTI dataset [9] and captions generated by LLaVA [24].", "qtype": "Others" @@ -5844,9 +5844,9 @@ "masked_elements": [ "first stage calibration" ], - "figure_path": "./MISSQA/figures/1_2411.10513v2_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.10513v2_figure_1.png", "paperid": "2411.10513v2", - "paper_path": "./MISSQA/papers/2411.10513v2.json", + "paper_path": "./MISS-QA/papers/2411.10513v2.json", "figure_id": "2411.10513v2_figure_1.png", "caption": "Figure 1: The Any2Any retrieval framework retrieves multimodal data with varying incomplete modalities across instances.\nWe employ a two-stage calibration process using conformal prediction to facilitate comparisons between query and reference instances, each has different incomplete modalities.\nAny2Any supports any number and combination of modalities, enabling it to adapt to any multimodal retrieval dataset.\nThis illustrative figure uses data from the KITTI dataset [9] and captions generated by LLaVA [24].", "qtype": "Design_Rationale" @@ -5873,9 +5873,9 @@ "cross-modal similarity", "conformal probability" ], - "figure_path": "./MISSQA/figures/2_2411.10513v2_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.10513v2_figure_1.png", "paperid": "2411.10513v2", - "paper_path": "./MISSQA/papers/2411.10513v2.json", + "paper_path": "./MISS-QA/papers/2411.10513v2.json", "figure_id": "2411.10513v2_figure_1.png", "caption": "Figure 1: The Any2Any retrieval framework retrieves multimodal data with varying incomplete modalities across instances.\nWe employ a two-stage calibration process using conformal prediction to facilitate comparisons between query and reference instances, each has different incomplete modalities.\nAny2Any supports any number and combination of modalities, enabling it to adapt to any multimodal retrieval dataset.\nThis illustrative figure uses data from the KITTI dataset [9] and captions generated by LLaVA [24].", "qtype": "Design_Rationale" @@ -5899,9 +5899,9 @@ "masked_elements": [ "Dual-length Hybrid Patch (DHP) module" ], - "figure_path": "./MISSQA/figures/0_2411.00330v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.00330v1_figure_2.png", "paperid": "2411.00330v1", - "paper_path": "./MISSQA/papers/2411.00330v1.json", + "paper_path": "./MISS-QA/papers/2411.00330v1.json", "figure_id": "2411.00330v1_figure_2.png", "caption": "Figure 2: Pipeline of the proposed MIPL approach. It consists of the backbone, the CIS module, the BGA module, and the DHP module. ’SCHP’ is a semantic analysis module to obtain the human semantic information. ’Norm.’ and ’Trans.’ denote the normalization and transpose operations, respectively.", "qtype": "Design_Rationale" @@ -5928,9 +5928,9 @@ "Clothing Information Stripping (CIS) module", "Image Encoder" ], - "figure_path": "./MISSQA/figures/1_2411.00330v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2411.00330v1_figure_2.png", "paperid": "2411.00330v1", - "paper_path": "./MISSQA/papers/2411.00330v1.json", + "paper_path": "./MISS-QA/papers/2411.00330v1.json", "figure_id": "2411.00330v1_figure_2.png", "caption": "Figure 2: Pipeline of the proposed MIPL approach. It consists of the backbone, the CIS module, the BGA module, and the DHP module. 
’SCHP’ is a semantic analysis module to obtain the human semantic information. ’Norm.’ and ’Trans.’ denote the normalization and transpose operations, respectively.", "qtype": "Design_Rationale" @@ -5957,9 +5957,9 @@ "masked_elements": [ "CIS module" ], - "figure_path": "./MISSQA/figures/2_2411.00330v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2411.00330v1_figure_2.png", "paperid": "2411.00330v1", - "paper_path": "./MISSQA/papers/2411.00330v1.json", + "paper_path": "./MISS-QA/papers/2411.00330v1.json", "figure_id": "2411.00330v1_figure_2.png", "caption": "Figure 2: Pipeline of the proposed MIPL approach. It consists of the backbone, the CIS module, the BGA module, and the DHP module. ’SCHP’ is a semantic analysis module to obtain the human semantic information. ’Norm.’ and ’Trans.’ denote the normalization and transpose operations, respectively.", "qtype": "Implementation_Details" @@ -5985,9 +5985,9 @@ "masked_elements": [ "BGA module" ], - "figure_path": "./MISSQA/figures/3_2411.00330v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2411.00330v1_figure_2.png", "paperid": "2411.00330v1", - "paper_path": "./MISSQA/papers/2411.00330v1.json", + "paper_path": "./MISS-QA/papers/2411.00330v1.json", "figure_id": "2411.00330v1_figure_2.png", "caption": "Figure 2: Pipeline of the proposed MIPL approach. It consists of the backbone, the CIS module, the BGA module, and the DHP module. ’SCHP’ is a semantic analysis module to obtain the human semantic information. ’Norm.’ and ’Trans.’ denote the normalization and transpose operations, respectively.", "qtype": "Implementation_Details" @@ -6006,9 +6006,9 @@ "masked_elements": [ "local control of complex amplitude" ], - "figure_path": "./MISSQA/figures/0_2411.09133v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.09133v1_figure_1.png", "paperid": "2411.09133v1", - "paper_path": "./MISSQA/papers/2411.09133v1.json", + "paper_path": "./MISS-QA/papers/2411.09133v1.json", "figure_id": "2411.09133v1_figure_1.png", "caption": "Figure 1: Computational imaging with metaoptics: degrees of freedom, physics, and algorithms. a. The general goal of a computational imaging device is to reconstruct various degrees of freedom of an incident light field, for instance its polarization, frequency, momentum, and complex amplitude distribution. Advanced degrees of freedom (e.g., density matrix of the quantum state of light) may also be of interest. b. Light manipulation is realized by leveraging physical properties of metaoptical devices, such as their ability to locally control the complex amplitude of an incoming wavefront, engineered spectral dispersion, non-locality (spatial dispersion), active control of physical properties (e.g., complex transmission), and nonlinear optical properties. c. Once imaged by a detector, the signal may be reconstructed using various reconstruction and estimation methods, such as least-square error minimization (which may include priors on the reconstructed degrees of freedom, such as high sparsity or low complexity). Black-box methods, such as fully connected neural networks, may also be utilized to classify detected signals and images. 
Other parameter estimation methods, such as maximum likelihood estimation (MLE) may also be used to estimate the degrees of freedom of the incident light field.", "qtype": "Implementation_Details" @@ -6028,9 +6028,9 @@ "masked_elements": [ "priors" ], - "figure_path": "./MISSQA/figures/1_2411.09133v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.09133v1_figure_1.png", "paperid": "2411.09133v1", - "paper_path": "./MISSQA/papers/2411.09133v1.json", + "paper_path": "./MISS-QA/papers/2411.09133v1.json", "figure_id": "2411.09133v1_figure_1.png", "caption": "Figure 1: Computational imaging with metaoptics: degrees of freedom, physics, and algorithms. a. The general goal of a computational imaging device is to reconstruct various degrees of freedom of an incident light field, for instance its polarization, frequency, momentum, and complex amplitude distribution. Advanced degrees of freedom (e.g., density matrix of the quantum state of light) may also be of interest. b. Light manipulation is realized by leveraging physical properties of metaoptical devices, such as their ability to locally control the complex amplitude of an incoming wavefront, engineered spectral dispersion, non-locality (spatial dispersion), active control of physical properties (e.g., complex transmission), and nonlinear optical properties. c. Once imaged by a detector, the signal may be reconstructed using various reconstruction and estimation methods, such as least-square error minimization (which may include priors on the reconstructed degrees of freedom, such as high sparsity or low complexity). Black-box methods, such as fully connected neural networks, may also be utilized to classify detected signals and images. Other parameter estimation methods, such as maximum likelihood estimation (MLE) may also be used to estimate the degrees of freedom of the incident light field.", "qtype": "Implementation_Details" @@ -6052,9 +6052,9 @@ "spectral dispersion", "MLE" ], - "figure_path": "./MISSQA/figures/2_2411.09133v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.09133v1_figure_1.png", "paperid": "2411.09133v1", - "paper_path": "./MISSQA/papers/2411.09133v1.json", + "paper_path": "./MISS-QA/papers/2411.09133v1.json", "figure_id": "2411.09133v1_figure_1.png", "caption": "Figure 1: Computational imaging with metaoptics: degrees of freedom, physics, and algorithms. a. The general goal of a computational imaging device is to reconstruct various degrees of freedom of an incident light field, for instance its polarization, frequency, momentum, and complex amplitude distribution. Advanced degrees of freedom (e.g., density matrix of the quantum state of light) may also be of interest. b. Light manipulation is realized by leveraging physical properties of metaoptical devices, such as their ability to locally control the complex amplitude of an incoming wavefront, engineered spectral dispersion, non-locality (spatial dispersion), active control of physical properties (e.g., complex transmission), and nonlinear optical properties. c. Once imaged by a detector, the signal may be reconstructed using various reconstruction and estimation methods, such as least-square error minimization (which may include priors on the reconstructed degrees of freedom, such as high sparsity or low complexity). Black-box methods, such as fully connected neural networks, may also be utilized to classify detected signals and images. 
Other parameter estimation methods, such as maximum likelihood estimation (MLE) may also be used to estimate the degrees of freedom of the incident light field.", "qtype": "Literature_Background" @@ -6076,9 +6076,9 @@ "non-locality", "neural networks" ], - "figure_path": "./MISSQA/figures/3_2411.09133v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2411.09133v1_figure_1.png", "paperid": "2411.09133v1", - "paper_path": "./MISSQA/papers/2411.09133v1.json", + "paper_path": "./MISS-QA/papers/2411.09133v1.json", "figure_id": "2411.09133v1_figure_1.png", "caption": "Figure 1: Computational imaging with metaoptics: degrees of freedom, physics, and algorithms. a. The general goal of a computational imaging device is to reconstruct various degrees of freedom of an incident light field, for instance its polarization, frequency, momentum, and complex amplitude distribution. Advanced degrees of freedom (e.g., density matrix of the quantum state of light) may also be of interest. b. Light manipulation is realized by leveraging physical properties of metaoptical devices, such as their ability to locally control the complex amplitude of an incoming wavefront, engineered spectral dispersion, non-locality (spatial dispersion), active control of physical properties (e.g., complex transmission), and nonlinear optical properties. c. Once imaged by a detector, the signal may be reconstructed using various reconstruction and estimation methods, such as least-square error minimization (which may include priors on the reconstructed degrees of freedom, such as high sparsity or low complexity). Black-box methods, such as fully connected neural networks, may also be utilized to classify detected signals and images. Other parameter estimation methods, such as maximum likelihood estimation (MLE) may also be used to estimate the degrees of freedom of the incident light field.", "qtype": "Literature_Background" @@ -6103,9 +6103,9 @@ "masked_elements": [ "Prompt Template Design" ], - "figure_path": "./MISSQA/figures/0_2409.15154v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2409.15154v1_figure_2.png", "paperid": "2409.15154v1", - "paper_path": "./MISSQA/papers/2409.15154v1.json", + "paper_path": "./MISS-QA/papers/2409.15154v1.json", "figure_id": "2409.15154v1_figure_2.png", "caption": "Figure 2. Overview of RMCBench Construction Process. T2C-L represents text-to-code level, CC represents code completion, CT represents code translation.", "qtype": "Literature_Background" @@ -6132,9 +6132,9 @@ "masked_elements": [ "Data Collection" ], - "figure_path": "./MISSQA/figures/1_2409.15154v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2409.15154v1_figure_2.png", "paperid": "2409.15154v1", - "paper_path": "./MISSQA/papers/2409.15154v1.json", + "paper_path": "./MISS-QA/papers/2409.15154v1.json", "figure_id": "2409.15154v1_figure_2.png", "caption": "Figure 2. Overview of RMCBench Construction Process. T2C-L represents text-to-code level, CC represents code completion, CT represents code translation.", "qtype": "Literature_Background" @@ -6163,9 +6163,9 @@ "Prompt Template Design", "Level 3 Prompts" ], - "figure_path": "./MISSQA/figures/2_2409.15154v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2409.15154v1_figure_2.png", "paperid": "2409.15154v1", - "paper_path": "./MISSQA/papers/2409.15154v1.json", + "paper_path": "./MISS-QA/papers/2409.15154v1.json", "figure_id": "2409.15154v1_figure_2.png", "caption": "Figure 2. Overview of RMCBench Construction Process. 
T2C-L represents text-to-code level, CC represents code completion, CT represents code translation.", "qtype": "Experimental_Results" @@ -6193,9 +6193,9 @@ "masked_elements": [ "Data Collection" ], - "figure_path": "./MISSQA/figures/3_2409.15154v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2409.15154v1_figure_2.png", "paperid": "2409.15154v1", - "paper_path": "./MISSQA/papers/2409.15154v1.json", + "paper_path": "./MISS-QA/papers/2409.15154v1.json", "figure_id": "2409.15154v1_figure_2.png", "caption": "Figure 2. Overview of RMCBench Construction Process. T2C-L represents text-to-code level, CC represents code completion, CT represents code translation.", "qtype": "Experimental_Results" @@ -6224,9 +6224,9 @@ "Generator", "Sequence Outputs" ], - "figure_path": "./MISSQA/figures/0_2407.08189v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2407.08189v1_figure_2.png", "paperid": "2407.08189v1", - "paper_path": "./MISSQA/papers/2407.08189v1.json", + "paper_path": "./MISS-QA/papers/2407.08189v1.json", "figure_id": "2407.08189v1_figure_2.png", "caption": "Figure 2. The framework of fairBERTs.", "qtype": "Experimental_Results" @@ -6253,9 +6253,9 @@ "Discriminator", "Generator" ], - "figure_path": "./MISSQA/figures/1_2407.08189v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2407.08189v1_figure_2.png", "paperid": "2407.08189v1", - "paper_path": "./MISSQA/papers/2407.08189v1.json", + "paper_path": "./MISS-QA/papers/2407.08189v1.json", "figure_id": "2407.08189v1_figure_2.png", "caption": "Figure 2. The framework of fairBERTs.", "qtype": "Experimental_Results" @@ -6276,9 +6276,9 @@ "Generator", "semantic & fairness-aware perturbation" ], - "figure_path": "./MISSQA/figures/2_2407.08189v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2407.08189v1_figure_2.png", "paperid": "2407.08189v1", - "paper_path": "./MISSQA/papers/2407.08189v1.json", + "paper_path": "./MISS-QA/papers/2407.08189v1.json", "figure_id": "2407.08189v1_figure_2.png", "caption": "Figure 2. The framework of fairBERTs.", "qtype": "Others" @@ -6303,9 +6303,9 @@ "Discriminator", "semantic & fairness-aware perturbation" ], - "figure_path": "./MISSQA/figures/3_2407.08189v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2407.08189v1_figure_2.png", "paperid": "2407.08189v1", - "paper_path": "./MISSQA/papers/2407.08189v1.json", + "paper_path": "./MISS-QA/papers/2407.08189v1.json", "figure_id": "2407.08189v1_figure_2.png", "caption": "Figure 2. The framework of fairBERTs.", "qtype": "Others" @@ -6326,9 +6326,9 @@ "Pre-trained Graph Layer", "User Side Metapath Expansion" ], - "figure_path": "./MISSQA/figures/0_2407.00056v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2407.00056v1_figure_2.png", "paperid": "2407.00056v1", - "paper_path": "./MISSQA/papers/2407.00056v1.json", + "paper_path": "./MISS-QA/papers/2407.00056v1.json", "figure_id": "2407.00056v1_figure_2.png", "caption": "Figure 2. 
The overall framework of MMBee, consists of two stages: (i) the offline Graph-guided Interest Expansion (GIE) stage conducts the behavior expansion based on the target user and author; (ii) the online GTR prediction stage aggregates the real-time multi-modal content and expanded behavior for end-to-end training.", "qtype": "Others" @@ -6354,9 +6354,9 @@ "masked_elements": [ "Graph-guided Interest Expansion" ], - "figure_path": "./MISSQA/figures/2_2407.00056v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2407.00056v1_figure_2.png", "paperid": "2407.00056v1", - "paper_path": "./MISSQA/papers/2407.00056v1.json", + "paper_path": "./MISS-QA/papers/2407.00056v1.json", "figure_id": "2407.00056v1_figure_2.png", "caption": "Figure 2. The overall framework of MMBee, consists of two stages: (i) the offline Graph-guided Interest Expansion (GIE) stage conducts the behavior expansion based on the target user and author; (ii) the online GTR prediction stage aggregates the real-time multi-modal content and expanded behavior for end-to-end training.", "qtype": "Design_Rationale" @@ -6381,9 +6381,9 @@ "masked_elements": [ "User Side Metapath Expansion" ], - "figure_path": "./MISSQA/figures/3_2407.00056v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2407.00056v1_figure_2.png", "paperid": "2407.00056v1", - "paper_path": "./MISSQA/papers/2407.00056v1.json", + "paper_path": "./MISS-QA/papers/2407.00056v1.json", "figure_id": "2407.00056v1_figure_2.png", "caption": "Figure 2. The overall framework of MMBee, consists of two stages: (i) the offline Graph-guided Interest Expansion (GIE) stage conducts the behavior expansion based on the target user and author; (ii) the online GTR prediction stage aggregates the real-time multi-modal content and expanded behavior for end-to-end training.", "qtype": "Design_Rationale" @@ -6408,9 +6408,9 @@ "structured", "unstructured" ], - "figure_path": "./MISSQA/figures/0_2408.04388v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2408.04388v1_figure_2.png", "paperid": "2408.04388v1", - "paper_path": "./MISSQA/papers/2408.04388v1.json", + "paper_path": "./MISS-QA/papers/2408.04388v1.json", "figure_id": "2408.04388v1_figure_2.png", "caption": "Figure 2. The schematic overview of MM-Forecast.\nBy consuming historical events in either format of unstructured or structured input (left), our image function identification module (middle) recognizes the image functions as verbal descriptions, which are then feed into LLM-based forecasting model (right).\nOur framework is versatile to handle both structured and unstructured events, meanwhile, it is compatible to popular LLM components for event forecasting, i.e., ICL and RAG.", "qtype": "Design_Rationale" @@ -6439,9 +6439,9 @@ "masked_elements": [ "Image Function Identification" ], - "figure_path": "./MISSQA/figures/1_2408.04388v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2408.04388v1_figure_2.png", "paperid": "2408.04388v1", - "paper_path": "./MISSQA/papers/2408.04388v1.json", + "paper_path": "./MISS-QA/papers/2408.04388v1.json", "figure_id": "2408.04388v1_figure_2.png", "caption": "Figure 2. 
The schematic overview of MM-Forecast.\nBy consuming historical events in either format of unstructured or structured input (left), our image function identification module (middle) recognizes the image functions as verbal descriptions, which are then feed into LLM-based forecasting model (right).\nOur framework is versatile to handle both structured and unstructured events, meanwhile, it is compatible to popular LLM components for event forecasting, i.e., ICL and RAG.", "qtype": "Design_Rationale" @@ -6467,9 +6467,9 @@ "masked_elements": [ "Image Function Identification" ], - "figure_path": "./MISSQA/figures/2_2408.04388v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2408.04388v1_figure_2.png", "paperid": "2408.04388v1", - "paper_path": "./MISSQA/papers/2408.04388v1.json", + "paper_path": "./MISS-QA/papers/2408.04388v1.json", "figure_id": "2408.04388v1_figure_2.png", "caption": "Figure 2. The schematic overview of MM-Forecast.\nBy consuming historical events in either format of unstructured or structured input (left), our image function identification module (middle) recognizes the image functions as verbal descriptions, which are then feed into LLM-based forecasting model (right).\nOur framework is versatile to handle both structured and unstructured events, meanwhile, it is compatible to popular LLM components for event forecasting, i.e., ICL and RAG.", "qtype": "Implementation_Details" @@ -6495,9 +6495,9 @@ "Mid-Level", "Oscillator" ], - "figure_path": "./MISSQA/figures/0_2408.03525v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2408.03525v1_figure_1.png", "paperid": "2408.03525v1", - "paper_path": "./MISSQA/papers/2408.03525v1.json", + "paper_path": "./MISS-QA/papers/2408.03525v1.json", "figure_id": "2408.03525v1_figure_1.png", "caption": "Figure 1: Overview of the central nervous system and hierarchical learning control framework.\na, Mammalian central nervous system structure, the figure contains the cerebral cortex partition, the spinal cord s internal structure, and the double-layer structure of CPGs neural circuits.\nb, Schematic diagram of the proposed hierarchical control framework.\nThe gray nodes in a and the gray box in b represent the sensing mechanism in the nervous system and control framework, respectively, and are responsible for the acquisition of sensing signals. In the nervous system, S1 and the visual cortex are mainly responsible. In the control frame, it is provided by sensor measurement. The green nodes and boxes in a and b represent the high-level institutions in the nervous system and control framework, respectively, responsible for observing the environment and making decisions. In the nervous system, most cortical regions are responsible for this function. In the control framework, this part is realized by the deep reinforcement learning neural network policy. The yellow nodes and boxes in a and b represent the mid-level institutions responsible for coordinating the limbs and generating various motion patterns. In the nervous system, the cerebellum and primary motor cortex are responsible. In the control framework, this part adopts an unsupervised reinforcement learning algorithm and skill-driven neural network. The purple nodes and boxes in a and b represent the low-level institutions that are responsible for the generation and execution of motion signals. In the nervous system, the brain stem and spinal cord are responsible. 
In the control framework, it is realized by the CPG module, which contains an oscillator and a desired pose solver to provide the desired joint position and uses the built-in PID feedback loop of the robot to control 18 motors. The solid line in a connects different nerve regions, representing the information flow relationship, and the thin purple solid line on the right represents the ascending and descending spinal nerves. Dotted lines indicate descending pathway feedback of the CPGs. The solid line in b represents the action relationship between the sensor and the control signal, and the black dotted line connects the specific analysis of each module.\nc, Four different indoor obstacle terrain crossing tasks.\nd, Various new obstacle terrain crossing tasks have never been learned.", "qtype": "Implementation_Details" @@ -6523,9 +6523,9 @@ "Desired poses solver", "Oscillator" ], - "figure_path": "./MISSQA/figures/1_2408.03525v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2408.03525v1_figure_1.png", "paperid": "2408.03525v1", - "paper_path": "./MISSQA/papers/2408.03525v1.json", + "paper_path": "./MISS-QA/papers/2408.03525v1.json", "figure_id": "2408.03525v1_figure_1.png", "caption": "Figure 1: Overview of the central nervous system and hierarchical learning control framework.\na, Mammalian central nervous system structure, the figure contains the cerebral cortex partition, the spinal cord s internal structure, and the double-layer structure of CPGs neural circuits.\nb, Schematic diagram of the proposed hierarchical control framework.\nThe gray nodes in a and the gray box in b represent the sensing mechanism in the nervous system and control framework, respectively, and are responsible for the acquisition of sensing signals. In the nervous system, S1 and the visual cortex are mainly responsible. In the control frame, it is provided by sensor measurement. The green nodes and boxes in a and b represent the high-level institutions in the nervous system and control framework, respectively, responsible for observing the environment and making decisions. In the nervous system, most cortical regions are responsible for this function. In the control framework, this part is realized by the deep reinforcement learning neural network policy. The yellow nodes and boxes in a and b represent the mid-level institutions responsible for coordinating the limbs and generating various motion patterns. In the nervous system, the cerebellum and primary motor cortex are responsible. In the control framework, this part adopts an unsupervised reinforcement learning algorithm and skill-driven neural network. The purple nodes and boxes in a and b represent the low-level institutions that are responsible for the generation and execution of motion signals. In the nervous system, the brain stem and spinal cord are responsible. In the control framework, it is realized by the CPG module, which contains an oscillator and a desired pose solver to provide the desired joint position and uses the built-in PID feedback loop of the robot to control 18 motors. The solid line in a connects different nerve regions, representing the information flow relationship, and the thin purple solid line on the right represents the ascending and descending spinal nerves. Dotted lines indicate descending pathway feedback of the CPGs. 
The solid line in b represents the action relationship between the sensor and the control signal, and the black dotted line connects the specific analysis of each module.\nc, Four different indoor obstacle terrain crossing tasks.\nd, Various new obstacle terrain crossing tasks have never been learned.", "qtype": "Implementation_Details" @@ -6552,9 +6552,9 @@ "High-Level", "Mid-Level" ], - "figure_path": "./MISSQA/figures/2_2408.03525v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2408.03525v1_figure_1.png", "paperid": "2408.03525v1", - "paper_path": "./MISSQA/papers/2408.03525v1.json", + "paper_path": "./MISS-QA/papers/2408.03525v1.json", "figure_id": "2408.03525v1_figure_1.png", "caption": "Figure 1: Overview of the central nervous system and hierarchical learning control framework.\na, Mammalian central nervous system structure, the figure contains the cerebral cortex partition, the spinal cord s internal structure, and the double-layer structure of CPGs neural circuits.\nb, Schematic diagram of the proposed hierarchical control framework.\nThe gray nodes in a and the gray box in b represent the sensing mechanism in the nervous system and control framework, respectively, and are responsible for the acquisition of sensing signals. In the nervous system, S1 and the visual cortex are mainly responsible. In the control frame, it is provided by sensor measurement. The green nodes and boxes in a and b represent the high-level institutions in the nervous system and control framework, respectively, responsible for observing the environment and making decisions. In the nervous system, most cortical regions are responsible for this function. In the control framework, this part is realized by the deep reinforcement learning neural network policy. The yellow nodes and boxes in a and b represent the mid-level institutions responsible for coordinating the limbs and generating various motion patterns. In the nervous system, the cerebellum and primary motor cortex are responsible. In the control framework, this part adopts an unsupervised reinforcement learning algorithm and skill-driven neural network. The purple nodes and boxes in a and b represent the low-level institutions that are responsible for the generation and execution of motion signals. In the nervous system, the brain stem and spinal cord are responsible. In the control framework, it is realized by the CPG module, which contains an oscillator and a desired pose solver to provide the desired joint position and uses the built-in PID feedback loop of the robot to control 18 motors. The solid line in a connects different nerve regions, representing the information flow relationship, and the thin purple solid line on the right represents the ascending and descending spinal nerves. Dotted lines indicate descending pathway feedback of the CPGs. 
The solid line in b represents the action relationship between the sensor and the control signal, and the black dotted line connects the specific analysis of each module.\nc, Four different indoor obstacle terrain crossing tasks.\nd, Various new obstacle terrain crossing tasks have never been learned.", "qtype": "Literature_Background" @@ -6583,9 +6583,9 @@ "Low-Level", "CPG module" ], - "figure_path": "./MISSQA/figures/3_2408.03525v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2408.03525v1_figure_1.png", "paperid": "2408.03525v1", - "paper_path": "./MISSQA/papers/2408.03525v1.json", + "paper_path": "./MISS-QA/papers/2408.03525v1.json", "figure_id": "2408.03525v1_figure_1.png", "caption": "Figure 1: Overview of the central nervous system and hierarchical learning control framework.\na, Mammalian central nervous system structure, the figure contains the cerebral cortex partition, the spinal cord s internal structure, and the double-layer structure of CPGs neural circuits.\nb, Schematic diagram of the proposed hierarchical control framework.\nThe gray nodes in a and the gray box in b represent the sensing mechanism in the nervous system and control framework, respectively, and are responsible for the acquisition of sensing signals. In the nervous system, S1 and the visual cortex are mainly responsible. In the control frame, it is provided by sensor measurement. The green nodes and boxes in a and b represent the high-level institutions in the nervous system and control framework, respectively, responsible for observing the environment and making decisions. In the nervous system, most cortical regions are responsible for this function. In the control framework, this part is realized by the deep reinforcement learning neural network policy. The yellow nodes and boxes in a and b represent the mid-level institutions responsible for coordinating the limbs and generating various motion patterns. In the nervous system, the cerebellum and primary motor cortex are responsible. In the control framework, this part adopts an unsupervised reinforcement learning algorithm and skill-driven neural network. The purple nodes and boxes in a and b represent the low-level institutions that are responsible for the generation and execution of motion signals. In the nervous system, the brain stem and spinal cord are responsible. In the control framework, it is realized by the CPG module, which contains an oscillator and a desired pose solver to provide the desired joint position and uses the built-in PID feedback loop of the robot to control 18 motors. The solid line in a connects different nerve regions, representing the information flow relationship, and the thin purple solid line on the right represents the ascending and descending spinal nerves. Dotted lines indicate descending pathway feedback of the CPGs. 
The solid line in b represents the action relationship between the sensor and the control signal, and the black dotted line connects the specific analysis of each module.\nc, Four different indoor obstacle terrain crossing tasks.\nd, Various new obstacle terrain crossing tasks have never been learned.", "qtype": "Literature_Background" @@ -6612,9 +6612,9 @@ "Levenshtein distance calculation", "BERT-based semantic similarity calculation" ], - "figure_path": "./MISSQA/figures/0_2411.07518v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.07518v1_figure_2.png", "paperid": "2411.07518v1", - "paper_path": "./MISSQA/papers/2411.07518v1.json", + "paper_path": "./MISS-QA/papers/2411.07518v1.json", "figure_id": "2411.07518v1_figure_2.png", "caption": "Figure 2: Our approach to identifying squatting and cloning LLM apps.", "qtype": "Experimental_Results" @@ -6641,9 +6641,9 @@ "masked_elements": [ "Conversion to SNN" ], - "figure_path": "./MISSQA/figures/0_2409.02146v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2409.02146v1_figure_1.png", "paperid": "2409.02146v1", - "paper_path": "./MISSQA/papers/2409.02146v1.json", + "paper_path": "./MISS-QA/papers/2409.02146v1.json", "figure_id": "2409.02146v1_figure_1.png", "caption": "Figure 1: The framework of proposed SNN adaptation pipeline.\nA pre-trained source model may degrade significantly on test data in wild-world scenarios, e.g., diverse weather conditions.\nThe proposed method adapts a source SNN model on the test data in an online manner based on unsupervised loss, which can significantly improve the SNN model s performance in such scenarios.", "qtype": "Experimental_Results" @@ -6664,9 +6664,9 @@ "Gradient enabled layer", "Spiking layer" ], - "figure_path": "./MISSQA/figures/1_2409.02146v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2409.02146v1_figure_1.png", "paperid": "2409.02146v1", - "paper_path": "./MISSQA/papers/2409.02146v1.json", + "paper_path": "./MISS-QA/papers/2409.02146v1.json", "figure_id": "2409.02146v1_figure_1.png", "caption": "Figure 1: The framework of proposed SNN adaptation pipeline.\nA pre-trained source model may degrade significantly on test data in wild-world scenarios, e.g., diverse weather conditions.\nThe proposed method adapts a source SNN model on the test data in an online manner based on unsupervised loss, which can significantly improve the SNN model s performance in such scenarios.", "qtype": "Experimental_Results" @@ -6687,9 +6687,9 @@ "masked_elements": [ "partial parameters fine-tuning" ], - "figure_path": "./MISSQA/figures/2_2409.02146v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2409.02146v1_figure_1.png", "paperid": "2409.02146v1", - "paper_path": "./MISSQA/papers/2409.02146v1.json", + "paper_path": "./MISS-QA/papers/2409.02146v1.json", "figure_id": "2409.02146v1_figure_1.png", "caption": "Figure 1: The framework of proposed SNN adaptation pipeline.\nA pre-trained source model may degrade significantly on test data in wild-world scenarios, e.g., diverse weather conditions.\nThe proposed method adapts a source SNN model on the test data in an online manner based on unsupervised loss, which can significantly improve the SNN model s performance in such scenarios.", "qtype": "Others" @@ -6709,9 +6709,9 @@ "masked_elements": [ "frozen layer" ], - "figure_path": "./MISSQA/figures/3_2409.02146v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2409.02146v1_figure_1.png", "paperid": "2409.02146v1", - "paper_path": "./MISSQA/papers/2409.02146v1.json", + "paper_path": 
"./MISS-QA/papers/2409.02146v1.json", "figure_id": "2409.02146v1_figure_1.png", "caption": "Figure 1: The framework of proposed SNN adaptation pipeline.\nA pre-trained source model may degrade significantly on test data in wild-world scenarios, e.g., diverse weather conditions.\nThe proposed method adapts a source SNN model on the test data in an online manner based on unsupervised loss, which can significantly improve the SNN model s performance in such scenarios.", "qtype": "Others" @@ -6736,9 +6736,9 @@ "masked_elements": [ "DaYu stage1" ], - "figure_path": "./MISSQA/figures/0_2411.10144v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.10144v1_figure_1.png", "paperid": "2411.10144v1", - "paper_path": "./MISSQA/papers/2411.10144v1.json", + "paper_path": "./MISS-QA/papers/2411.10144v1.json", "figure_id": "2411.10144v1_figure_1.png", "caption": "Figure 1: Overview of the DaYu Architecture. DaYu cascades the parameters of Stage 1 and Stage 2 models. Taking continuous cloud images from two moments x−1superscript𝑥1x^{-1}italic_x start_POSTSUPERSCRIPT - 1 end_POSTSUPERSCRIPT, x0superscript𝑥0x^{0}italic_x start_POSTSUPERSCRIPT 0 end_POSTSUPERSCRIPT as inputs, DaYu Stage 1 autoregressively forecasts 12 cloud images for the 0-6 hour period, and then uses x11superscript𝑥11x^{11}italic_x start_POSTSUPERSCRIPT 11 end_POSTSUPERSCRIPT, x12superscript𝑥12x^{12}italic_x start_POSTSUPERSCRIPT 12 end_POSTSUPERSCRIPT as inputs. Stage 2 forecasts 12 cloud images for the 6-12 hour period as the same pattern. Red-bordered boxes indicate the initial inputs for the stage models. Spatio-Temporal Encoder extracts feature embeddings from inputs. Transformer layers are then used to learn the global relationships of high-level abstract features. Finally, Spatio-Temporal Decoder generates the predicted cloud image for the next moment. In this figure, i𝑖iitalic_i ranges from 0 to 23.", "qtype": "Others" @@ -6757,9 +6757,9 @@ "masked_elements": [ "Patch Embedding & Merge" ], - "figure_path": "./MISSQA/figures/1_2411.10144v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.10144v1_figure_1.png", "paperid": "2411.10144v1", - "paper_path": "./MISSQA/papers/2411.10144v1.json", + "paper_path": "./MISS-QA/papers/2411.10144v1.json", "figure_id": "2411.10144v1_figure_1.png", "caption": "Figure 1: Overview of the DaYu Architecture. DaYu cascades the parameters of Stage 1 and Stage 2 models. Taking continuous cloud images from two moments x−1superscript𝑥1x^{-1}italic_x start_POSTSUPERSCRIPT - 1 end_POSTSUPERSCRIPT, x0superscript𝑥0x^{0}italic_x start_POSTSUPERSCRIPT 0 end_POSTSUPERSCRIPT as inputs, DaYu Stage 1 autoregressively forecasts 12 cloud images for the 0-6 hour period, and then uses x11superscript𝑥11x^{11}italic_x start_POSTSUPERSCRIPT 11 end_POSTSUPERSCRIPT, x12superscript𝑥12x^{12}italic_x start_POSTSUPERSCRIPT 12 end_POSTSUPERSCRIPT as inputs. Stage 2 forecasts 12 cloud images for the 6-12 hour period as the same pattern. Red-bordered boxes indicate the initial inputs for the stage models. Spatio-Temporal Encoder extracts feature embeddings from inputs. Transformer layers are then used to learn the global relationships of high-level abstract features. Finally, Spatio-Temporal Decoder generates the predicted cloud image for the next moment. 
In this figure, i𝑖iitalic_i ranges from 0 to 23.", "qtype": "Others" @@ -6786,9 +6786,9 @@ "masked_elements": [ "DaYu stage1" ], - "figure_path": "./MISSQA/figures/2_2411.10144v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.10144v1_figure_1.png", "paperid": "2411.10144v1", - "paper_path": "./MISSQA/papers/2411.10144v1.json", + "paper_path": "./MISS-QA/papers/2411.10144v1.json", "figure_id": "2411.10144v1_figure_1.png", "caption": "Figure 1: Overview of the DaYu Architecture. DaYu cascades the parameters of Stage 1 and Stage 2 models. Taking continuous cloud images from two moments x−1superscript𝑥1x^{-1}italic_x start_POSTSUPERSCRIPT - 1 end_POSTSUPERSCRIPT, x0superscript𝑥0x^{0}italic_x start_POSTSUPERSCRIPT 0 end_POSTSUPERSCRIPT as inputs, DaYu Stage 1 autoregressively forecasts 12 cloud images for the 0-6 hour period, and then uses x11superscript𝑥11x^{11}italic_x start_POSTSUPERSCRIPT 11 end_POSTSUPERSCRIPT, x12superscript𝑥12x^{12}italic_x start_POSTSUPERSCRIPT 12 end_POSTSUPERSCRIPT as inputs. Stage 2 forecasts 12 cloud images for the 6-12 hour period as the same pattern. Red-bordered boxes indicate the initial inputs for the stage models. Spatio-Temporal Encoder extracts feature embeddings from inputs. Transformer layers are then used to learn the global relationships of high-level abstract features. Finally, Spatio-Temporal Decoder generates the predicted cloud image for the next moment. In this figure, i𝑖iitalic_i ranges from 0 to 23.", "qtype": "Design_Rationale" @@ -6809,9 +6809,9 @@ "masked_elements": [ "Skip Connection" ], - "figure_path": "./MISSQA/figures/3_2411.10144v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2411.10144v1_figure_1.png", "paperid": "2411.10144v1", - "paper_path": "./MISSQA/papers/2411.10144v1.json", + "paper_path": "./MISS-QA/papers/2411.10144v1.json", "figure_id": "2411.10144v1_figure_1.png", "caption": "Figure 1: Overview of the DaYu Architecture. DaYu cascades the parameters of Stage 1 and Stage 2 models. Taking continuous cloud images from two moments x−1superscript𝑥1x^{-1}italic_x start_POSTSUPERSCRIPT - 1 end_POSTSUPERSCRIPT, x0superscript𝑥0x^{0}italic_x start_POSTSUPERSCRIPT 0 end_POSTSUPERSCRIPT as inputs, DaYu Stage 1 autoregressively forecasts 12 cloud images for the 0-6 hour period, and then uses x11superscript𝑥11x^{11}italic_x start_POSTSUPERSCRIPT 11 end_POSTSUPERSCRIPT, x12superscript𝑥12x^{12}italic_x start_POSTSUPERSCRIPT 12 end_POSTSUPERSCRIPT as inputs. Stage 2 forecasts 12 cloud images for the 6-12 hour period as the same pattern. Red-bordered boxes indicate the initial inputs for the stage models. Spatio-Temporal Encoder extracts feature embeddings from inputs. Transformer layers are then used to learn the global relationships of high-level abstract features. Finally, Spatio-Temporal Decoder generates the predicted cloud image for the next moment. In this figure, i𝑖iitalic_i ranges from 0 to 23.", "qtype": "Design_Rationale" @@ -6837,9 +6837,9 @@ "inter-modality contrastive learning", "inter-cancer contrastive learning" ], - "figure_path": "./MISSQA/figures/0_2407.15362v2_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2407.15362v2_figure_2.png", "paperid": "2407.15362v2", - "paper_path": "./MISSQA/papers/2407.15362v2.json", + "paper_path": "./MISS-QA/papers/2407.15362v2.json", "figure_id": "2407.15362v2_figure_2.png", "caption": "Figure 2: The Overview of mSTAR Pipeline. mSTAR is a whole-slide pretraining paradigm comprising two-stage pretraining. 
a, Stage 1 aims to inject multimodal knowledge into a slide aggregator by slide-level contrastive learning among WSIs, pathology reports and gene expression data. b, Stage 2 aims to seamlessly propagate multimodal knowledge learned at the slide level into the patch extractor by Self-Taught training, which leverages the slide aggregator pretrained in Stage 1 as “Teacher” and enforces patch extractor to be “Student”.", "qtype": "Design_Rationale" @@ -6865,9 +6865,9 @@ "EMA", "patch extractor" ], - "figure_path": "./MISSQA/figures/1_2407.15362v2_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2407.15362v2_figure_2.png", "paperid": "2407.15362v2", - "paper_path": "./MISSQA/papers/2407.15362v2.json", + "paper_path": "./MISS-QA/papers/2407.15362v2.json", "figure_id": "2407.15362v2_figure_2.png", "caption": "Figure 2: The Overview of mSTAR Pipeline. mSTAR is a whole-slide pretraining paradigm comprising two-stage pretraining. a, Stage 1 aims to inject multimodal knowledge into a slide aggregator by slide-level contrastive learning among WSIs, pathology reports and gene expression data. b, Stage 2 aims to seamlessly propagate multimodal knowledge learned at the slide level into the patch extractor by Self-Taught training, which leverages the slide aggregator pretrained in Stage 1 as “Teacher” and enforces patch extractor to be “Student”.", "qtype": "Design_Rationale" @@ -6894,9 +6894,9 @@ "Self-Taught training", "EMA" ], - "figure_path": "./MISSQA/figures/2_2407.15362v2_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2407.15362v2_figure_2.png", "paperid": "2407.15362v2", - "paper_path": "./MISSQA/papers/2407.15362v2.json", + "paper_path": "./MISS-QA/papers/2407.15362v2.json", "figure_id": "2407.15362v2_figure_2.png", "caption": "Figure 2: The Overview of mSTAR Pipeline. mSTAR is a whole-slide pretraining paradigm comprising two-stage pretraining. a, Stage 1 aims to inject multimodal knowledge into a slide aggregator by slide-level contrastive learning among WSIs, pathology reports and gene expression data. b, Stage 2 aims to seamlessly propagate multimodal knowledge learned at the slide level into the patch extractor by Self-Taught training, which leverages the slide aggregator pretrained in Stage 1 as “Teacher” and enforces patch extractor to be “Student”.", "qtype": "Implementation_Details" @@ -6922,9 +6922,9 @@ "inter-cancer contrastive learning", "triplet loss" ], - "figure_path": "./MISSQA/figures/3_2407.15362v2_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2407.15362v2_figure_2.png", "paperid": "2407.15362v2", - "paper_path": "./MISSQA/papers/2407.15362v2.json", + "paper_path": "./MISS-QA/papers/2407.15362v2.json", "figure_id": "2407.15362v2_figure_2.png", "caption": "Figure 2: The Overview of mSTAR Pipeline. mSTAR is a whole-slide pretraining paradigm comprising two-stage pretraining. a, Stage 1 aims to inject multimodal knowledge into a slide aggregator by slide-level contrastive learning among WSIs, pathology reports and gene expression data. 
b, Stage 2 aims to seamlessly propagate multimodal knowledge learned at the slide level into the patch extractor by Self-Taught training, which leverages the slide aggregator pretrained in Stage 1 as “Teacher” and enforces patch extractor to be “Student”.", "qtype": "Implementation_Details" @@ -6951,9 +6951,9 @@ "masked_elements": [ "State Tracker" ], - "figure_path": "./MISSQA/figures/0_2408.08693v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2408.08693v1_figure_1.png", "paperid": "2408.08693v1", - "paper_path": "./MISSQA/papers/2408.08693v1.json", + "paper_path": "./MISS-QA/papers/2408.08693v1.json", "figure_id": "2408.08693v1_figure_1.png", "caption": "Figure 1: Overview of the Med-PMC evaluation framework. The whole framework can be divided into three parts, including a) Multi-modal consultation, b) Patient Simulator, and c) Evaluation.", "qtype": "Implementation_Details" @@ -6973,9 +6973,9 @@ "masked_elements": [ "Information Extractor" ], - "figure_path": "./MISSQA/figures/1_2408.08693v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2408.08693v1_figure_1.png", "paperid": "2408.08693v1", - "paper_path": "./MISSQA/papers/2408.08693v1.json", + "paper_path": "./MISS-QA/papers/2408.08693v1.json", "figure_id": "2408.08693v1_figure_1.png", "caption": "Figure 1: Overview of the Med-PMC evaluation framework. The whole framework can be divided into three parts, including a) Multi-modal consultation, b) Patient Simulator, and c) Evaluation.", "qtype": "Implementation_Details" @@ -7002,9 +7002,9 @@ "masked_elements": [ "Patient Simulator" ], - "figure_path": "./MISSQA/figures/2_2408.08693v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2408.08693v1_figure_1.png", "paperid": "2408.08693v1", - "paper_path": "./MISSQA/papers/2408.08693v1.json", + "paper_path": "./MISS-QA/papers/2408.08693v1.json", "figure_id": "2408.08693v1_figure_1.png", "caption": "Figure 1: Overview of the Med-PMC evaluation framework. The whole framework can be divided into three parts, including a) Multi-modal consultation, b) Patient Simulator, and c) Evaluation.", "qtype": "Literature_Background" @@ -7024,9 +7024,9 @@ "masked_elements": [ "LLM-based Evaluation" ], - "figure_path": "./MISSQA/figures/3_2408.08693v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2408.08693v1_figure_1.png", "paperid": "2408.08693v1", - "paper_path": "./MISSQA/papers/2408.08693v1.json", + "paper_path": "./MISS-QA/papers/2408.08693v1.json", "figure_id": "2408.08693v1_figure_1.png", "caption": "Figure 1: Overview of the Med-PMC evaluation framework. 
The whole framework can be divided into three parts, including a) Multi-modal consultation, b) Patient Simulator, and c) Evaluation.", "qtype": "Literature_Background" @@ -7054,9 +7054,9 @@ "masked_elements": [ "Single-turn QA" ], - "figure_path": "./MISSQA/figures/0_2407.05721v2_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.05721v2_figure_1.png", "paperid": "2407.05721v2", - "paper_path": "./MISSQA/papers/2407.05721v2.json", + "paper_path": "./MISS-QA/papers/2407.05721v2.json", "figure_id": "2407.05721v2_figure_1.png", "caption": "Figure 1: Overview of dataset preparation.", "qtype": "Literature_Background" @@ -7088,9 +7088,9 @@ "Qwen1.5-72B", "psychological knowledge content" ], - "figure_path": "./MISSQA/figures/1_2407.05721v2_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.05721v2_figure_1.png", "paperid": "2407.05721v2", - "paper_path": "./MISSQA/papers/2407.05721v2.json", + "paper_path": "./MISS-QA/papers/2407.05721v2.json", "figure_id": "2407.05721v2_figure_1.png", "caption": "Figure 1: Overview of dataset preparation.", "qtype": "Literature_Background" @@ -7117,9 +7117,9 @@ "Web crawlers", "Single-turn QA" ], - "figure_path": "./MISSQA/figures/2_2407.05721v2_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2407.05721v2_figure_1.png", "paperid": "2407.05721v2", - "paper_path": "./MISSQA/papers/2407.05721v2.json", + "paper_path": "./MISS-QA/papers/2407.05721v2.json", "figure_id": "2407.05721v2_figure_1.png", "caption": "Figure 1: Overview of dataset preparation.", "qtype": "Experimental_Results" @@ -7144,9 +7144,9 @@ "masked_elements": [ "Evidence" ], - "figure_path": "./MISSQA/figures/3_2407.05721v2_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2407.05721v2_figure_1.png", "paperid": "2407.05721v2", - "paper_path": "./MISSQA/papers/2407.05721v2.json", + "paper_path": "./MISS-QA/papers/2407.05721v2.json", "figure_id": "2407.05721v2_figure_1.png", "caption": "Figure 1: Overview of dataset preparation.", "qtype": "Experimental_Results" @@ -7173,9 +7173,9 @@ "masked_elements": [ "v" ], - "figure_path": "./MISSQA/figures/0_2407.19415v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.19415v1_figure_1.png", "paperid": "2407.19415v1", - "paper_path": "./MISSQA/papers/2407.19415v1.json", + "paper_path": "./MISS-QA/papers/2407.19415v1.json", "figure_id": "2407.19415v1_figure_1.png", "caption": "Figure 1: The structure of II-CLVM. The global sparse (GS) sampling method is applied for each video and music to extract the pretrained feature sequences visubscript𝑣𝑖v_{i}italic_v start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT and ajsubscript𝑎𝑗a_{j}italic_a start_POSTSUBSCRIPT italic_j end_POSTSUBSCRIPT. The encoded features vi′superscriptsubscript𝑣𝑖′v_{i}^{\\prime}italic_v start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT and mj′superscriptsubscript𝑚𝑗′m_{j}^{\\prime}italic_m start_POSTSUBSCRIPT italic_j end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT are then obtained by video and music encoders, respectively. 
Then, the inter-modal similarity matrix S𝑆Sitalic_S and the intra-modal similarity matrix Svsubscript𝑆𝑣S_{v}italic_S start_POSTSUBSCRIPT italic_v end_POSTSUBSCRIPT, Sv′subscript𝑆superscript𝑣′S_{v^{\\prime}}italic_S start_POSTSUBSCRIPT italic_v start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT end_POSTSUBSCRIPT, Smsubscript𝑆𝑚S_{m}italic_S start_POSTSUBSCRIPT italic_m end_POSTSUBSCRIPT, Sm′subscript𝑆superscript𝑚′S_{m^{\\prime}}italic_S start_POSTSUBSCRIPT italic_m start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT end_POSTSUBSCRIPT are calculated. The inter-modal loss is calculated from the matrix S𝑆Sitalic_S, and the intra-modal losses for video and music modalities are calculated by Svsubscript𝑆𝑣S_{v}italic_S start_POSTSUBSCRIPT italic_v end_POSTSUBSCRIPT and Sv′subscript𝑆superscript𝑣′S_{v^{\\prime}}italic_S start_POSTSUBSCRIPT italic_v start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT end_POSTSUBSCRIPT, Smsubscript𝑆𝑚S_{m}italic_S start_POSTSUBSCRIPT italic_m end_POSTSUBSCRIPT and Sm′subscript𝑆superscript𝑚′S_{m^{\\prime}}italic_S start_POSTSUBSCRIPT italic_m start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT end_POSTSUBSCRIPT, respectively.", "qtype": "Experimental_Results" @@ -7202,9 +7202,9 @@ "Intra-modal loss", "Sv" ], - "figure_path": "./MISSQA/figures/1_2407.19415v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.19415v1_figure_1.png", "paperid": "2407.19415v1", - "paper_path": "./MISSQA/papers/2407.19415v1.json", + "paper_path": "./MISS-QA/papers/2407.19415v1.json", "figure_id": "2407.19415v1_figure_1.png", "caption": "Figure 1: The structure of II-CLVM. The global sparse (GS) sampling method is applied for each video and music to extract the pretrained feature sequences visubscript𝑣𝑖v_{i}italic_v start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT and ajsubscript𝑎𝑗a_{j}italic_a start_POSTSUBSCRIPT italic_j end_POSTSUBSCRIPT. The encoded features vi′superscriptsubscript𝑣𝑖′v_{i}^{\\prime}italic_v start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT and mj′superscriptsubscript𝑚𝑗′m_{j}^{\\prime}italic_m start_POSTSUBSCRIPT italic_j end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT are then obtained by video and music encoders, respectively. Then, the inter-modal similarity matrix S𝑆Sitalic_S and the intra-modal similarity matrix Svsubscript𝑆𝑣S_{v}italic_S start_POSTSUBSCRIPT italic_v end_POSTSUBSCRIPT, Sv′subscript𝑆superscript𝑣′S_{v^{\\prime}}italic_S start_POSTSUBSCRIPT italic_v start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT end_POSTSUBSCRIPT, Smsubscript𝑆𝑚S_{m}italic_S start_POSTSUBSCRIPT italic_m end_POSTSUBSCRIPT, Sm′subscript𝑆superscript𝑚′S_{m^{\\prime}}italic_S start_POSTSUBSCRIPT italic_m start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT end_POSTSUBSCRIPT are calculated. 
The inter-modal loss is calculated from the matrix S𝑆Sitalic_S, and the intra-modal losses for video and music modalities are calculated by Svsubscript𝑆𝑣S_{v}italic_S start_POSTSUBSCRIPT italic_v end_POSTSUBSCRIPT and Sv′subscript𝑆superscript𝑣′S_{v^{\\prime}}italic_S start_POSTSUBSCRIPT italic_v start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT end_POSTSUBSCRIPT, Smsubscript𝑆𝑚S_{m}italic_S start_POSTSUBSCRIPT italic_m end_POSTSUBSCRIPT and Sm′subscript𝑆superscript𝑚′S_{m^{\\prime}}italic_S start_POSTSUBSCRIPT italic_m start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT end_POSTSUBSCRIPT, respectively.", "qtype": "Experimental_Results" @@ -7223,9 +7223,9 @@ "masked_elements": [ "Global Sparse Sampling" ], - "figure_path": "./MISSQA/figures/2_2407.19415v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2407.19415v1_figure_1.png", "paperid": "2407.19415v1", - "paper_path": "./MISSQA/papers/2407.19415v1.json", + "paper_path": "./MISS-QA/papers/2407.19415v1.json", "figure_id": "2407.19415v1_figure_1.png", "caption": "Figure 1: The structure of II-CLVM. The global sparse (GS) sampling method is applied for each video and music to extract the pretrained feature sequences visubscript𝑣𝑖v_{i}italic_v start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT and ajsubscript𝑎𝑗a_{j}italic_a start_POSTSUBSCRIPT italic_j end_POSTSUBSCRIPT. The encoded features vi′superscriptsubscript𝑣𝑖′v_{i}^{\\prime}italic_v start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT and mj′superscriptsubscript𝑚𝑗′m_{j}^{\\prime}italic_m start_POSTSUBSCRIPT italic_j end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT are then obtained by video and music encoders, respectively. Then, the inter-modal similarity matrix S𝑆Sitalic_S and the intra-modal similarity matrix Svsubscript𝑆𝑣S_{v}italic_S start_POSTSUBSCRIPT italic_v end_POSTSUBSCRIPT, Sv′subscript𝑆superscript𝑣′S_{v^{\\prime}}italic_S start_POSTSUBSCRIPT italic_v start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT end_POSTSUBSCRIPT, Smsubscript𝑆𝑚S_{m}italic_S start_POSTSUBSCRIPT italic_m end_POSTSUBSCRIPT, Sm′subscript𝑆superscript𝑚′S_{m^{\\prime}}italic_S start_POSTSUBSCRIPT italic_m start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT end_POSTSUBSCRIPT are calculated. The inter-modal loss is calculated from the matrix S𝑆Sitalic_S, and the intra-modal losses for video and music modalities are calculated by Svsubscript𝑆𝑣S_{v}italic_S start_POSTSUBSCRIPT italic_v end_POSTSUBSCRIPT and Sv′subscript𝑆superscript𝑣′S_{v^{\\prime}}italic_S start_POSTSUBSCRIPT italic_v start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT end_POSTSUBSCRIPT, Smsubscript𝑆𝑚S_{m}italic_S start_POSTSUBSCRIPT italic_m end_POSTSUBSCRIPT and Sm′subscript𝑆superscript𝑚′S_{m^{\\prime}}italic_S start_POSTSUBSCRIPT italic_m start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT end_POSTSUBSCRIPT, respectively.", "qtype": "Others" @@ -7245,9 +7245,9 @@ "masked_elements": [ "Video Feature Extractor" ], - "figure_path": "./MISSQA/figures/3_2407.19415v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2407.19415v1_figure_1.png", "paperid": "2407.19415v1", - "paper_path": "./MISSQA/papers/2407.19415v1.json", + "paper_path": "./MISS-QA/papers/2407.19415v1.json", "figure_id": "2407.19415v1_figure_1.png", "caption": "Figure 1: The structure of II-CLVM. The global sparse (GS) sampling method is applied for each video and music to extract the pretrained feature sequences visubscript𝑣𝑖v_{i}italic_v start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT and ajsubscript𝑎𝑗a_{j}italic_a start_POSTSUBSCRIPT italic_j end_POSTSUBSCRIPT. 
The encoded features vi′superscriptsubscript𝑣𝑖′v_{i}^{\\prime}italic_v start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT and mj′superscriptsubscript𝑚𝑗′m_{j}^{\\prime}italic_m start_POSTSUBSCRIPT italic_j end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT are then obtained by video and music encoders, respectively. Then, the inter-modal similarity matrix S𝑆Sitalic_S and the intra-modal similarity matrix Svsubscript𝑆𝑣S_{v}italic_S start_POSTSUBSCRIPT italic_v end_POSTSUBSCRIPT, Sv′subscript𝑆superscript𝑣′S_{v^{\\prime}}italic_S start_POSTSUBSCRIPT italic_v start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT end_POSTSUBSCRIPT, Smsubscript𝑆𝑚S_{m}italic_S start_POSTSUBSCRIPT italic_m end_POSTSUBSCRIPT, Sm′subscript𝑆superscript𝑚′S_{m^{\\prime}}italic_S start_POSTSUBSCRIPT italic_m start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT end_POSTSUBSCRIPT are calculated. The inter-modal loss is calculated from the matrix S𝑆Sitalic_S, and the intra-modal losses for video and music modalities are calculated by Svsubscript𝑆𝑣S_{v}italic_S start_POSTSUBSCRIPT italic_v end_POSTSUBSCRIPT and Sv′subscript𝑆superscript𝑣′S_{v^{\\prime}}italic_S start_POSTSUBSCRIPT italic_v start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT end_POSTSUBSCRIPT, Smsubscript𝑆𝑚S_{m}italic_S start_POSTSUBSCRIPT italic_m end_POSTSUBSCRIPT and Sm′subscript𝑆superscript𝑚′S_{m^{\\prime}}italic_S start_POSTSUBSCRIPT italic_m start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT end_POSTSUBSCRIPT, respectively.", "qtype": "Others" @@ -7267,9 +7267,9 @@ "masked_elements": [ "multi-modal parallel graph autoencoder" ], - "figure_path": "./MISSQA/figures/0_2411.02534v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.02534v1_figure_1.png", "paperid": "2411.02534v1", - "paper_path": "./MISSQA/papers/2411.02534v1.json", + "paper_path": "./MISS-QA/papers/2411.02534v1.json", "figure_id": "2411.02534v1_figure_1.png", "caption": "Figure 1: The overall structure of the proposed model, stMMC is plotted here, where trapezoids represent the GCN layer, and rectangles represent extracted features. Dashed lines with double arrowheads represent that both GCNs share the same weight. stMMC takes two data modalities and passes them through the multi-modal parallel graph autoencoder (MPGA), where each modality is regulated by a contrastive learning mechanism. The detailed process of contrastive learning is shown in Figure 2. The MPGA reconstructs a refined gene expression data, which is then used for spatial clustering.", "qtype": "Others" @@ -7290,9 +7290,9 @@ "image features", "contrastive learning mechanism" ], - "figure_path": "./MISSQA/figures/1_2411.02534v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.02534v1_figure_1.png", "paperid": "2411.02534v1", - "paper_path": "./MISSQA/papers/2411.02534v1.json", + "paper_path": "./MISS-QA/papers/2411.02534v1.json", "figure_id": "2411.02534v1_figure_1.png", "caption": "Figure 1: The overall structure of the proposed model, stMMC is plotted here, where trapezoids represent the GCN layer, and rectangles represent extracted features. Dashed lines with double arrowheads represent that both GCNs share the same weight. stMMC takes two data modalities and passes them through the multi-modal parallel graph autoencoder (MPGA), where each modality is regulated by a contrastive learning mechanism. The detailed process of contrastive learning is shown in Figure 2. 
The MPGA reconstructs a refined gene expression data, which is then used for spatial clustering.", "qtype": "Others" @@ -7313,9 +7313,9 @@ "GCN layer", "multi-modal parallel graph autoencoder" ], - "figure_path": "./MISSQA/figures/2_2411.02534v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.02534v1_figure_1.png", "paperid": "2411.02534v1", - "paper_path": "./MISSQA/papers/2411.02534v1.json", + "paper_path": "./MISS-QA/papers/2411.02534v1.json", "figure_id": "2411.02534v1_figure_1.png", "caption": "Figure 1: The overall structure of the proposed model, stMMC is plotted here, where trapezoids represent the GCN layer, and rectangles represent extracted features. Dashed lines with double arrowheads represent that both GCNs share the same weight. stMMC takes two data modalities and passes them through the multi-modal parallel graph autoencoder (MPGA), where each modality is regulated by a contrastive learning mechanism. The detailed process of contrastive learning is shown in Figure 2. The MPGA reconstructs a refined gene expression data, which is then used for spatial clustering.", "qtype": "Design_Rationale" @@ -7341,9 +7341,9 @@ "corrupted graph", "contrastive learning mechanism" ], - "figure_path": "./MISSQA/figures/3_2411.02534v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2411.02534v1_figure_1.png", "paperid": "2411.02534v1", - "paper_path": "./MISSQA/papers/2411.02534v1.json", + "paper_path": "./MISS-QA/papers/2411.02534v1.json", "figure_id": "2411.02534v1_figure_1.png", "caption": "Figure 1: The overall structure of the proposed model, stMMC is plotted here, where trapezoids represent the GCN layer, and rectangles represent extracted features. Dashed lines with double arrowheads represent that both GCNs share the same weight. stMMC takes two data modalities and passes them through the multi-modal parallel graph autoencoder (MPGA), where each modality is regulated by a contrastive learning mechanism. The detailed process of contrastive learning is shown in Figure 2. 
The MPGA reconstructs a refined gene expression data, which is then used for spatial clustering.", "qtype": "Design_Rationale" @@ -7368,9 +7368,9 @@ "masked_elements": [ "tanh" ], - "figure_path": "./MISSQA/figures/0_2407.09705v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2407.09705v1_figure_2.png", "paperid": "2407.09705v1", - "paper_path": "./MISSQA/papers/2407.09705v1.json", + "paper_path": "./MISS-QA/papers/2407.09705v1.json", "figure_id": "2407.09705v1_figure_2.png", "caption": "Figure 2: Illustration of multimodal framework and the proposed Diagnosing & Re-learning method.", "qtype": "Design_Rationale" @@ -7396,9 +7396,9 @@ "tanh", "re-initialization strength alpha" ], - "figure_path": "./MISSQA/figures/1_2407.09705v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2407.09705v1_figure_2.png", "paperid": "2407.09705v1", - "paper_path": "./MISSQA/papers/2407.09705v1.json", + "paper_path": "./MISS-QA/papers/2407.09705v1.json", "figure_id": "2407.09705v1_figure_2.png", "caption": "Figure 2: Illustration of multimodal framework and the proposed Diagnosing & Re-learning method.", "qtype": "Implementation_Details" @@ -7423,9 +7423,9 @@ "masked_elements": [ "current parameters" ], - "figure_path": "./MISSQA/figures/2_2407.09705v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2407.09705v1_figure_2.png", "paperid": "2407.09705v1", - "paper_path": "./MISSQA/papers/2407.09705v1.json", + "paper_path": "./MISS-QA/papers/2407.09705v1.json", "figure_id": "2407.09705v1_figure_2.png", "caption": "Figure 2: Illustration of multimodal framework and the proposed Diagnosing & Re-learning method.", "qtype": "Implementation_Details" @@ -7450,9 +7450,9 @@ "masked_elements": [ "Distributed Prioritized Replay Buffer" ], - "figure_path": "./MISSQA/figures/0_2410.14803v3_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2410.14803v3_figure_1.png", "paperid": "2410.14803v3", - "paper_path": "./MISSQA/papers/2410.14803v3.json", + "paper_path": "./MISS-QA/papers/2410.14803v3.json", "figure_id": "2410.14803v3_figure_1.png", "caption": "Figure 1: Overview of On-device LLM control with DistRL.", "qtype": "Implementation_Details" @@ -7474,9 +7474,9 @@ "Host Learner", "Auto Evaluators" ], - "figure_path": "./MISSQA/figures/1_2410.14803v3_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2410.14803v3_figure_1.png", "paperid": "2410.14803v3", - "paper_path": "./MISSQA/papers/2410.14803v3.json", + "paper_path": "./MISS-QA/papers/2410.14803v3.json", "figure_id": "2410.14803v3_figure_1.png", "caption": "Figure 1: Overview of On-device LLM control with DistRL.", "qtype": "Implementation_Details" @@ -7501,9 +7501,9 @@ "masked_elements": [ "Distributed Prioritized Replay Buffer" ], - "figure_path": "./MISSQA/figures/2_2410.14803v3_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2410.14803v3_figure_1.png", "paperid": "2410.14803v3", - "paper_path": "./MISSQA/papers/2410.14803v3.json", + "paper_path": "./MISS-QA/papers/2410.14803v3.json", "figure_id": "2410.14803v3_figure_1.png", "caption": "Figure 1: Overview of On-device LLM control with DistRL.", "qtype": "Implementation_Details" @@ -7523,9 +7523,9 @@ "masked_elements": [ "Cache Store" ], - "figure_path": "./MISSQA/figures/0_2411.11739v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.11739v1_figure_1.png", "paperid": "2411.11739v1", - "paper_path": "./MISSQA/papers/2411.11739v1.json", + "paper_path": "./MISS-QA/papers/2411.11739v1.json", "figure_id": "2411.11739v1_figure_1.png", "caption": "Figure 1. 
(a) Representation Unmatching: multi-modal features are obtained through down-streaming task-agnostic self-supervised tasks like image-text matching. (b) Representation Unlearning: generated multi-modal representation is always only served as additional fixed static input feature for recommendation model.", "qtype": "Literature_Background" @@ -7545,9 +7545,9 @@ "masked_elements": [ "Cache Store" ], - "figure_path": "./MISSQA/figures/1_2411.11739v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.11739v1_figure_1.png", "paperid": "2411.11739v1", - "paper_path": "./MISSQA/papers/2411.11739v1.json", + "paper_path": "./MISS-QA/papers/2411.11739v1.json", "figure_id": "2411.11739v1_figure_1.png", "caption": "Figure 1. (a) Representation Unmatching: multi-modal features are obtained through down-streaming task-agnostic self-supervised tasks like image-text matching. (b) Representation Unlearning: generated multi-modal representation is always only served as additional fixed static input feature for recommendation model.", "qtype": "Literature_Background" @@ -7575,9 +7575,9 @@ "masked_elements": [ "ID-based Embedding" ], - "figure_path": "./MISSQA/figures/2_2411.11739v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.11739v1_figure_1.png", "paperid": "2411.11739v1", - "paper_path": "./MISSQA/papers/2411.11739v1.json", + "paper_path": "./MISS-QA/papers/2411.11739v1.json", "figure_id": "2411.11739v1_figure_1.png", "caption": "Figure 1. (a) Representation Unmatching: multi-modal features are obtained through down-streaming task-agnostic self-supervised tasks like image-text matching. (b) Representation Unlearning: generated multi-modal representation is always only served as additional fixed static input feature for recommendation model.", "qtype": "Experimental_Results" @@ -7598,9 +7598,9 @@ "Text-to-Image Diffusion UNet", "Mask Generator" ], - "figure_path": "./MISSQA/figures/0_2407.14326v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.14326v1_figure_1.png", "paperid": "2407.14326v1", - "paper_path": "./MISSQA/papers/2407.14326v1.json", + "paper_path": "./MISS-QA/papers/2407.14326v1.json", "figure_id": "2407.14326v1_figure_1.png", "caption": "Figure 1: \nThe overview of our framework, adapted from ODISE.\nFeatures extracted by the text-to-image diffusion model are passed to a mask generator, which outputs binary mask predictions and mask embeddings for individual objects detected in the image. These mask embeddings are then combined with category embeddings from the text encoder via a dot product to supervise the classification task. Additionally, an implicit captioner encodes the image to provide a conditioning signal for the diffusion process.", "qtype": "Experimental_Results" @@ -7619,9 +7619,9 @@ "masked_elements": [ "Text-to-Image Diffusion UNet" ], - "figure_path": "./MISSQA/figures/1_2407.14326v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.14326v1_figure_1.png", "paperid": "2407.14326v1", - "paper_path": "./MISSQA/papers/2407.14326v1.json", + "paper_path": "./MISS-QA/papers/2407.14326v1.json", "figure_id": "2407.14326v1_figure_1.png", "caption": "Figure 1: \nThe overview of our framework, adapted from ODISE.\nFeatures extracted by the text-to-image diffusion model are passed to a mask generator, which outputs binary mask predictions and mask embeddings for individual objects detected in the image. These mask embeddings are then combined with category embeddings from the text encoder via a dot product to supervise the classification task. 
Additionally, an implicit captioner encodes the image to provide a conditioning signal for the diffusion process.", "qtype": "Others" @@ -7642,9 +7642,9 @@ "mask embeddings", "category embeddings" ], - "figure_path": "./MISSQA/figures/2_2407.14326v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2407.14326v1_figure_1.png", "paperid": "2407.14326v1", - "paper_path": "./MISSQA/papers/2407.14326v1.json", + "paper_path": "./MISS-QA/papers/2407.14326v1.json", "figure_id": "2407.14326v1_figure_1.png", "caption": "Figure 1: \nThe overview of our framework, adapted from ODISE.\nFeatures extracted by the text-to-image diffusion model are passed to a mask generator, which outputs binary mask predictions and mask embeddings for individual objects detected in the image. These mask embeddings are then combined with category embeddings from the text encoder via a dot product to supervise the classification task. Additionally, an implicit captioner encodes the image to provide a conditioning signal for the diffusion process.", "qtype": "Others" @@ -7663,9 +7663,9 @@ "masked_elements": [ "Swin-Transformer Block" ], - "figure_path": "./MISSQA/figures/0_2408.00347v2_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2408.00347v2_figure_2.png", "paperid": "2408.00347v2", - "paper_path": "./MISSQA/papers/2408.00347v2.json", + "paper_path": "./MISS-QA/papers/2408.00347v2.json", "figure_id": "2408.00347v2_figure_2.png", "caption": "Figure 2: Overview of our proposed diffusion transformer segmentation(DTS) model.", "qtype": "Others" @@ -7686,9 +7686,9 @@ "Patch Merging", "Diffusion Encoder" ], - "figure_path": "./MISSQA/figures/1_2408.00347v2_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2408.00347v2_figure_2.png", "paperid": "2408.00347v2", - "paper_path": "./MISSQA/papers/2408.00347v2.json", + "paper_path": "./MISS-QA/papers/2408.00347v2.json", "figure_id": "2408.00347v2_figure_2.png", "caption": "Figure 2: Overview of our proposed diffusion transformer segmentation(DTS) model.", "qtype": "Others" @@ -7710,9 +7710,9 @@ "time embedding", "linear projection" ], - "figure_path": "./MISSQA/figures/2_2408.00347v2_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2408.00347v2_figure_2.png", "paperid": "2408.00347v2", - "paper_path": "./MISSQA/papers/2408.00347v2.json", + "paper_path": "./MISS-QA/papers/2408.00347v2.json", "figure_id": "2408.00347v2_figure_2.png", "caption": "Figure 2: Overview of our proposed diffusion transformer segmentation(DTS) model.", "qtype": "Design_Rationale" @@ -7739,9 +7739,9 @@ "masked_elements": [ "global feature map" ], - "figure_path": "./MISSQA/figures/3_2408.00347v2_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2408.00347v2_figure_2.png", "paperid": "2408.00347v2", - "paper_path": "./MISSQA/papers/2408.00347v2.json", + "paper_path": "./MISS-QA/papers/2408.00347v2.json", "figure_id": "2408.00347v2_figure_2.png", "caption": "Figure 2: Overview of our proposed diffusion transformer segmentation(DTS) model.", "qtype": "Design_Rationale" @@ -7769,9 +7769,9 @@ "masked_elements": [ "Query Text" ], - "figure_path": "./MISSQA/figures/0_2411.06869v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.06869v1_figure_1.png", "paperid": "2411.06869v1", - "paper_path": "./MISSQA/papers/2411.06869v1.json", + "paper_path": "./MISS-QA/papers/2411.06869v1.json", "figure_id": "2411.06869v1_figure_1.png", "caption": "Figure 1: Architectural difference from conventional CAPE methods. 
Previous methods (top) are support-dependent approaches requiring support images and keypoint annotations, but ours (bottom) is one of the support-free approaches which do not need any additional images and annotations, but just text description of the keypoints", "qtype": "Design_Rationale" @@ -7792,9 +7792,9 @@ "Matching", "Proposal Generator" ], - "figure_path": "./MISSQA/figures/1_2411.06869v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.06869v1_figure_1.png", "paperid": "2411.06869v1", - "paper_path": "./MISSQA/papers/2411.06869v1.json", + "paper_path": "./MISS-QA/papers/2411.06869v1.json", "figure_id": "2411.06869v1_figure_1.png", "caption": "Figure 1: Architectural difference from conventional CAPE methods. Previous methods (top) are support-dependent approaches requiring support images and keypoint annotations, but ours (bottom) is one of the support-free approaches which do not need any additional images and annotations, but just text description of the keypoints", "qtype": "Implementation_Details" @@ -7815,9 +7815,9 @@ "Decoder", "RoentGen Model" ], - "figure_path": "./MISSQA/figures/0_2409.19436v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2409.19436v1_figure_2.png", "paperid": "2409.19436v1", - "paper_path": "./MISSQA/papers/2409.19436v1.json", + "paper_path": "./MISS-QA/papers/2409.19436v1.json", "figure_id": "2409.19436v1_figure_2.png", "caption": "Figure 2: \nOverview of the proposed SDICE index. We input the real and synthetic dataset to the contrastive pretrained encoder to obtain similarity score distributions. The F-ratio between the two distributions after exponential normalization can be used to assess the diversity of the synthetic dataset.", "qtype": "Implementation_Details" @@ -7838,9 +7838,9 @@ "masked_elements": [ "SDICE index γ" ], - "figure_path": "./MISSQA/figures/1_2409.19436v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2409.19436v1_figure_2.png", "paperid": "2409.19436v1", - "paper_path": "./MISSQA/papers/2409.19436v1.json", + "paper_path": "./MISS-QA/papers/2409.19436v1.json", "figure_id": "2409.19436v1_figure_2.png", "caption": "Figure 2: \nOverview of the proposed SDICE index. We input the real and synthetic dataset to the contrastive pretrained encoder to obtain similarity score distributions. The F-ratio between the two distributions after exponential normalization can be used to assess the diversity of the synthetic dataset.", "qtype": "Literature_Background" @@ -7865,9 +7865,9 @@ "masked_elements": [ "S2-MLP Link" ], - "figure_path": "./MISSQA/figures/0_2411.12547v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.12547v1_figure_1.png", "paperid": "2411.12547v1", - "paper_path": "./MISSQA/papers/2411.12547v1.json", + "paper_path": "./MISS-QA/papers/2411.12547v1.json", "figure_id": "2411.12547v1_figure_1.png", "caption": "Figure 1: The overall framework of S3TU-Net. 
The framework is divided into three broad categories of modules, two novel convolutional modules (DWF-Conv/ D2BR-Conv), multi-spatial dimensional connectors (S2-MLP Link), and residual connection-based superpixel vision transformer (RM-SViT).", "qtype": "Literature_Background" @@ -7895,9 +7895,9 @@ "masked_elements": [ "RM-SViT" ], - "figure_path": "./MISSQA/figures/1_2411.12547v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.12547v1_figure_1.png", "paperid": "2411.12547v1", - "paper_path": "./MISSQA/papers/2411.12547v1.json", + "paper_path": "./MISS-QA/papers/2411.12547v1.json", "figure_id": "2411.12547v1_figure_1.png", "caption": "Figure 1: The overall framework of S3TU-Net. The framework is divided into three broad categories of modules, two novel convolutional modules (DWF-Conv/ D2BR-Conv), multi-spatial dimensional connectors (S2-MLP Link), and residual connection-based superpixel vision transformer (RM-SViT).", "qtype": "Literature_Background" @@ -7923,9 +7923,9 @@ "masked_elements": [ "DWF-Conv" ], - "figure_path": "./MISSQA/figures/2_2411.12547v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.12547v1_figure_1.png", "paperid": "2411.12547v1", - "paper_path": "./MISSQA/papers/2411.12547v1.json", + "paper_path": "./MISS-QA/papers/2411.12547v1.json", "figure_id": "2411.12547v1_figure_1.png", "caption": "Figure 1: The overall framework of S3TU-Net. The framework is divided into three broad categories of modules, two novel convolutional modules (DWF-Conv/ D2BR-Conv), multi-spatial dimensional connectors (S2-MLP Link), and residual connection-based superpixel vision transformer (RM-SViT).", "qtype": "Experimental_Results" @@ -7949,9 +7949,9 @@ "masked_elements": [ "RM-SViT" ], - "figure_path": "./MISSQA/figures/3_2411.12547v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2411.12547v1_figure_1.png", "paperid": "2411.12547v1", - "paper_path": "./MISSQA/papers/2411.12547v1.json", + "paper_path": "./MISS-QA/papers/2411.12547v1.json", "figure_id": "2411.12547v1_figure_1.png", "caption": "Figure 1: The overall framework of S3TU-Net. The framework is divided into three broad categories of modules, two novel convolutional modules (DWF-Conv/ D2BR-Conv), multi-spatial dimensional connectors (S2-MLP Link), and residual connection-based superpixel vision transformer (RM-SViT).", "qtype": "Experimental_Results" @@ -7977,9 +7977,9 @@ "masked_elements": [ "AdaCM" ], - "figure_path": "./MISSQA/figures/0_2411.09344v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.09344v1_figure_2.png", "paperid": "2411.09344v1", - "paper_path": "./MISSQA/papers/2411.09344v1.json", + "paper_path": "./MISS-QA/papers/2411.09344v1.json", "figure_id": "2411.09344v1_figure_2.png", "caption": "Figure 2: Overview structure of AACL. \"Awsubscript𝐴𝑤A_{w}italic_A start_POSTSUBSCRIPT italic_w end_POSTSUBSCRIPT\" and \"Assubscript𝐴𝑠A_{s}italic_A start_POSTSUBSCRIPT italic_s end_POSTSUBSCRIPT\" indicate the weak augmentation and the \"USAug\" module, respectively.", "qtype": "Experimental_Results" @@ -7999,9 +7999,9 @@ "masked_elements": [ "Filter" ], - "figure_path": "./MISSQA/figures/1_2411.09344v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2411.09344v1_figure_2.png", "paperid": "2411.09344v1", - "paper_path": "./MISSQA/papers/2411.09344v1.json", + "paper_path": "./MISS-QA/papers/2411.09344v1.json", "figure_id": "2411.09344v1_figure_2.png", "caption": "Figure 2: Overview structure of AACL. 
\"Awsubscript𝐴𝑤A_{w}italic_A start_POSTSUBSCRIPT italic_w end_POSTSUBSCRIPT\" and \"Assubscript𝐴𝑠A_{s}italic_A start_POSTSUBSCRIPT italic_s end_POSTSUBSCRIPT\" indicate the weak augmentation and the \"USAug\" module, respectively.", "qtype": "Others" @@ -8022,9 +8022,9 @@ "counterfactual ICL", "ICL-based Bias Suppression" ], - "figure_path": "./MISSQA/figures/0_2408.12942v2_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2408.12942v2_figure_1.png", "paperid": "2408.12942v2", - "paper_path": "./MISSQA/papers/2408.12942v2.json", + "paper_path": "./MISS-QA/papers/2408.12942v2.json", "figure_id": "2408.12942v2_figure_1.png", "caption": "Figure 1: (a) Dataset bias under causal perspective (b) Illustration of the Causal-Guided Active Learning framework.", "qtype": "Others" @@ -8053,9 +8053,9 @@ "causal invariance-based biased instances identification", "active learning framework" ], - "figure_path": "./MISSQA/figures/1_2408.12942v2_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2408.12942v2_figure_1.png", "paperid": "2408.12942v2", - "paper_path": "./MISSQA/papers/2408.12942v2.json", + "paper_path": "./MISS-QA/papers/2408.12942v2.json", "figure_id": "2408.12942v2_figure_1.png", "caption": "Figure 1: (a) Dataset bias under causal perspective (b) Illustration of the Causal-Guided Active Learning framework.", "qtype": "Design_Rationale" @@ -8081,9 +8081,9 @@ "masked_elements": [ "typical criteria" ], - "figure_path": "./MISSQA/figures/2_2408.12942v2_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2408.12942v2_figure_1.png", "paperid": "2408.12942v2", - "paper_path": "./MISSQA/papers/2408.12942v2.json", + "paper_path": "./MISS-QA/papers/2408.12942v2.json", "figure_id": "2408.12942v2_figure_1.png", "caption": "Figure 1: (a) Dataset bias under causal perspective (b) Illustration of the Causal-Guided Active Learning framework.", "qtype": "Design_Rationale" @@ -8110,9 +8110,9 @@ "optimal subgraphs", "2D SE minimization" ], - "figure_path": "./MISSQA/figures/0_2407.18274v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2407.18274v1_figure_2.png", "paperid": "2407.18274v1", - "paper_path": "./MISSQA/papers/2407.18274v1.json", + "paper_path": "./MISS-QA/papers/2407.18274v1.json", "figure_id": "2407.18274v1_figure_2.png", "caption": "Figure 2. The proposed ADP-SEMEvent framework. ADP-SEMEvent consists of two stages: the private message graph construction stage (stage 1) and the private message graph clustering stage (stage 2). Messages with the same color represent the same cluster according to the ground truth labels; orange edges Essubscript𝐸𝑠E_{s}italic_E start_POSTSUBSCRIPT italic_s end_POSTSUBSCRIPT are derived from 1-dimensional structural entropy, and purple edges Easubscript𝐸𝑎E_{a}italic_E start_POSTSUBSCRIPT italic_a end_POSTSUBSCRIPT are derived from relevant attributes; arrows of specific colors indicate specific operations.", "qtype": "Design_Rationale" @@ -8140,9 +8140,9 @@ "Message Passing Neural Network", "Sampling Threshold" ], - "figure_path": "./MISSQA/figures/0_2410.07191v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2410.07191v1_figure_2.png", "paperid": "2410.07191v1", - "paper_path": "./MISSQA/papers/2410.07191v1.json", + "paper_path": "./MISS-QA/papers/2410.07191v1.json", "figure_id": "2410.07191v1_figure_2.png", "caption": "Figure 2: An overview of CRiTIC. In this architecture, Causal Discovery Network receives the map-aware agent representations and generates a causality adjacency matrix. 
The matrix is used by a Transformer-based prediction backbone to shape the attention toward the causal agents.", "qtype": "Implementation_Details" @@ -8169,9 +8169,9 @@ "Denoising Autoencoder", "Masked Agent Representations" ], - "figure_path": "./MISSQA/figures/1_2410.07191v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2410.07191v1_figure_2.png", "paperid": "2410.07191v1", - "paper_path": "./MISSQA/papers/2410.07191v1.json", + "paper_path": "./MISS-QA/papers/2410.07191v1.json", "figure_id": "2410.07191v1_figure_2.png", "caption": "Figure 2: An overview of CRiTIC. In this architecture, Causal Discovery Network receives the map-aware agent representations and generates a causality adjacency matrix. The matrix is used by a Transformer-based prediction backbone to shape the attention toward the causal agents.", "qtype": "Implementation_Details" @@ -8199,9 +8199,9 @@ "Message Passing Neural Network", "Sampling Threshold" ], - "figure_path": "./MISSQA/figures/2_2410.07191v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2410.07191v1_figure_2.png", "paperid": "2410.07191v1", - "paper_path": "./MISSQA/papers/2410.07191v1.json", + "paper_path": "./MISS-QA/papers/2410.07191v1.json", "figure_id": "2410.07191v1_figure_2.png", "caption": "Figure 2: An overview of CRiTIC. In this architecture, Causal Discovery Network receives the map-aware agent representations and generates a causality adjacency matrix. The matrix is used by a Transformer-based prediction backbone to shape the attention toward the causal agents.", "qtype": "Implementation_Details" @@ -8228,9 +8228,9 @@ "masked_elements": [ "Denoising Autoencoder" ], - "figure_path": "./MISSQA/figures/3_2410.07191v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2410.07191v1_figure_2.png", "paperid": "2410.07191v1", - "paper_path": "./MISSQA/papers/2410.07191v1.json", + "paper_path": "./MISS-QA/papers/2410.07191v1.json", "figure_id": "2410.07191v1_figure_2.png", "caption": "Figure 2: An overview of CRiTIC. In this architecture, Causal Discovery Network receives the map-aware agent representations and generates a causality adjacency matrix. The matrix is used by a Transformer-based prediction backbone to shape the attention toward the causal agents.", "qtype": "Literature_Background" @@ -8258,9 +8258,9 @@ "masked_elements": [ "Transform-neck" ], - "figure_path": "./MISSQA/figures/0_2407.19651v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.19651v1_figure_1.png", "paperid": "2407.19651v1", - "paper_path": "./MISSQA/papers/2407.19651v1.json", + "paper_path": "./MISS-QA/papers/2407.19651v1.json", "figure_id": "2407.19651v1_figure_1.png", "caption": "Figure 1: On the left is inadequate frameworks for image compression for MLLMs, where the image codec is trained for (a) human perception, (b) the downstream task network, or (c) compressing the intermediate features of the task network. 
On the right is the proposed transform-neck and surrogate loss under three distinct scenarios, with the image codec (d1) pre-trained for human perception, (d2) updated for joint human and machine perception, or (d3) updated for machine perception.", "qtype": "Literature_Background" @@ -8287,9 +8287,9 @@ "masked_elements": [ "hierarchical feature extraction" ], - "figure_path": "./MISSQA/figures/0_2409.02084v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2409.02084v1_figure_2.png", "paperid": "2409.02084v1", - "paper_path": "./MISSQA/papers/2409.02084v1.json", + "paper_path": "./MISS-QA/papers/2409.02084v1.json", "figure_id": "2409.02084v1_figure_2.png", "caption": "Figure 2: GraspSplats employs two techniques to efficiently construct feature-enhanced 3D Gaussians: hierarchical feature extraction and dense initialization from geometry regularization, which reduces the overall runtime to 1/10 of existing GS methods [38]. (High-dimensional features are visualized using PCA and the visualized Gaussian ellipsoids are trained without densification).", "qtype": "Experimental_Results" @@ -8308,9 +8308,9 @@ "masked_elements": [ "hierarchical feature extraction" ], - "figure_path": "./MISSQA/figures/1_2409.02084v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2409.02084v1_figure_2.png", "paperid": "2409.02084v1", - "paper_path": "./MISSQA/papers/2409.02084v1.json", + "paper_path": "./MISS-QA/papers/2409.02084v1.json", "figure_id": "2409.02084v1_figure_2.png", "caption": "Figure 2: GraspSplats employs two techniques to efficiently construct feature-enhanced 3D Gaussians: hierarchical feature extraction and dense initialization from geometry regularization, which reduces the overall runtime to 1/10 of existing GS methods [38]. (High-dimensional features are visualized using PCA and the visualized Gaussian ellipsoids are trained without densification).", "qtype": "Others" @@ -8329,9 +8329,9 @@ "masked_elements": [ "feature splatting" ], - "figure_path": "./MISSQA/figures/2_2409.02084v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2409.02084v1_figure_2.png", "paperid": "2409.02084v1", - "paper_path": "./MISSQA/papers/2409.02084v1.json", + "paper_path": "./MISS-QA/papers/2409.02084v1.json", "figure_id": "2409.02084v1_figure_2.png", "caption": "Figure 2: GraspSplats employs two techniques to efficiently construct feature-enhanced 3D Gaussians: hierarchical feature extraction and dense initialization from geometry regularization, which reduces the overall runtime to 1/10 of existing GS methods [38]. (High-dimensional features are visualized using PCA and the visualized Gaussian ellipsoids are trained without densification).", "qtype": "Others" @@ -8351,9 +8351,9 @@ "masked_elements": [ "Image Decomposition Module" ], - "figure_path": "./MISSQA/figures/0_2411.01432v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.01432v1_figure_1.png", "paperid": "2411.01432v1", - "paper_path": "./MISSQA/papers/2411.01432v1.json", + "paper_path": "./MISS-QA/papers/2411.01432v1.json", "figure_id": "2411.01432v1_figure_1.png", "caption": "Figure 1: Framework of the proposed method. In this work, we present an insightful meta-learning framework that exploits cross-domain invariant frequency priors to alleviate the over-fitting problems of classic meta-learning in cross-domain FSL tasks. Our method consists of an Image Decomposition Module (IDM) and a Prior Regularization Meta-Network (PRM-Net). 
Among them, IDM aim at explicitly decomposing every image in few-shot task into low- and high-frequency components. PRM-Net develops a prediction consistency prior and a feature reconstruction prior to jointly regularize the feature embedding network during meta-learning, aiming to learn generalizable image feature embeddings. Once the model is trained, only the main branch is retained for meta-testing on target domains.", "qtype": "Others" @@ -8373,9 +8373,9 @@ "masked_elements": [ "feature reconstruction prior" ], - "figure_path": "./MISSQA/figures/1_2411.01432v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.01432v1_figure_1.png", "paperid": "2411.01432v1", - "paper_path": "./MISSQA/papers/2411.01432v1.json", + "paper_path": "./MISS-QA/papers/2411.01432v1.json", "figure_id": "2411.01432v1_figure_1.png", "caption": "Figure 1: Framework of the proposed method. In this work, we present an insightful meta-learning framework that exploits cross-domain invariant frequency priors to alleviate the over-fitting problems of classic meta-learning in cross-domain FSL tasks. Our method consists of an Image Decomposition Module (IDM) and a Prior Regularization Meta-Network (PRM-Net). Among them, IDM aim at explicitly decomposing every image in few-shot task into low- and high-frequency components. PRM-Net develops a prediction consistency prior and a feature reconstruction prior to jointly regularize the feature embedding network during meta-learning, aiming to learn generalizable image feature embeddings. Once the model is trained, only the main branch is retained for meta-testing on target domains.", "qtype": "Others" @@ -8401,9 +8401,9 @@ "masked_elements": [ "main branch" ], - "figure_path": "./MISSQA/figures/2_2411.01432v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.01432v1_figure_1.png", "paperid": "2411.01432v1", - "paper_path": "./MISSQA/papers/2411.01432v1.json", + "paper_path": "./MISS-QA/papers/2411.01432v1.json", "figure_id": "2411.01432v1_figure_1.png", "caption": "Figure 1: Framework of the proposed method. In this work, we present an insightful meta-learning framework that exploits cross-domain invariant frequency priors to alleviate the over-fitting problems of classic meta-learning in cross-domain FSL tasks. Our method consists of an Image Decomposition Module (IDM) and a Prior Regularization Meta-Network (PRM-Net). Among them, IDM aim at explicitly decomposing every image in few-shot task into low- and high-frequency components. PRM-Net develops a prediction consistency prior and a feature reconstruction prior to jointly regularize the feature embedding network during meta-learning, aiming to learn generalizable image feature embeddings. Once the model is trained, only the main branch is retained for meta-testing on target domains.", "qtype": "Design_Rationale" @@ -8430,9 +8430,9 @@ "feature reconstruction prior", "prediction consistency prior" ], - "figure_path": "./MISSQA/figures/3_2411.01432v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2411.01432v1_figure_1.png", "paperid": "2411.01432v1", - "paper_path": "./MISSQA/papers/2411.01432v1.json", + "paper_path": "./MISS-QA/papers/2411.01432v1.json", "figure_id": "2411.01432v1_figure_1.png", "caption": "Figure 1: Framework of the proposed method. In this work, we present an insightful meta-learning framework that exploits cross-domain invariant frequency priors to alleviate the over-fitting problems of classic meta-learning in cross-domain FSL tasks. 
Our method consists of an Image Decomposition Module (IDM) and a Prior Regularization Meta-Network (PRM-Net). Among them, IDM aim at explicitly decomposing every image in few-shot task into low- and high-frequency components. PRM-Net develops a prediction consistency prior and a feature reconstruction prior to jointly regularize the feature embedding network during meta-learning, aiming to learn generalizable image feature embeddings. Once the model is trained, only the main branch is retained for meta-testing on target domains.", "qtype": "Design_Rationale" @@ -8459,9 +8459,9 @@ "masked_elements": [ "1×1 conv" ], - "figure_path": "./MISSQA/figures/0_2409.13609v2_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2409.13609v2_figure_2.png", "paperid": "2409.13609v2", - "paper_path": "./MISSQA/papers/2409.13609v2.json", + "paper_path": "./MISS-QA/papers/2409.13609v2.json", "figure_id": "2409.13609v2_figure_2.png", "caption": "Figure 2: Overall architecture of MaPPER. MaPPER freezes the pre-trained vision encoder and language encoder. For the language branch, Dynamic Prior Adapters (DyPA) utilize aligned priors generated from the Vision-aligned Prior Module to enable efficient modal alignment and adaptation. For the language branch, Local Convolution Adapters (LoCA) integrate local visual features the global prior (pre-trained visual knowledge) from the visual encoder.\nMoreover, the Prior-guided Text module for promoting the multimodal alignment.", "qtype": "Design_Rationale" @@ -8487,9 +8487,9 @@ "masked_elements": [ "vision-aligned prior" ], - "figure_path": "./MISSQA/figures/1_2409.13609v2_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2409.13609v2_figure_2.png", "paperid": "2409.13609v2", - "paper_path": "./MISSQA/papers/2409.13609v2.json", + "paper_path": "./MISS-QA/papers/2409.13609v2.json", "figure_id": "2409.13609v2_figure_2.png", "caption": "Figure 2: Overall architecture of MaPPER. MaPPER freezes the pre-trained vision encoder and language encoder. For the language branch, Dynamic Prior Adapters (DyPA) utilize aligned priors generated from the Vision-aligned Prior Module to enable efficient modal alignment and adaptation. For the language branch, Local Convolution Adapters (LoCA) integrate local visual features the global prior (pre-trained visual knowledge) from the visual encoder.\nMoreover, the Prior-guided Text module for promoting the multimodal alignment.", "qtype": "Design_Rationale" @@ -8513,9 +8513,9 @@ "masked_elements": [ "Local Convolution Adapter" ], - "figure_path": "./MISSQA/figures/2_2409.13609v2_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2409.13609v2_figure_2.png", "paperid": "2409.13609v2", - "paper_path": "./MISSQA/papers/2409.13609v2.json", + "paper_path": "./MISS-QA/papers/2409.13609v2.json", "figure_id": "2409.13609v2_figure_2.png", "caption": "Figure 2: Overall architecture of MaPPER. MaPPER freezes the pre-trained vision encoder and language encoder. For the language branch, Dynamic Prior Adapters (DyPA) utilize aligned priors generated from the Vision-aligned Prior Module to enable efficient modal alignment and adaptation. 
For the language branch, Local Convolution Adapters (LoCA) integrate local visual features the global prior (pre-trained visual knowledge) from the visual encoder.\nMoreover, the Prior-guided Text module for promoting the multimodal alignment.", "qtype": "Implementation_Details" @@ -8541,9 +8541,9 @@ "masked_elements": [ "Dynamic Prior Adapter" ], - "figure_path": "./MISSQA/figures/3_2409.13609v2_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2409.13609v2_figure_2.png", "paperid": "2409.13609v2", - "paper_path": "./MISSQA/papers/2409.13609v2.json", + "paper_path": "./MISS-QA/papers/2409.13609v2.json", "figure_id": "2409.13609v2_figure_2.png", "caption": "Figure 2: Overall architecture of MaPPER. MaPPER freezes the pre-trained vision encoder and language encoder. For the language branch, Dynamic Prior Adapters (DyPA) utilize aligned priors generated from the Vision-aligned Prior Module to enable efficient modal alignment and adaptation. For the language branch, Local Convolution Adapters (LoCA) integrate local visual features the global prior (pre-trained visual knowledge) from the visual encoder.\nMoreover, the Prior-guided Text module for promoting the multimodal alignment.", "qtype": "Implementation_Details" @@ -8564,9 +8564,9 @@ "Ending Interaction", "Model Training" ], - "figure_path": "./MISSQA/figures/0_2408.15787v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2408.15787v1_figure_1.png", "paperid": "2408.15787v1", - "paper_path": "./MISSQA/papers/2408.15787v1.json", + "paper_path": "./MISS-QA/papers/2408.15787v1.json", "figure_id": "2408.15787v1_figure_1.png", "caption": "Figure 1. The overall architecture of our simulation framework. Left panel: construction of client pool. Middle panel: data collection with interactive simulation. Right panel: model training.", "qtype": "Literature_Background" @@ -8592,9 +8592,9 @@ "masked_elements": [ "Client Pool" ], - "figure_path": "./MISSQA/figures/1_2408.15787v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2408.15787v1_figure_1.png", "paperid": "2408.15787v1", - "paper_path": "./MISSQA/papers/2408.15787v1.json", + "paper_path": "./MISS-QA/papers/2408.15787v1.json", "figure_id": "2408.15787v1_figure_1.png", "caption": "Figure 1. The overall architecture of our simulation framework. Left panel: construction of client pool. Middle panel: data collection with interactive simulation. Right panel: model training.", "qtype": "Experimental_Results" @@ -8623,9 +8623,9 @@ "Skill F/T Signals", "Demo Task Plan" ], - "figure_path": "./MISSQA/figures/0_2409.11863v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2409.11863v1_figure_2.png", "paperid": "2409.11863v1", - "paper_path": "./MISSQA/papers/2409.11863v1.json", + "paper_path": "./MISS-QA/papers/2409.11863v1.json", "figure_id": "2409.11863v1_figure_2.png", "caption": "Figure 2: Framework Overview. In bootstrapped reasoning, an LLM analyzer pre-processes the skill library, reasons about skill sequences and success conditions from multi-modal demonstration sequentially. 
The resulting demo task plan is used as an example for an LLM planner to plan for new tasks.", "qtype": "Experimental_Results" @@ -8646,9 +8646,9 @@ "ViTac Videos", "segmentation" ], - "figure_path": "./MISSQA/figures/1_2409.11863v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2409.11863v1_figure_2.png", "paperid": "2409.11863v1", - "paper_path": "./MISSQA/papers/2409.11863v1.json", + "paper_path": "./MISS-QA/papers/2409.11863v1.json", "figure_id": "2409.11863v1_figure_2.png", "caption": "Figure 2: Framework Overview. In bootstrapped reasoning, an LLM analyzer pre-processes the skill library, reasons about skill sequences and success conditions from multi-modal demonstration sequentially. The resulting demo task plan is used as an example for an LLM planner to plan for new tasks.", "qtype": "Others" @@ -8669,9 +8669,9 @@ "Input Skill Library", "PDDL Domain" ], - "figure_path": "./MISSQA/figures/2_2409.11863v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2409.11863v1_figure_2.png", "paperid": "2409.11863v1", - "paper_path": "./MISSQA/papers/2409.11863v1.json", + "paper_path": "./MISS-QA/papers/2409.11863v1.json", "figure_id": "2409.11863v1_figure_2.png", "caption": "Figure 2: Framework Overview. In bootstrapped reasoning, an LLM analyzer pre-processes the skill library, reasons about skill sequences and success conditions from multi-modal demonstration sequentially. The resulting demo task plan is used as an example for an LLM planner to plan for new tasks.", "qtype": "Others" @@ -8698,9 +8698,9 @@ "masked_elements": [ "HDDC" ], - "figure_path": "./MISSQA/figures/0_2410.23820v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2410.23820v1_figure_2.png", "paperid": "2410.23820v1", - "paper_path": "./MISSQA/papers/2410.23820v1.json", + "paper_path": "./MISS-QA/papers/2410.23820v1.json", "figure_id": "2410.23820v1_figure_2.png", "caption": "Figure 2: \nTraining framework with proposed methods. (a) During the diffusion model training, the features generated by the feature extractor are shifted towards the mean direction of the Gaussian for each feature unit based on the selected anchor, becoming the condition for the diffusion model. To ensure the diffusion U-Net effectively utilizes the conditions created by the feature extractor, a skip dropout strategy is employed. (b) The process of anchoring Gaussian distributions involves: i) initializing the Gaussian mixture, ii) performing HDDC using the EM algorithm, iii) adjusting the number of Gaussians by splitting them according to criteria, and iv) filtering out unnecessary Gaussians.", "qtype": "Others" @@ -8722,9 +8722,9 @@ "Skip Dropout", "Feature Extractor" ], - "figure_path": "./MISSQA/figures/1_2410.23820v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2410.23820v1_figure_2.png", "paperid": "2410.23820v1", - "paper_path": "./MISSQA/papers/2410.23820v1.json", + "paper_path": "./MISS-QA/papers/2410.23820v1.json", "figure_id": "2410.23820v1_figure_2.png", "caption": "Figure 2: \nTraining framework with proposed methods. (a) During the diffusion model training, the features generated by the feature extractor are shifted towards the mean direction of the Gaussian for each feature unit based on the selected anchor, becoming the condition for the diffusion model. To ensure the diffusion U-Net effectively utilizes the conditions created by the feature extractor, a skip dropout strategy is employed. 
(b) The process of anchoring Gaussian distributions involves: i) initializing the Gaussian mixture, ii) performing HDDC using the EM algorithm, iii) adjusting the number of Gaussians by splitting them according to criteria, and iv) filtering out unnecessary Gaussians.", "qtype": "Others" @@ -8749,9 +8749,9 @@ "masked_elements": [ "Feature Alignment" ], - "figure_path": "./MISSQA/figures/2_2410.23820v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2410.23820v1_figure_2.png", "paperid": "2410.23820v1", - "paper_path": "./MISSQA/papers/2410.23820v1.json", + "paper_path": "./MISS-QA/papers/2410.23820v1.json", "figure_id": "2410.23820v1_figure_2.png", "caption": "Figure 2: \nTraining framework with proposed methods. (a) During the diffusion model training, the features generated by the feature extractor are shifted towards the mean direction of the Gaussian for each feature unit based on the selected anchor, becoming the condition for the diffusion model. To ensure the diffusion U-Net effectively utilizes the conditions created by the feature extractor, a skip dropout strategy is employed. (b) The process of anchoring Gaussian distributions involves: i) initializing the Gaussian mixture, ii) performing HDDC using the EM algorithm, iii) adjusting the number of Gaussians by splitting them according to criteria, and iv) filtering out unnecessary Gaussians.", "qtype": "Design_Rationale" @@ -8779,9 +8779,9 @@ "Random Crop & Resize", "Salient Region" ], - "figure_path": "./MISSQA/figures/0_2411.06784v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.06784v1_figure_2.png", "paperid": "2411.06784v1", - "paper_path": "./MISSQA/papers/2411.06784v1.json", + "paper_path": "./MISS-QA/papers/2411.06784v1.json", "figure_id": "2411.06784v1_figure_2.png", "caption": "Figure 2: The overview of SWFD framework, which mainly includes two stages: (1) Salient region generation. This stage generates salient regions based on the heatmap; (2) Perturbation optimization. This stage iteratively optimizes the perturbation through the joint classification loss of the original image and auxiliary image based on the weighted feature drop.", "qtype": "Design_Rationale" @@ -8808,9 +8808,9 @@ "masked_elements": [ "Feature Map of Layer l" ], - "figure_path": "./MISSQA/figures/1_2411.06784v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2411.06784v1_figure_2.png", "paperid": "2411.06784v1", - "paper_path": "./MISSQA/papers/2411.06784v1.json", + "paper_path": "./MISS-QA/papers/2411.06784v1.json", "figure_id": "2411.06784v1_figure_2.png", "caption": "Figure 2: The overview of SWFD framework, which mainly includes two stages: (1) Salient region generation. This stage generates salient regions based on the heatmap; (2) Perturbation optimization. This stage iteratively optimizes the perturbation through the joint classification loss of the original image and auxiliary image based on the weighted feature drop.", "qtype": "Design_Rationale" @@ -8832,9 +8832,9 @@ "Joint Classification Loss", "original image" ], - "figure_path": "./MISSQA/figures/2_2411.06784v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2411.06784v1_figure_2.png", "paperid": "2411.06784v1", - "paper_path": "./MISSQA/papers/2411.06784v1.json", + "paper_path": "./MISS-QA/papers/2411.06784v1.json", "figure_id": "2411.06784v1_figure_2.png", "caption": "Figure 2: The overview of SWFD framework, which mainly includes two stages: (1) Salient region generation. 
This stage generates salient regions based on the heatmap; (2) Perturbation optimization. This stage iteratively optimizes the perturbation through the joint classification loss of the original image and auxiliary image based on the weighted feature drop.", "qtype": "Implementation_Details" @@ -8863,9 +8863,9 @@ "Gaussian Scale", "Mean Weights" ], - "figure_path": "./MISSQA/figures/3_2411.06784v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2411.06784v1_figure_2.png", "paperid": "2411.06784v1", - "paper_path": "./MISSQA/papers/2411.06784v1.json", + "paper_path": "./MISS-QA/papers/2411.06784v1.json", "figure_id": "2411.06784v1_figure_2.png", "caption": "Figure 2: The overview of SWFD framework, which mainly includes two stages: (1) Salient region generation. This stage generates salient regions based on the heatmap; (2) Perturbation optimization. This stage iteratively optimizes the perturbation through the joint classification loss of the original image and auxiliary image based on the weighted feature drop.", "qtype": "Implementation_Details" @@ -8890,9 +8890,9 @@ "masked_elements": [ "Dynamic vision encoding" ], - "figure_path": "./MISSQA/figures/0_2407.04923v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.04923v1_figure_1.png", "paperid": "2407.04923v1", - "paper_path": "./MISSQA/papers/2407.04923v1.json", + "paper_path": "./MISS-QA/papers/2407.04923v1.json", "figure_id": "2407.04923v1_figure_1.png", "caption": "Figure 1: OmChat model overall structure and training steps.", "qtype": "Implementation_Details" @@ -8918,9 +8918,9 @@ "Ring attention", "Active progressive multimodal pretraining" ], - "figure_path": "./MISSQA/figures/1_2407.04923v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.04923v1_figure_1.png", "paperid": "2407.04923v1", - "paper_path": "./MISSQA/papers/2407.04923v1.json", + "paper_path": "./MISS-QA/papers/2407.04923v1.json", "figure_id": "2407.04923v1_figure_1.png", "caption": "Figure 1: OmChat model overall structure and training steps.", "qtype": "Implementation_Details" @@ -8945,9 +8945,9 @@ "masked_elements": [ "Dynamic vision encoding" ], - "figure_path": "./MISSQA/figures/2_2407.04923v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2407.04923v1_figure_1.png", "paperid": "2407.04923v1", - "paper_path": "./MISSQA/papers/2407.04923v1.json", + "paper_path": "./MISS-QA/papers/2407.04923v1.json", "figure_id": "2407.04923v1_figure_1.png", "caption": "Figure 1: OmChat model overall structure and training steps.", "qtype": "Implementation_Details" @@ -8975,9 +8975,9 @@ "Active progressive multi-modal pretraining", "Ring attention" ], - "figure_path": "./MISSQA/figures/3_2407.04923v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2407.04923v1_figure_1.png", "paperid": "2407.04923v1", - "paper_path": "./MISSQA/papers/2407.04923v1.json", + "paper_path": "./MISS-QA/papers/2407.04923v1.json", "figure_id": "2407.04923v1_figure_1.png", "caption": "Figure 1: OmChat model overall structure and training steps.", "qtype": "Literature_Background" @@ -9002,9 +9002,9 @@ "masked_elements": [ "Squeeze and Excitation Network" ], - "figure_path": "./MISSQA/figures/0_2411.10863v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.10863v1_figure_1.png", "paperid": "2411.10863v1", - "paper_path": "./MISSQA/papers/2411.10863v1.json", + "paper_path": "./MISS-QA/papers/2411.10863v1.json", "figure_id": "2411.10863v1_figure_1.png", "caption": "Figure 1: Overview of ResEmoteNet for efficient facial emotion recognition.", "qtype": 
"Literature_Background" @@ -9030,9 +9030,9 @@ "masked_elements": [ "Simple Feature Extraction" ], - "figure_path": "./MISSQA/figures/1_2411.10863v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.10863v1_figure_1.png", "paperid": "2411.10863v1", - "paper_path": "./MISSQA/papers/2411.10863v1.json", + "paper_path": "./MISS-QA/papers/2411.10863v1.json", "figure_id": "2411.10863v1_figure_1.png", "caption": "Figure 1: Overview of ResEmoteNet for efficient facial emotion recognition.", "qtype": "Literature_Background" @@ -9060,9 +9060,9 @@ "Squeeze and Excitation Network", "CNN Feature Extraction" ], - "figure_path": "./MISSQA/figures/2_2411.10863v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.10863v1_figure_1.png", "paperid": "2411.10863v1", - "paper_path": "./MISSQA/papers/2411.10863v1.json", + "paper_path": "./MISS-QA/papers/2411.10863v1.json", "figure_id": "2411.10863v1_figure_1.png", "caption": "Figure 1: Overview of ResEmoteNet for efficient facial emotion recognition.", "qtype": "Experimental_Results" @@ -9088,9 +9088,9 @@ "masked_elements": [ "Residual Network" ], - "figure_path": "./MISSQA/figures/3_2411.10863v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2411.10863v1_figure_1.png", "paperid": "2411.10863v1", - "paper_path": "./MISSQA/papers/2411.10863v1.json", + "paper_path": "./MISS-QA/papers/2411.10863v1.json", "figure_id": "2411.10863v1_figure_1.png", "caption": "Figure 1: Overview of ResEmoteNet for efficient facial emotion recognition.", "qtype": "Experimental_Results" @@ -9117,9 +9117,9 @@ "masked_elements": [ "Process Supervision" ], - "figure_path": "./MISSQA/figures/0_2407.00390v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.00390v1_figure_1.png", "paperid": "2407.00390v1", - "paper_path": "./MISSQA/papers/2407.00390v1.json", + "paper_path": "./MISS-QA/papers/2407.00390v1.json", "figure_id": "2407.00390v1_figure_1.png", "caption": "Figure 1: A comparison of different methods: Traditional verifiers rely on binary labels for outcome and process supervision, whereas Tree-PLV employs preferences instead of scalar values.", "qtype": "Experimental_Results" @@ -9148,9 +9148,9 @@ "masked_elements": [ "Reasoning Tree" ], - "figure_path": "./MISSQA/figures/1_2407.00390v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.00390v1_figure_1.png", "paperid": "2407.00390v1", - "paper_path": "./MISSQA/papers/2407.00390v1.json", + "paper_path": "./MISS-QA/papers/2407.00390v1.json", "figure_id": "2407.00390v1_figure_1.png", "caption": "Figure 1: A comparison of different methods: Traditional verifiers rely on binary labels for outcome and process supervision, whereas Tree-PLV employs preferences instead of scalar values.", "qtype": "Experimental_Results" @@ -9170,9 +9170,9 @@ "masked_elements": [ "Binary Classification Verifier" ], - "figure_path": "./MISSQA/figures/2_2407.00390v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2407.00390v1_figure_1.png", "paperid": "2407.00390v1", - "paper_path": "./MISSQA/papers/2407.00390v1.json", + "paper_path": "./MISS-QA/papers/2407.00390v1.json", "figure_id": "2407.00390v1_figure_1.png", "caption": "Figure 1: A comparison of different methods: Traditional verifiers rely on binary labels for outcome and process supervision, whereas Tree-PLV employs preferences instead of scalar values.", "qtype": "Others" @@ -9197,9 +9197,9 @@ "lookup-free quantizer (LFQ)", "structure tokenizer" ], - "figure_path": "./MISSQA/figures/0_2410.13782v1_figure_1.png", + "figure_path": 
"./MISS-QA/figures/0_2410.13782v1_figure_1.png", "paperid": "2410.13782v1", - "paper_path": "./MISSQA/papers/2410.13782v1.json", + "paper_path": "./MISS-QA/papers/2410.13782v1.json", "figure_id": "2410.13782v1_figure_1.png", "caption": "Figure 1: Overall illustration of DPLM-2.\n(A) Structure tokenization consists of a GVP-based encoder to yield invariant backbone geometric features, a lookup-free quantizer (LFQ) to discretize encoded structural features into structure tokens within a codebook, and an IPA-based decoder as de-tokenizer to convert structure tokens back to backbone atomic coordinates.\n(B) Multimodal learning and generation of protein structure and sequence with DPLM-2.\n(C) Various applications of DPLM-2 as a protein foundation model: (1) unconditional protein sequence-structure mixed-modal co-generation; (2) protein sequence-structure joint representation for predictive tasks; (3) structure prediction; (4) fixed-backbone sequence generation; (5) conditional protein generation with structure-sequence mixed-modal input and output.", "qtype": "Others" @@ -9224,9 +9224,9 @@ "masked_elements": [ "lookup-free quantizer" ], - "figure_path": "./MISSQA/figures/1_2410.13782v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2410.13782v1_figure_1.png", "paperid": "2410.13782v1", - "paper_path": "./MISSQA/papers/2410.13782v1.json", + "paper_path": "./MISS-QA/papers/2410.13782v1.json", "figure_id": "2410.13782v1_figure_1.png", "caption": "Figure 1: Overall illustration of DPLM-2.\n(A) Structure tokenization consists of a GVP-based encoder to yield invariant backbone geometric features, a lookup-free quantizer (LFQ) to discretize encoded structural features into structure tokens within a codebook, and an IPA-based decoder as de-tokenizer to convert structure tokens back to backbone atomic coordinates.\n(B) Multimodal learning and generation of protein structure and sequence with DPLM-2.\n(C) Various applications of DPLM-2 as a protein foundation model: (1) unconditional protein sequence-structure mixed-modal co-generation; (2) protein sequence-structure joint representation for predictive tasks; (3) structure prediction; (4) fixed-backbone sequence generation; (5) conditional protein generation with structure-sequence mixed-modal input and output.", "qtype": "Design_Rationale" @@ -9254,9 +9254,9 @@ "warmup from pre-trained sequence-based DPLM", "DPLM-2" ], - "figure_path": "./MISSQA/figures/2_2410.13782v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2410.13782v1_figure_1.png", "paperid": "2410.13782v1", - "paper_path": "./MISSQA/papers/2410.13782v1.json", + "paper_path": "./MISS-QA/papers/2410.13782v1.json", "figure_id": "2410.13782v1_figure_1.png", "caption": "Figure 1: Overall illustration of DPLM-2.\n(A) Structure tokenization consists of a GVP-based encoder to yield invariant backbone geometric features, a lookup-free quantizer (LFQ) to discretize encoded structural features into structure tokens within a codebook, and an IPA-based decoder as de-tokenizer to convert structure tokens back to backbone atomic coordinates.\n(B) Multimodal learning and generation of protein structure and sequence with DPLM-2.\n(C) Various applications of DPLM-2 as a protein foundation model: (1) unconditional protein sequence-structure mixed-modal co-generation; (2) protein sequence-structure joint representation for predictive tasks; (3) structure prediction; (4) fixed-backbone sequence generation; (5) conditional protein generation with structure-sequence mixed-modal input and output.", 
"qtype": "Design_Rationale" @@ -9282,9 +9282,9 @@ "masked_elements": [ "Reverse-complement equivariance" ], - "figure_path": "./MISSQA/figures/0_2411.04165v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.04165v1_figure_1.png", "paperid": "2411.04165v1", - "paper_path": "./MISSQA/papers/2411.04165v1.json", + "paper_path": "./MISS-QA/papers/2411.04165v1.json", "figure_id": "2411.04165v1_figure_1.png", "caption": "Figure 1: Overview of Bio-xLSTM.\nTop left: xLSTM for natural language processing tasks.\nTop right: Considered modeling approaches\nfor biological sequences:\nmasked language modeling,\nequivariance to reverse complementary sequence,\nand in-context learning.\nBottom left: DNA-xLSTM models are trained on genomic DNA sequences and then fine-tuned on downstream tasks.\nBottom center: Prot-xLSTM models are trained in a causal modeling setting with a fill-in-the-middle objective and use homologous proteins for in-context learning.\nBottom right: Chem-xLSTM models are trained to generate small molecules.\nFor an in-context learning setting, Chem-xLSTM models use\nmolecules with known properties.", "qtype": "Design_Rationale" @@ -9308,9 +9308,9 @@ "masked_elements": [ "reverse-complement equivariance" ], - "figure_path": "./MISSQA/figures/1_2411.04165v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.04165v1_figure_1.png", "paperid": "2411.04165v1", - "paper_path": "./MISSQA/papers/2411.04165v1.json", + "paper_path": "./MISS-QA/papers/2411.04165v1.json", "figure_id": "2411.04165v1_figure_1.png", "caption": "Figure 1: Overview of Bio-xLSTM.\nTop left: xLSTM for natural language processing tasks.\nTop right: Considered modeling approaches\nfor biological sequences:\nmasked language modeling,\nequivariance to reverse complementary sequence,\nand in-context learning.\nBottom left: DNA-xLSTM models are trained on genomic DNA sequences and then fine-tuned on downstream tasks.\nBottom center: Prot-xLSTM models are trained in a causal modeling setting with a fill-in-the-middle objective and use homologous proteins for in-context learning.\nBottom right: Chem-xLSTM models are trained to generate small molecules.\nFor an in-context learning setting, Chem-xLSTM models use\nmolecules with known properties.", "qtype": "Implementation_Details" @@ -9336,9 +9336,9 @@ "masked_elements": [ "reordered packing" ], - "figure_path": "./MISSQA/figures/0_2407.18267v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.18267v1_figure_1.png", "paperid": "2407.18267v1", - "paper_path": "./MISSQA/papers/2407.18267v1.json", + "paper_path": "./MISS-QA/papers/2407.18267v1.json", "figure_id": "2407.18267v1_figure_1.png", "caption": "Figure 1: MCU-MixQ Overview, the proposed HW/SW co-optimization design framework for MPNN on MCUs.", "qtype": "Implementation_Details" @@ -9364,9 +9364,9 @@ "masked_elements": [ "Search Space" ], - "figure_path": "./MISSQA/figures/1_2407.18267v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.18267v1_figure_1.png", "paperid": "2407.18267v1", - "paper_path": "./MISSQA/papers/2407.18267v1.json", + "paper_path": "./MISS-QA/papers/2407.18267v1.json", "figure_id": "2407.18267v1_figure_1.png", "caption": "Figure 1: MCU-MixQ Overview, the proposed HW/SW co-optimization design framework for MPNN on MCUs.", "qtype": "Literature_Background" @@ -9393,9 +9393,9 @@ "SLBC", "SIMD" ], - "figure_path": "./MISSQA/figures/2_2407.18267v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2407.18267v1_figure_1.png", "paperid": "2407.18267v1", - "paper_path": 
"./MISSQA/papers/2407.18267v1.json", + "paper_path": "./MISS-QA/papers/2407.18267v1.json", "figure_id": "2407.18267v1_figure_1.png", "caption": "Figure 1: MCU-MixQ Overview, the proposed HW/SW co-optimization design framework for MPNN on MCUs.", "qtype": "Literature_Background" @@ -9420,9 +9420,9 @@ "masked_elements": [ "SAM-based filtering" ], - "figure_path": "./MISSQA/figures/0_2411.15205v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.15205v1_figure_2.png", "paperid": "2411.15205v1", - "paper_path": "./MISSQA/papers/2411.15205v1.json", + "paper_path": "./MISS-QA/papers/2411.15205v1.json", "figure_id": "2411.15205v1_figure_2.png", "caption": "Figure 2: \nMethod overview.\nGiven text prompts, DAGSM generates disentangle digital humans whose bodies and clothes are represented as multiple individual GSM (Sec. 4.1).\nThe generation process includes three stages:\n1) a body generation stage that generates an unclothed body with the human priors SMPL-X [52] from the guidance of text-to-image model SD [16] (Sec. 4.2);\n2) a cloth generation stage that first creates the cloth’s mesh proxy.\nThen 2DGS 𝒢bsubscript𝒢𝑏\\mathcal{G}_{b}caligraphic_G start_POSTSUBSCRIPT italic_b end_POSTSUBSCRIPT is bound to the mesh for generating a garment with texture (Sec. 4.3);\nand 3) a view-consistent refinement stage, where we propose a cross-view attention mechanism for texture style consistency and an incident-angle-weighted denoising (IAW-DE) strategy to enhance the appearance image 𝒱^isubscript^𝒱𝑖\\hat{\\mathcal{V}}_{i}over^ start_ARG caligraphic_V end_ARG start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT (Sec. 4.4).", "qtype": "Literature_Background" @@ -9448,9 +9448,9 @@ "cross-view attention", "IAW-DE" ], - "figure_path": "./MISSQA/figures/1_2411.15205v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2411.15205v1_figure_2.png", "paperid": "2411.15205v1", - "paper_path": "./MISSQA/papers/2411.15205v1.json", + "paper_path": "./MISS-QA/papers/2411.15205v1.json", "figure_id": "2411.15205v1_figure_2.png", "caption": "Figure 2: \nMethod overview.\nGiven text prompts, DAGSM generates disentangle digital humans whose bodies and clothes are represented as multiple individual GSM (Sec. 4.1).\nThe generation process includes three stages:\n1) a body generation stage that generates an unclothed body with the human priors SMPL-X [52] from the guidance of text-to-image model SD [16] (Sec. 4.2);\n2) a cloth generation stage that first creates the cloth’s mesh proxy.\nThen 2DGS 𝒢bsubscript𝒢𝑏\\mathcal{G}_{b}caligraphic_G start_POSTSUBSCRIPT italic_b end_POSTSUBSCRIPT is bound to the mesh for generating a garment with texture (Sec. 4.3);\nand 3) a view-consistent refinement stage, where we propose a cross-view attention mechanism for texture style consistency and an incident-angle-weighted denoising (IAW-DE) strategy to enhance the appearance image 𝒱^isubscript^𝒱𝑖\\hat{\\mathcal{V}}_{i}over^ start_ARG caligraphic_V end_ARG start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT (Sec. 
4.4).", "qtype": "Literature_Background" @@ -9471,9 +9471,9 @@ "LSNN", "Multi-Step Pulling (MSP) module" ], - "figure_path": "./MISSQA/figures/0_2411.01208v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.01208v1_figure_2.png", "paperid": "2411.01208v1", - "paper_path": "./MISSQA/papers/2411.01208v1.json", + "paper_path": "./MISS-QA/papers/2411.01208v1.json", "figure_id": "2411.01208v1_figure_2.png", "caption": "Figure 2: Overview of our method: (a) Frequency Feature Transformation (FFT) module and (b) Multi-Step Pulling (MSP) module. In (a), we learn Fourier bases hi⁢(Q)subscriptℎ𝑖𝑄h_{i}(Q)italic_h start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT ( italic_Q ) from query points Q𝑄Qitalic_Q using the Fourier layer and obtain multi-level frequency features yisubscript𝑦𝑖{y_{i}}italic_y start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT through Hadamard product. In (b), using multi-level frequency features from (a) and a linear network LSNN with shared parameters, we calculate the distance(D) of Qisubscript𝑄𝑖Q_{i}italic_Q start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT to its corresponding surface target point Qtsubscript𝑄𝑡Q_{t}italic_Q start_POSTSUBSCRIPT italic_t end_POSTSUBSCRIPT to predict a more accurate surface. We visualize the predicted SDF distribution map corresponding to the frequency features in (a) and the reconstruction from each step of SDF predictions on the right side of (b).", "qtype": "Others" @@ -9494,9 +9494,9 @@ "domain classifier", "text-aligned neural tokenizer" ], - "figure_path": "./MISSQA/figures/0_2409.00101v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2409.00101v1_figure_2.png", "paperid": "2409.00101v1", - "paper_path": "./MISSQA/papers/2409.00101v1.json", + "paper_path": "./MISS-QA/papers/2409.00101v1.json", "figure_id": "2409.00101v1_figure_2.png", "caption": "Figure 2: The architecture design of text-aligned neural tokenizer training. The neural tokenizer is trained by reconstructing both temporal and frequency domain of input EEG signals to discretize them into discrete neural tokens. To align EEG and text embedding space, we utilize a domain classifier through adversarial training.", "qtype": "Others" @@ -9517,9 +9517,9 @@ "temporal decoder", "frequency decoder" ], - "figure_path": "./MISSQA/figures/1_2409.00101v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2409.00101v1_figure_2.png", "paperid": "2409.00101v1", - "paper_path": "./MISSQA/papers/2409.00101v1.json", + "paper_path": "./MISS-QA/papers/2409.00101v1.json", "figure_id": "2409.00101v1_figure_2.png", "caption": "Figure 2: The architecture design of text-aligned neural tokenizer training. The neural tokenizer is trained by reconstructing both temporal and frequency domain of input EEG signals to discretize them into discrete neural tokens. To align EEG and text embedding space, we utilize a domain classifier through adversarial training.", "qtype": "Others" @@ -9544,9 +9544,9 @@ "masked_elements": [ "reverse gradient" ], - "figure_path": "./MISSQA/figures/2_2409.00101v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2409.00101v1_figure_2.png", "paperid": "2409.00101v1", - "paper_path": "./MISSQA/papers/2409.00101v1.json", + "paper_path": "./MISS-QA/papers/2409.00101v1.json", "figure_id": "2409.00101v1_figure_2.png", "caption": "Figure 2: The architecture design of text-aligned neural tokenizer training. The neural tokenizer is trained by reconstructing both temporal and frequency domain of input EEG signals to discretize them into discrete neural tokens. 
To align EEG and text embedding space, we utilize a domain classifier through adversarial training.", "qtype": "Design_Rationale" @@ -9573,9 +9573,9 @@ "KL divergence loss", "Teacher" ], - "figure_path": "./MISSQA/figures/0_2411.15244v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.15244v1_figure_1.png", "paperid": "2411.15244v1", - "paper_path": "./MISSQA/papers/2411.15244v1.json", + "paper_path": "./MISS-QA/papers/2411.15244v1.json", "figure_id": "2411.15244v1_figure_1.png", "caption": "Figure 1: Overview of Adversarial Prompt Distillation (APD). The student and teacher CLIP are fine-tuned simultaneously using both visual and textual prompts. The teacher model is trained on natural images to achieve high performance on clean data, while also receiving feedback from the student to help the student better align with the teacher. The student model is trained on adversarial data and aligns its output with the teacher model.", "qtype": "Design_Rationale" @@ -9600,9 +9600,9 @@ "masked_elements": [ "Adversarial Image" ], - "figure_path": "./MISSQA/figures/1_2411.15244v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.15244v1_figure_1.png", "paperid": "2411.15244v1", - "paper_path": "./MISSQA/papers/2411.15244v1.json", + "paper_path": "./MISS-QA/papers/2411.15244v1.json", "figure_id": "2411.15244v1_figure_1.png", "caption": "Figure 1: Overview of Adversarial Prompt Distillation (APD). The student and teacher CLIP are fine-tuned simultaneously using both visual and textual prompts. The teacher model is trained on natural images to achieve high performance on clean data, while also receiving feedback from the student to help the student better align with the teacher. The student model is trained on adversarial data and aligns its output with the teacher model.", "qtype": "Design_Rationale" @@ -9629,9 +9629,9 @@ "masked_elements": [ "Student Model" ], - "figure_path": "./MISSQA/figures/2_2411.15244v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.15244v1_figure_1.png", "paperid": "2411.15244v1", - "paper_path": "./MISSQA/papers/2411.15244v1.json", + "paper_path": "./MISS-QA/papers/2411.15244v1.json", "figure_id": "2411.15244v1_figure_1.png", "caption": "Figure 1: Overview of Adversarial Prompt Distillation (APD). The student and teacher CLIP are fine-tuned simultaneously using both visual and textual prompts. The teacher model is trained on natural images to achieve high performance on clean data, while also receiving feedback from the student to help the student better align with the teacher. The student model is trained on adversarial data and aligns its output with the teacher model.", "qtype": "Implementation_Details" @@ -9659,9 +9659,9 @@ "Teacher Model", "Textual Prompt" ], - "figure_path": "./MISSQA/figures/3_2411.15244v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2411.15244v1_figure_1.png", "paperid": "2411.15244v1", - "paper_path": "./MISSQA/papers/2411.15244v1.json", + "paper_path": "./MISS-QA/papers/2411.15244v1.json", "figure_id": "2411.15244v1_figure_1.png", "caption": "Figure 1: Overview of Adversarial Prompt Distillation (APD). The student and teacher CLIP are fine-tuned simultaneously using both visual and textual prompts. The teacher model is trained on natural images to achieve high performance on clean data, while also receiving feedback from the student to help the student better align with the teacher. 
The student model is trained on adversarial data and aligns its output with the teacher model.", "qtype": "Implementation_Details" @@ -9686,9 +9686,9 @@ "Specialized Compiler", "Bug Fixing LLM" ], - "figure_path": "./MISSQA/figures/0_2411.15442v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.15442v1_figure_1.png", "paperid": "2411.15442v1", - "paper_path": "./MISSQA/papers/2411.15442v1.json", + "paper_path": "./MISS-QA/papers/2411.15442v1.json", "figure_id": "2411.15442v1_figure_1.png", "caption": "Figure 1: The proposed AssertCraft full-stack flow employs two novel techniques—sub-tasked fine-tuning, and iterative prompting—to produce high-accuracy assertion statements from the specification document. Moreover, this flow was completed by adding an additional dataset and scoreboard for assessing the ability of the model to generate high-quality assertions.", "qtype": "Implementation_Details" @@ -9715,9 +9715,9 @@ "LLM for FSM Extraction", "LLM for variable Extraction" ], - "figure_path": "./MISSQA/figures/1_2411.15442v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.15442v1_figure_1.png", "paperid": "2411.15442v1", - "paper_path": "./MISSQA/papers/2411.15442v1.json", + "paper_path": "./MISS-QA/papers/2411.15442v1.json", "figure_id": "2411.15442v1_figure_1.png", "caption": "Figure 1: The proposed AssertCraft full-stack flow employs two novel techniques—sub-tasked fine-tuning, and iterative prompting—to produce high-accuracy assertion statements from the specification document. Moreover, this flow was completed by adding an additional dataset and scoreboard for assessing the ability of the model to generate high-quality assertions.", "qtype": "Literature_Background" @@ -9743,9 +9743,9 @@ "masked_elements": [ "Alternating Momentum Updating (AMU)" ], - "figure_path": "./MISSQA/figures/0_2410.01573v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2410.01573v1_figure_2.png", "paperid": "2410.01573v1", - "paper_path": "./MISSQA/papers/2410.01573v1.json", + "paper_path": "./MISS-QA/papers/2410.01573v1.json", "figure_id": "2410.01573v1_figure_2.png", "caption": "Figure 2: An overview of the proposed PASS approach, comprising three components: (1) an input decorator to reduce the style shift, (2) a cross-attention prompt modulator to mitigate the shape inconsistency,\nand (3) an alternating momentum parameter updating strategy with the teacher/student framework to avoid error accumulation during the adaptation stage. The weights of the teacher model are updated by the exponential moving average (EMA) with a decay factor from the online updating student model. For each new test data, the student model is reset by the teacher model to recover the historical knowledge and is adapted to the given sample.", "qtype": "Literature_Background" @@ -9769,9 +9769,9 @@ "masked_elements": [ "Input Decorator" ], - "figure_path": "./MISSQA/figures/1_2410.01573v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2410.01573v1_figure_2.png", "paperid": "2410.01573v1", - "paper_path": "./MISSQA/papers/2410.01573v1.json", + "paper_path": "./MISS-QA/papers/2410.01573v1.json", "figure_id": "2410.01573v1_figure_2.png", "caption": "Figure 2: An overview of the proposed PASS approach, comprising three components: (1) an input decorator to reduce the style shift, (2) a cross-attention prompt modulator to mitigate the shape inconsistency,\nand (3) an alternating momentum parameter updating strategy with the teacher/student framework to avoid error accumulation during the adaptation stage. 
The weights of the teacher model are updated by the exponential moving average (EMA) with a decay factor from the online updating student model. For each new test data, the student model is reset by the teacher model to recover the historical knowledge and is adapted to the given sample.", "qtype": "Experimental_Results" @@ -9799,9 +9799,9 @@ "masked_elements": [ "student model" ], - "figure_path": "./MISSQA/figures/2_2410.01573v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2410.01573v1_figure_2.png", "paperid": "2410.01573v1", - "paper_path": "./MISSQA/papers/2410.01573v1.json", + "paper_path": "./MISS-QA/papers/2410.01573v1.json", "figure_id": "2410.01573v1_figure_2.png", "caption": "Figure 2: An overview of the proposed PASS approach, comprising three components: (1) an input decorator to reduce the style shift, (2) a cross-attention prompt modulator to mitigate the shape inconsistency,\nand (3) an alternating momentum parameter updating strategy with the teacher/student framework to avoid error accumulation during the adaptation stage. The weights of the teacher model are updated by the exponential moving average (EMA) with a decay factor from the online updating student model. For each new test data, the student model is reset by the teacher model to recover the historical knowledge and is adapted to the given sample.", "qtype": "Experimental_Results" @@ -9822,9 +9822,9 @@ "Decoder", "deformation matrix" ], - "figure_path": "./MISSQA/figures/0_2410.22909v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2410.22909v1_figure_1.png", "paperid": "2410.22909v1", - "paper_path": "./MISSQA/papers/2410.22909v1.json", + "paper_path": "./MISS-QA/papers/2410.22909v1.json", "figure_id": "2410.22909v1_figure_1.png", "caption": "Figure 1: UniRiT performs a rigid transformation phase between the source 𝐏𝒮subscript𝐏𝒮\\mathbf{P}_{\\mathcal{S}}bold_P start_POSTSUBSCRIPT caligraphic_S end_POSTSUBSCRIPT and target 𝐏𝒯subscript𝐏𝒯\\mathbf{P}_{\\mathcal{T}}bold_P start_POSTSUBSCRIPT caligraphic_T end_POSTSUBSCRIPT point clouds, where the features of both point clouds are extracted using MLPs. These features are then passed through a decoder composed of fully connected (FC) layers, which iteratively generates rotation and translation matrices over n𝑛nitalic_n cycles. The transformed point cloud output from the rigid module is subsequently utilized along with the target point cloud to re-extract features. These features are concatenated with the coordinate information and then input into the decoder to generate a deformation matrix, which applied to 𝐏𝒮′superscriptsubscript𝐏𝒮′\\mathbf{P}_{\\mathcal{S}}^{\\prime}bold_P start_POSTSUBSCRIPT caligraphic_S end_POSTSUBSCRIPT start_POSTSUPERSCRIPT ′ end_POSTSUPERSCRIPT, yields the final transformed point cloud 𝐏𝒮^^subscript𝐏𝒮\\hat{\\mathbf{P}_{\\mathcal{S}}}over^ start_ARG bold_P start_POSTSUBSCRIPT caligraphic_S end_POSTSUBSCRIPT end_ARG.", "qtype": "Others" @@ -9843,9 +9843,9 @@ "masked_elements": [ "Fusion MLP" ], - "figure_path": "./MISSQA/figures/0_2410.11650v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2410.11650v1_figure_1.png", "paperid": "2410.11650v1", - "paper_path": "./MISSQA/papers/2410.11650v1.json", + "paper_path": "./MISS-QA/papers/2410.11650v1.json", "figure_id": "2410.11650v1_figure_1.png", "caption": "Figure 1. 
The overview of ED-ViT which consists of four steps: Model Splitting, Model Pruning, Model Assignment and Model Fusion.", "qtype": "Others" @@ -9870,9 +9870,9 @@ "masked_elements": [ "Model Assignment" ], - "figure_path": "./MISSQA/figures/1_2410.11650v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2410.11650v1_figure_1.png", "paperid": "2410.11650v1", - "paper_path": "./MISSQA/papers/2410.11650v1.json", + "paper_path": "./MISS-QA/papers/2410.11650v1.json", "figure_id": "2410.11650v1_figure_1.png", "caption": "Figure 1. The overview of ED-ViT which consists of four steps: Model Splitting, Model Pruning, Model Assignment and Model Fusion.", "qtype": "Others" @@ -9892,9 +9892,9 @@ "masked_elements": [ "Weight Selection" ], - "figure_path": "./MISSQA/figures/2_2410.11650v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2410.11650v1_figure_1.png", "paperid": "2410.11650v1", - "paper_path": "./MISSQA/papers/2410.11650v1.json", + "paper_path": "./MISS-QA/papers/2410.11650v1.json", "figure_id": "2410.11650v1_figure_1.png", "caption": "Figure 1. The overview of ED-ViT which consists of four steps: Model Splitting, Model Pruning, Model Assignment and Model Fusion.", "qtype": "Design_Rationale" @@ -9921,9 +9921,9 @@ "Model Assignment", "sub-models" ], - "figure_path": "./MISSQA/figures/3_2410.11650v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2410.11650v1_figure_1.png", "paperid": "2410.11650v1", - "paper_path": "./MISSQA/papers/2410.11650v1.json", + "paper_path": "./MISS-QA/papers/2410.11650v1.json", "figure_id": "2410.11650v1_figure_1.png", "caption": "Figure 1. The overview of ED-ViT which consists of four steps: Model Splitting, Model Pruning, Model Assignment and Model Fusion.", "qtype": "Design_Rationale" @@ -9952,9 +9952,9 @@ "Frame-level Captioning", "Redundancy Removal" ], - "figure_path": "./MISSQA/figures/0_2411.14794v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.14794v1_figure_2.png", "paperid": "2411.14794v1", - "paper_path": "./MISSQA/papers/2411.14794v1.json", + "paper_path": "./MISS-QA/papers/2411.14794v1.json", "figure_id": "2411.14794v1_figure_2.png", "caption": "Figure 2: The automatic generation pipeline of VideoEspresso. (i) Question-Answer Pair Construction: We use video frame-leveled captions to extract the key frames of the video and group descriptions of these frames. Then, we prompt GPT-4 to design questions for each group of video frames. (ii) Multimodal Chain-of-Thought Annotation: We extract key evidence text and generate captions with the highest relevance to the question with GPT-4o. Additionally, we annotate spatial and temporal information for key items, which results in multimodal Chain of Thought data pairs grounded in both temporal and spatial dimensions.", "qtype": "Design_Rationale" @@ -9981,9 +9981,9 @@ "Frame-level Captioning", "Frames Grouping" ], - "figure_path": "./MISSQA/figures/1_2411.14794v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2411.14794v1_figure_2.png", "paperid": "2411.14794v1", - "paper_path": "./MISSQA/papers/2411.14794v1.json", + "paper_path": "./MISS-QA/papers/2411.14794v1.json", "figure_id": "2411.14794v1_figure_2.png", "caption": "Figure 2: The automatic generation pipeline of VideoEspresso. (i) Question-Answer Pair Construction: We use video frame-leveled captions to extract the key frames of the video and group descriptions of these frames. Then, we prompt GPT-4 to design questions for each group of video frames. 
(ii) Multimodal Chain-of-Thought Annotation: We extract key evidence text and generate captions with the highest relevance to the question with GPT-4o. Additionally, we annotate spatial and temporal information for key items, which results in multimodal Chain of Thought data pairs grounded in both temporal and spatial dimensions.", "qtype": "Implementation_Details" @@ -10003,9 +10003,9 @@ "masked_elements": [ "Sampling Y*_w" ], - "figure_path": "./MISSQA/figures/0_2410.04350v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2410.04350v1_figure_1.png", "paperid": "2410.04350v1", - "paper_path": "./MISSQA/papers/2410.04350v1.json", + "paper_path": "./MISS-QA/papers/2410.04350v1.json", "figure_id": "2410.04350v1_figure_1.png", "caption": "Figure 1: In real data, different tokens have varying rewards, with low-reward tokens present even in winning responses. DPO treats all tokens equally, introducing noise and reducing optimization efficiency. Our TIS-DPO performs importance sampling on the optimal data distribution (where each token has equal reward) using actual data, introducing token weights to improve optimization efficiency.", "qtype": "Implementation_Details" @@ -10026,9 +10026,9 @@ "TIS-DPO Objective", "Weighted Y_l" ], - "figure_path": "./MISSQA/figures/1_2410.04350v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2410.04350v1_figure_1.png", "paperid": "2410.04350v1", - "paper_path": "./MISSQA/papers/2410.04350v1.json", + "paper_path": "./MISS-QA/papers/2410.04350v1.json", "figure_id": "2410.04350v1_figure_1.png", "caption": "Figure 1: In real data, different tokens have varying rewards, with low-reward tokens present even in winning responses. DPO treats all tokens equally, introducing noise and reducing optimization efficiency. Our TIS-DPO performs importance sampling on the optimal data distribution (where each token has equal reward) using actual data, introducing token weights to improve optimization efficiency.", "qtype": "Implementation_Details" @@ -10049,9 +10049,9 @@ "Importance Sampling", "maximum likelihood" ], - "figure_path": "./MISSQA/figures/2_2410.04350v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2410.04350v1_figure_1.png", "paperid": "2410.04350v1", - "paper_path": "./MISSQA/papers/2410.04350v1.json", + "paper_path": "./MISS-QA/papers/2410.04350v1.json", "figure_id": "2410.04350v1_figure_1.png", "caption": "Figure 1: In real data, different tokens have varying rewards, with low-reward tokens present even in winning responses. DPO treats all tokens equally, introducing noise and reducing optimization efficiency. Our TIS-DPO performs importance sampling on the optimal data distribution (where each token has equal reward) using actual data, introducing token weights to improve optimization efficiency.", "qtype": "Literature_Background" @@ -10072,9 +10072,9 @@ "Weighted Y_w", "Weighted Y_l" ], - "figure_path": "./MISSQA/figures/3_2410.04350v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2410.04350v1_figure_1.png", "paperid": "2410.04350v1", - "paper_path": "./MISSQA/papers/2410.04350v1.json", + "paper_path": "./MISS-QA/papers/2410.04350v1.json", "figure_id": "2410.04350v1_figure_1.png", "caption": "Figure 1: In real data, different tokens have varying rewards, with low-reward tokens present even in winning responses. DPO treats all tokens equally, introducing noise and reducing optimization efficiency. 
Our TIS-DPO performs importance sampling on the optimal data distribution (where each token has equal reward) using actual data, introducing token weights to improve optimization efficiency.", "qtype": "Literature_Background" @@ -10102,9 +10102,9 @@ "behavior-level", "interaction-level" ], - "figure_path": "./MISSQA/figures/0_2411.12179v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.12179v1_figure_1.png", "paperid": "2411.12179v1", - "paper_path": "./MISSQA/papers/2411.12179v1.json", + "paper_path": "./MISS-QA/papers/2411.12179v1.json", "figure_id": "2411.12179v1_figure_1.png", "caption": "Figure 1. motivation of our work", "qtype": "Literature_Background" @@ -10135,9 +10135,9 @@ "masked_elements": [ "interaction-view" ], - "figure_path": "./MISSQA/figures/1_2411.12179v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.12179v1_figure_1.png", "paperid": "2411.12179v1", - "paper_path": "./MISSQA/papers/2411.12179v1.json", + "paper_path": "./MISS-QA/papers/2411.12179v1.json", "figure_id": "2411.12179v1_figure_1.png", "caption": "Figure 1. motivation of our work", "qtype": "Literature_Background" @@ -10164,9 +10164,9 @@ "masked_elements": [ "behavior-aware multi-grained preference" ], - "figure_path": "./MISSQA/figures/2_2411.12179v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.12179v1_figure_1.png", "paperid": "2411.12179v1", - "paper_path": "./MISSQA/papers/2411.12179v1.json", + "paper_path": "./MISS-QA/papers/2411.12179v1.json", "figure_id": "2411.12179v1_figure_1.png", "caption": "Figure 1. motivation of our work", "qtype": "Experimental_Results" @@ -10191,9 +10191,9 @@ "AdaIN", "TransDiffuser" ], - "figure_path": "./MISSQA/figures/0_2408.03558v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2408.03558v1_figure_2.png", "paperid": "2408.03558v1", - "paper_path": "./MISSQA/papers/2408.03558v1.json", + "paper_path": "./MISS-QA/papers/2408.03558v1.json", "figure_id": "2408.03558v1_figure_2.png", "caption": "Figure 2: The architecture of the proposed method. The content and style images are encoded using a pretrained VQ-GAN encoder. The encoded input is passed through the diffusion prior conditioned on the AdaIN [21] features. VQ-GAN decoder is then used to obtain the resultant image. The dotted line indicates that the diffusion prior is trained separately from the decoder.", "qtype": "Others" @@ -10213,9 +10213,9 @@ "masked_elements": [ "VQ-GAN encoder" ], - "figure_path": "./MISSQA/figures/1_2408.03558v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2408.03558v1_figure_2.png", "paperid": "2408.03558v1", - "paper_path": "./MISSQA/papers/2408.03558v1.json", + "paper_path": "./MISS-QA/papers/2408.03558v1.json", "figure_id": "2408.03558v1_figure_2.png", "caption": "Figure 2: The architecture of the proposed method. The content and style images are encoded using a pretrained VQ-GAN encoder. The encoded input is passed through the diffusion prior conditioned on the AdaIN [21] features. VQ-GAN decoder is then used to obtain the resultant image. 
The dotted line indicates that the diffusion prior is trained separately from the decoder.", "qtype": "Others" @@ -10239,9 +10239,9 @@ "masked_elements": [ "Pearson Correlation" ], - "figure_path": "./MISSQA/figures/0_2408.03358v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2408.03358v1_figure_1.png", "paperid": "2408.03358v1", - "paper_path": "./MISSQA/papers/2408.03358v1.json", + "paper_path": "./MISS-QA/papers/2408.03358v1.json", "figure_id": "2408.03358v1_figure_1.png", "caption": "Figure 1: The overall workflow of the MLC-GCN contains: a data preprocessing module (a), a multi-graph generator (b), and a multi-level GCN-based predictor. In module a, a brain atlas is used to extract n time series from n brain regions. In b, temporal embedding and STFE are used to extract sparse temporal features at different hierarchy and to calculate the corresponding graphs (connectomes). In c, GCNs are used to encode the generated graphs at different levels into higher level graphical data features. These features are concatenated and input to a multi-layer perceptron (MLP) to classify AD.", "qtype": "Others" @@ -10262,9 +10262,9 @@ "STFE", "GCN" ], - "figure_path": "./MISSQA/figures/1_2408.03358v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2408.03358v1_figure_1.png", "paperid": "2408.03358v1", - "paper_path": "./MISSQA/papers/2408.03358v1.json", + "paper_path": "./MISS-QA/papers/2408.03358v1.json", "figure_id": "2408.03358v1_figure_1.png", "caption": "Figure 1: The overall workflow of the MLC-GCN contains: a data preprocessing module (a), a multi-graph generator (b), and a multi-level GCN-based predictor. In module a, a brain atlas is used to extract n time series from n brain regions. In b, temporal embedding and STFE are used to extract sparse temporal features at different hierarchy and to calculate the corresponding graphs (connectomes). In c, GCNs are used to encode the generated graphs at different levels into higher level graphical data features. 
These features are concatenated and input to a multi-layer perceptron (MLP) to classify AD.", "qtype": "Others" @@ -10292,9 +10292,9 @@ "masked_elements": [ "causation-sensitive recommendation" ], - "figure_path": "./MISSQA/figures/0_2408.08906v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2408.08906v1_figure_1.png", "paperid": "2408.08906v1", - "paper_path": "./MISSQA/papers/2408.08906v1.json", + "paper_path": "./MISS-QA/papers/2408.08906v1.json", "figure_id": "2408.08906v1_figure_1.png", "caption": "Figure 1: Motivating examples of multi-prospect causation in bundle recommendation.", "qtype": "Design_Rationale" @@ -10324,9 +10324,9 @@ "masked_elements": [ "Multi-prospect causation" ], - "figure_path": "./MISSQA/figures/1_2408.08906v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2408.08906v1_figure_1.png", "paperid": "2408.08906v1", - "paper_path": "./MISSQA/papers/2408.08906v1.json", + "paper_path": "./MISS-QA/papers/2408.08906v1.json", "figure_id": "2408.08906v1_figure_1.png", "caption": "Figure 1: Motivating examples of multi-prospect causation in bundle recommendation.", "qtype": "Implementation_Details" @@ -10347,9 +10347,9 @@ "geometry cloak", "pre-defined pattern" ], - "figure_path": "./MISSQA/figures/0_2410.22705v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2410.22705v1_figure_1.png", "paperid": "2410.22705v1", - "paper_path": "./MISSQA/papers/2410.22705v1.json", + "paper_path": "./MISS-QA/papers/2410.22705v1.json", "figure_id": "2410.22705v1_figure_1.png", "caption": "Figure 1: Overview of our scenario. (a) Images without protection. Images can be easily reconstructed into 3D models by malicious users with TGS [52], posing a threat to the copyright of the image owner.\n(b) Digital Watermarking offers a solution by embedding copyright messages into the view-image before 3D reconstruction. However, the embedded message cannot be extracted from novel rendered views. (c) Geometry Cloak. Our geometry cloak utilizes the disturbance-prone components of TGS, achieving view-specific watermark embedding. Our method can compromise the unauthorized reconstructed 3D model while providing a verifiable pattern for copyright claim.", "qtype": "Implementation_Details" @@ -10370,9 +10370,9 @@ "geometry cloak", "pre-defined pattern" ], - "figure_path": "./MISSQA/figures/1_2410.22705v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2410.22705v1_figure_1.png", "paperid": "2410.22705v1", - "paper_path": "./MISSQA/papers/2410.22705v1.json", + "paper_path": "./MISS-QA/papers/2410.22705v1.json", "figure_id": "2410.22705v1_figure_1.png", "caption": "Figure 1: Overview of our scenario. (a) Images without protection. Images can be easily reconstructed into 3D models by malicious users with TGS [52], posing a threat to the copyright of the image owner.\n(b) Digital Watermarking offers a solution by embedding copyright messages into the view-image before 3D reconstruction. However, the embedded message cannot be extracted from novel rendered views. (c) Geometry Cloak. Our geometry cloak utilizes the disturbance-prone components of TGS, achieving view-specific watermark embedding. 
Our method can compromise the unauthorized reconstructed 3D model while providing a verifiable pattern for copyright claim.", "qtype": "Implementation_Details" @@ -10393,9 +10393,9 @@ "Digital Watermark Encoder", "geometry cloak" ], - "figure_path": "./MISSQA/figures/2_2410.22705v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2410.22705v1_figure_1.png", "paperid": "2410.22705v1", - "paper_path": "./MISSQA/papers/2410.22705v1.json", + "paper_path": "./MISS-QA/papers/2410.22705v1.json", "figure_id": "2410.22705v1_figure_1.png", "caption": "Figure 1: Overview of our scenario. (a) Images without protection. Images can be easily reconstructed into 3D models by malicious users with TGS [52], posing a threat to the copyright of the image owner.\n(b) Digital Watermarking offers a solution by embedding copyright messages into the view-image before 3D reconstruction. However, the embedded message cannot be extracted from novel rendered views. (c) Geometry Cloak. Our geometry cloak utilizes the disturbance-prone components of TGS, achieving view-specific watermark embedding. Our method can compromise the unauthorized reconstructed 3D model while providing a verifiable pattern for copyright claim.", "qtype": "Literature_Background" @@ -10420,9 +10420,9 @@ "masked_elements": [ "Operation Composition Unit" ], - "figure_path": "./MISSQA/figures/0_2411.13754v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.13754v1_figure_2.png", "paperid": "2411.13754v1", - "paper_path": "./MISSQA/papers/2411.13754v1.json", + "paper_path": "./MISS-QA/papers/2411.13754v1.json", "figure_id": "2411.13754v1_figure_2.png", "caption": "Figure 2: IPRM’s computation flow diagram. First, a new set of N-parallel latent operations 𝐙𝐨𝐩subscript𝐙𝐨𝐩\\mathbf{Z_{op}}bold_Z start_POSTSUBSCRIPT bold_op end_POSTSUBSCRIPT are retrieved from language features 𝐗𝐋subscript𝐗𝐋\\mathbf{X_{L}}bold_X start_POSTSUBSCRIPT bold_L end_POSTSUBSCRIPT conditioned on prior operation states 𝐌𝐨𝐩subscript𝐌𝐨𝐩\\mathbf{M_{op}}bold_M start_POSTSUBSCRIPT bold_op end_POSTSUBSCRIPT. Then, visual features 𝐗𝐕subscript𝐗𝐕\\mathbf{X_{V}}bold_X start_POSTSUBSCRIPT bold_V end_POSTSUBSCRIPT are queried conditioned on both 𝐙𝐨𝐩subscript𝐙𝐨𝐩\\mathbf{Z_{op}}bold_Z start_POSTSUBSCRIPT bold_op end_POSTSUBSCRIPT and prior result states results 𝐌𝐫𝐞𝐬subscript𝐌𝐫𝐞𝐬\\mathbf{M_{res}}bold_M start_POSTSUBSCRIPT bold_res end_POSTSUBSCRIPT, to form the new results 𝐙𝐫𝐞𝐬subscript𝐙𝐫𝐞𝐬\\mathbf{Z_{res}}bold_Z start_POSTSUBSCRIPT bold_res end_POSTSUBSCRIPT. Finally, both 𝐙𝐫𝐞𝐬subscript𝐙𝐫𝐞𝐬\\mathbf{Z_{res}}bold_Z start_POSTSUBSCRIPT bold_res end_POSTSUBSCRIPT and 𝐙𝐨𝐩subscript𝐙𝐨𝐩\\mathbf{Z_{op}}bold_Z start_POSTSUBSCRIPT bold_op end_POSTSUBSCRIPT are passed to the Operation Composition Unit (see 2.3), the output of which becomes the new memory state 𝐌𝐌\\mathbf{M}bold_M.", "qtype": "Literature_Background" @@ -10449,9 +10449,9 @@ "masked_elements": [ "Operation Formation" ], - "figure_path": "./MISSQA/figures/1_2411.13754v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2411.13754v1_figure_2.png", "paperid": "2411.13754v1", - "paper_path": "./MISSQA/papers/2411.13754v1.json", + "paper_path": "./MISS-QA/papers/2411.13754v1.json", "figure_id": "2411.13754v1_figure_2.png", "caption": "Figure 2: IPRM’s computation flow diagram. 
First, a new set of N-parallel latent operations 𝐙𝐨𝐩subscript𝐙𝐨𝐩\\mathbf{Z_{op}}bold_Z start_POSTSUBSCRIPT bold_op end_POSTSUBSCRIPT are retrieved from language features 𝐗𝐋subscript𝐗𝐋\\mathbf{X_{L}}bold_X start_POSTSUBSCRIPT bold_L end_POSTSUBSCRIPT conditioned on prior operation states 𝐌𝐨𝐩subscript𝐌𝐨𝐩\\mathbf{M_{op}}bold_M start_POSTSUBSCRIPT bold_op end_POSTSUBSCRIPT. Then, visual features 𝐗𝐕subscript𝐗𝐕\\mathbf{X_{V}}bold_X start_POSTSUBSCRIPT bold_V end_POSTSUBSCRIPT are queried conditioned on both 𝐙𝐨𝐩subscript𝐙𝐨𝐩\\mathbf{Z_{op}}bold_Z start_POSTSUBSCRIPT bold_op end_POSTSUBSCRIPT and prior result states results 𝐌𝐫𝐞𝐬subscript𝐌𝐫𝐞𝐬\\mathbf{M_{res}}bold_M start_POSTSUBSCRIPT bold_res end_POSTSUBSCRIPT, to form the new results 𝐙𝐫𝐞𝐬subscript𝐙𝐫𝐞𝐬\\mathbf{Z_{res}}bold_Z start_POSTSUBSCRIPT bold_res end_POSTSUBSCRIPT. Finally, both 𝐙𝐫𝐞𝐬subscript𝐙𝐫𝐞𝐬\\mathbf{Z_{res}}bold_Z start_POSTSUBSCRIPT bold_res end_POSTSUBSCRIPT and 𝐙𝐨𝐩subscript𝐙𝐨𝐩\\mathbf{Z_{op}}bold_Z start_POSTSUBSCRIPT bold_op end_POSTSUBSCRIPT are passed to the Operation Composition Unit (see 2.3), the output of which becomes the new memory state 𝐌𝐌\\mathbf{M}bold_M.", "qtype": "Experimental_Results" @@ -10482,9 +10482,9 @@ "masked_elements": [ "Operation Composition Unit", "memory state" ], - "figure_path": "./MISSQA/figures/2_2411.13754v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2411.13754v1_figure_2.png", "paperid": "2411.13754v1", - "paper_path": "./MISSQA/papers/2411.13754v1.json", + "paper_path": "./MISS-QA/papers/2411.13754v1.json", "figure_id": "2411.13754v1_figure_2.png", "caption": "Figure 2: IPRM’s computation flow diagram. First, a new set of N-parallel latent operations 𝐙𝐨𝐩subscript𝐙𝐨𝐩\\mathbf{Z_{op}}bold_Z start_POSTSUBSCRIPT bold_op end_POSTSUBSCRIPT are retrieved from language features 𝐗𝐋subscript𝐗𝐋\\mathbf{X_{L}}bold_X start_POSTSUBSCRIPT bold_L end_POSTSUBSCRIPT conditioned on prior operation states 𝐌𝐨𝐩subscript𝐌𝐨𝐩\\mathbf{M_{op}}bold_M start_POSTSUBSCRIPT bold_op end_POSTSUBSCRIPT. Then, visual features 𝐗𝐕subscript𝐗𝐕\\mathbf{X_{V}}bold_X start_POSTSUBSCRIPT bold_V end_POSTSUBSCRIPT are queried conditioned on both 𝐙𝐨𝐩subscript𝐙𝐨𝐩\\mathbf{Z_{op}}bold_Z start_POSTSUBSCRIPT bold_op end_POSTSUBSCRIPT and prior result states results 𝐌𝐫𝐞𝐬subscript𝐌𝐫𝐞𝐬\\mathbf{M_{res}}bold_M start_POSTSUBSCRIPT bold_res end_POSTSUBSCRIPT, to form the new results 𝐙𝐫𝐞𝐬subscript𝐙𝐫𝐞𝐬\\mathbf{Z_{res}}bold_Z start_POSTSUBSCRIPT bold_res end_POSTSUBSCRIPT. Finally, both 𝐙𝐫𝐞𝐬subscript𝐙𝐫𝐞𝐬\\mathbf{Z_{res}}bold_Z start_POSTSUBSCRIPT bold_res end_POSTSUBSCRIPT and 𝐙𝐨𝐩subscript𝐙𝐨𝐩\\mathbf{Z_{op}}bold_Z start_POSTSUBSCRIPT bold_op end_POSTSUBSCRIPT are passed to the Operation Composition Unit (see 2.3), the output of which becomes the new memory state 𝐌𝐌\\mathbf{M}bold_M.", "qtype": "Experimental_Results" @@ -10514,9 +10514,9 @@ "masked_elements": [ "stacking", "meta model" ], - "figure_path": "./MISSQA/figures/0_2411.16561v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.16561v1_figure_1.png", "paperid": "2411.16561v1", - "paper_path": "./MISSQA/papers/2411.16561v1.json", + "paper_path": "./MISS-QA/papers/2411.16561v1.json", "figure_id": "2411.16561v1_figure_1.png", "caption": "Figure 1: A Comparative Overview of Vulnerability Detection Techniques. 
(1) Traditional LLM-based processing, which directly outputs predictions but experiences notable data loss, (2) Traditional+Meta models from previous studies that integrate a meta-classifier to enhance LLM outputs, and (3) the proposed EnStack framework, which leverages an ensemble of multiple LLMs combined through stacking methods. EnStack incorporates a meta-model to further refine predictions, aiming for improved accuracy in vulnerability detection by effectively combining strengths of various LLMs and meta-model architectures.", "qtype": "Experimental_Results" @@ -10543,9 +10543,9 @@ "masked_elements": [ "ensemble methods" ], - "figure_path": "./MISSQA/figures/1_2411.16561v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.16561v1_figure_1.png", "paperid": "2411.16561v1", - "paper_path": "./MISSQA/papers/2411.16561v1.json", + "paper_path": "./MISS-QA/papers/2411.16561v1.json", "figure_id": "2411.16561v1_figure_1.png", "caption": "Figure 1: A Comparative Overview of Vulnerability Detection Techniques. (1) Traditional LLM-based processing, which directly outputs predictions but experiences notable data loss, (2) Traditional+Meta models from previous studies that integrate a meta-classifier to enhance LLM outputs, and (3) the proposed EnStack framework, which leverages an ensemble of multiple LLMs combined through stacking methods. EnStack incorporates a meta-model to further refine predictions, aiming for improved accuracy in vulnerability detection by effectively combining strengths of various LLMs and meta-model architectures.", "qtype": "Experimental_Results" @@ -10566,9 +10566,9 @@ "Ensemble methods", "LLM" ], - "figure_path": "./MISSQA/figures/2_2411.16561v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.16561v1_figure_1.png", "paperid": "2411.16561v1", - "paper_path": "./MISSQA/papers/2411.16561v1.json", + "paper_path": "./MISS-QA/papers/2411.16561v1.json", "figure_id": "2411.16561v1_figure_1.png", "caption": "Figure 1: A Comparative Overview of Vulnerability Detection Techniques. (1) Traditional LLM-based processing, which directly outputs predictions but experiences notable data loss, (2) Traditional+Meta models from previous studies that integrate a meta-classifier to enhance LLM outputs, and (3) the proposed EnStack framework, which leverages an ensemble of multiple LLMs combined through stacking methods. EnStack incorporates a meta-model to further refine predictions, aiming for improved accuracy in vulnerability detection by effectively combining strengths of various LLMs and meta-model architectures.", "qtype": "Others" @@ -10588,9 +10588,9 @@ "masked_elements": [ "Stacking" ], - "figure_path": "./MISSQA/figures/3_2411.16561v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2411.16561v1_figure_1.png", "paperid": "2411.16561v1", - "paper_path": "./MISSQA/papers/2411.16561v1.json", + "paper_path": "./MISS-QA/papers/2411.16561v1.json", "figure_id": "2411.16561v1_figure_1.png", "caption": "Figure 1: A Comparative Overview of Vulnerability Detection Techniques. (1) Traditional LLM-based processing, which directly outputs predictions but experiences notable data loss, (2) Traditional+Meta models from previous studies that integrate a meta-classifier to enhance LLM outputs, and (3) the proposed EnStack framework, which leverages an ensemble of multiple LLMs combined through stacking methods. 
EnStack incorporates a meta-model to further refine predictions, aiming for improved accuracy in vulnerability detection by effectively combining strengths of various LLMs and meta-model architectures.", "qtype": "Others" @@ -10609,9 +10609,9 @@ "masked_elements": [ "Attribute Decoupling Mechanism" ], - "figure_path": "./MISSQA/figures/0_2411.17223v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.17223v1_figure_2.png", "paperid": "2411.17223v1", - "paper_path": "./MISSQA/papers/2411.17223v1.json", + "paper_path": "./MISS-QA/papers/2411.17223v1.json", "figure_id": "2411.17223v1_figure_2.png", "caption": "Figure 2: Overview of DreamMix. During finetuning, we use the source data {𝒙s,𝒑s}subscript𝒙𝑠subscript𝒑𝑠\\{\\boldsymbol{x}_{s},\\boldsymbol{p}_{s}\\}{ bold_italic_x start_POSTSUBSCRIPT italic_s end_POSTSUBSCRIPT , bold_italic_p start_POSTSUBSCRIPT italic_s end_POSTSUBSCRIPT } along with regular data {𝒙r,𝒑r}subscript𝒙𝑟subscript𝒑𝑟\\{\\boldsymbol{x}_{r},\\boldsymbol{p}_{r}\\}{ bold_italic_x start_POSTSUBSCRIPT italic_r end_POSTSUBSCRIPT , bold_italic_p start_POSTSUBSCRIPT italic_r end_POSTSUBSCRIPT } constructed via an attribute decoupling mechanism (Sec. 3.3), to enable pretrained Text-to-Image (T2I) inpainting models to efficiently adapt to subject generation. At testing, we employ a disentangled inpainting framework (Sec. 3.2), which divides the denoising process into two stages: Local Content Generation (LCG) and Global Context Harmonization (GCH). Additionally, we propose a textual attribute substitution module (Sec. 3.4) to generate a decomposed text embedding to enhance the editability of our method during testing.", "qtype": "Others" @@ -10632,9 +10632,9 @@ "VLM Agent", "Textual Attribute Substitution" ], - "figure_path": "./MISSQA/figures/1_2411.17223v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2411.17223v1_figure_2.png", "paperid": "2411.17223v1", - "paper_path": "./MISSQA/papers/2411.17223v1.json", + "paper_path": "./MISS-QA/papers/2411.17223v1.json", "figure_id": "2411.17223v1_figure_2.png", "caption": "Figure 2: Overview of DreamMix. During finetuning, we use the source data {𝒙s,𝒑s}subscript𝒙𝑠subscript𝒑𝑠\\{\\boldsymbol{x}_{s},\\boldsymbol{p}_{s}\\}{ bold_italic_x start_POSTSUBSCRIPT italic_s end_POSTSUBSCRIPT , bold_italic_p start_POSTSUBSCRIPT italic_s end_POSTSUBSCRIPT } along with regular data {𝒙r,𝒑r}subscript𝒙𝑟subscript𝒑𝑟\\{\\boldsymbol{x}_{r},\\boldsymbol{p}_{r}\\}{ bold_italic_x start_POSTSUBSCRIPT italic_r end_POSTSUBSCRIPT , bold_italic_p start_POSTSUBSCRIPT italic_r end_POSTSUBSCRIPT } constructed via an attribute decoupling mechanism (Sec. 3.3), to enable pretrained Text-to-Image (T2I) inpainting models to efficiently adapt to subject generation. At testing, we employ a disentangled inpainting framework (Sec. 3.2), which divides the denoising process into two stages: Local Content Generation (LCG) and Global Context Harmonization (GCH). Additionally, we propose a textual attribute substitution module (Sec. 
3.4) to generate a decomposed text embedding to enhance the editability of our method during testing.", "qtype": "Others" @@ -10662,9 +10662,9 @@ "Local Content Generation", "Global Context Harmonization" ], - "figure_path": "./MISSQA/figures/2_2411.17223v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2411.17223v1_figure_2.png", "paperid": "2411.17223v1", - "paper_path": "./MISSQA/papers/2411.17223v1.json", + "paper_path": "./MISS-QA/papers/2411.17223v1.json", "figure_id": "2411.17223v1_figure_2.png", "caption": "Figure 2: Overview of DreamMix. During finetuning, we use the source data {𝒙s,𝒑s}subscript𝒙𝑠subscript𝒑𝑠\\{\\boldsymbol{x}_{s},\\boldsymbol{p}_{s}\\}{ bold_italic_x start_POSTSUBSCRIPT italic_s end_POSTSUBSCRIPT , bold_italic_p start_POSTSUBSCRIPT italic_s end_POSTSUBSCRIPT } along with regular data {𝒙r,𝒑r}subscript𝒙𝑟subscript𝒑𝑟\\{\\boldsymbol{x}_{r},\\boldsymbol{p}_{r}\\}{ bold_italic_x start_POSTSUBSCRIPT italic_r end_POSTSUBSCRIPT , bold_italic_p start_POSTSUBSCRIPT italic_r end_POSTSUBSCRIPT } constructed via an attribute decoupling mechanism (Sec. 3.3), to enable pretrained Text-to-Image (T2I) inpainting models to efficiently adapt to subject generation. At testing, we employ a disentangled inpainting framework (Sec. 3.2), which divides the denoising process into two stages: Local Content Generation (LCG) and Global Context Harmonization (GCH). Additionally, we propose a textual attribute substitution module (Sec. 3.4) to generate a decomposed text embedding to enhance the editability of our method during testing.", "qtype": "Design_Rationale" @@ -10692,9 +10692,9 @@ "orthogonal decomposition block", "decomposed text embedding" ], - "figure_path": "./MISSQA/figures/3_2411.17223v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2411.17223v1_figure_2.png", "paperid": "2411.17223v1", - "paper_path": "./MISSQA/papers/2411.17223v1.json", + "paper_path": "./MISS-QA/papers/2411.17223v1.json", "figure_id": "2411.17223v1_figure_2.png", "caption": "Figure 2: Overview of DreamMix. During finetuning, we use the source data {𝒙s,𝒑s}subscript𝒙𝑠subscript𝒑𝑠\\{\\boldsymbol{x}_{s},\\boldsymbol{p}_{s}\\}{ bold_italic_x start_POSTSUBSCRIPT italic_s end_POSTSUBSCRIPT , bold_italic_p start_POSTSUBSCRIPT italic_s end_POSTSUBSCRIPT } along with regular data {𝒙r,𝒑r}subscript𝒙𝑟subscript𝒑𝑟\\{\\boldsymbol{x}_{r},\\boldsymbol{p}_{r}\\}{ bold_italic_x start_POSTSUBSCRIPT italic_r end_POSTSUBSCRIPT , bold_italic_p start_POSTSUBSCRIPT italic_r end_POSTSUBSCRIPT } constructed via an attribute decoupling mechanism (Sec. 3.3), to enable pretrained Text-to-Image (T2I) inpainting models to efficiently adapt to subject generation. At testing, we employ a disentangled inpainting framework (Sec. 3.2), which divides the denoising process into two stages: Local Content Generation (LCG) and Global Context Harmonization (GCH). Additionally, we propose a textual attribute substitution module (Sec. 3.4) to generate a decomposed text embedding to enhance the editability of our method during testing.", "qtype": "Design_Rationale" @@ -10723,9 +10723,9 @@ "Multi-Group Tri-Plane", "Global Context" ], - "figure_path": "./MISSQA/figures/0_2407.15771v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2407.15771v1_figure_2.png", "paperid": "2407.15771v1", - "paper_path": "./MISSQA/papers/2407.15771v1.json", + "paper_path": "./MISS-QA/papers/2407.15771v1.json", "figure_id": "2407.15771v1_figure_2.png", "caption": "Figure 2: Model architecture of the proposed local occupancy-enhanced object grasping. 
It first identifies a number of local occupancy regions of interest. Then multi-group tri-plane aggregates the scene context for local occupancy estimation. Finally the occupancy-enhanced local shape feature in each grasp region is extracted by fusing the information of both explicit voxels and implicit queried features, and is decoded to grasp poses.", "qtype": "Design_Rationale" @@ -10753,9 +10753,9 @@ "masked_elements": [ "Implicit Feature" ], - "figure_path": "./MISSQA/figures/1_2407.15771v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2407.15771v1_figure_2.png", "paperid": "2407.15771v1", - "paper_path": "./MISSQA/papers/2407.15771v1.json", + "paper_path": "./MISS-QA/papers/2407.15771v1.json", "figure_id": "2407.15771v1_figure_2.png", "caption": "Figure 2: Model architecture of the proposed local occupancy-enhanced object grasping. It first identifies a number of local occupancy regions of interest. Then multi-group tri-plane aggregates the scene context for local occupancy estimation. Finally the occupancy-enhanced local shape feature in each grasp region is extracted by fusing the information of both explicit voxels and implicit queried features, and is decoded to grasp poses.", "qtype": "Design_Rationale" @@ -10780,9 +10780,9 @@ "masked_elements": [ "Multi-Group Tri-Plane" ], - "figure_path": "./MISSQA/figures/2_2407.15771v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2407.15771v1_figure_2.png", "paperid": "2407.15771v1", - "paper_path": "./MISSQA/papers/2407.15771v1.json", + "paper_path": "./MISS-QA/papers/2407.15771v1.json", "figure_id": "2407.15771v1_figure_2.png", "caption": "Figure 2: Model architecture of the proposed local occupancy-enhanced object grasping. It first identifies a number of local occupancy regions of interest. Then multi-group tri-plane aggregates the scene context for local occupancy estimation. Finally the occupancy-enhanced local shape feature in each grasp region is extracted by fusing the information of both explicit voxels and implicit queried features, and is decoded to grasp poses.", "qtype": "Implementation_Details" @@ -10810,9 +10810,9 @@ "Global Context", "Local Context" ], - "figure_path": "./MISSQA/figures/3_2407.15771v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2407.15771v1_figure_2.png", "paperid": "2407.15771v1", - "paper_path": "./MISSQA/papers/2407.15771v1.json", + "paper_path": "./MISS-QA/papers/2407.15771v1.json", "figure_id": "2407.15771v1_figure_2.png", "caption": "Figure 2: Model architecture of the proposed local occupancy-enhanced object grasping. It first identifies a number of local occupancy regions of interest. Then multi-group tri-plane aggregates the scene context for local occupancy estimation. 
Finally the occupancy-enhanced local shape feature in each grasp region is extracted by fusing the information of both explicit voxels and implicit queried features, and is decoded to grasp poses.", "qtype": "Implementation_Details" @@ -10833,9 +10833,9 @@ "Pkwn", "Punk" ], - "figure_path": "./MISSQA/figures/0_2409.00397v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2409.00397v1_figure_2.png", "paperid": "2409.00397v1", - "paper_path": "./MISSQA/papers/2409.00397v1.json", + "paper_path": "./MISS-QA/papers/2409.00397v1.json", "figure_id": "2409.00397v1_figure_2.png", "caption": "Figure 2: The architecture overview of COSMo, where ℱvsubscriptℱ𝑣\\mathcal{F}_{v}caligraphic_F start_POSTSUBSCRIPT italic_v end_POSTSUBSCRIPT and ℱtsubscriptℱ𝑡\\mathcal{F}_{t}caligraphic_F start_POSTSUBSCRIPT italic_t end_POSTSUBSCRIPT are the frozen pretrained CLIP s image and text encoders, respectively. Pk w nsubscript𝑃𝑘𝑤𝑛P_{kwn}italic_P start_POSTSUBSCRIPT italic_k italic_w italic_n end_POSTSUBSCRIPT and Pu n ksubscript𝑃𝑢𝑛𝑘P_{unk}italic_P start_POSTSUBSCRIPT italic_u italic_n italic_k end_POSTSUBSCRIPT denote the prompts for the known and unknown classes, respectively. ℬθ (⋅)subscriptℬ𝜃⋅\\mathcal{B}_{\\theta}(\\cdot)caligraphic_B start_POSTSUBSCRIPT italic_θ end_POSTSUBSCRIPT ( ⋅ ) represents the domain specific bias network, which generates the domain-bias context tokens β𝛽\\betaitalic_β. Best view in color.", "qtype": "Literature_Background" @@ -10856,9 +10856,9 @@ "masked_elements": [ "Motion Event Loss" ], - "figure_path": "./MISSQA/figures/0_2410.16995v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2410.16995v1_figure_1.png", "paperid": "2410.16995v1", - "paper_path": "./MISSQA/papers/2410.16995v1.json", + "paper_path": "./MISS-QA/papers/2410.16995v1.json", "figure_id": "2410.16995v1_figure_1.png", "caption": "Figure 1: Overview of the proposed E-3DGS framework. This framework integrates motion and exposure events for training 3DGS to effectively handle diverse real-world conditions. We utilize Temporal-to-Intensity Mapping to convert exposure events into intensity images, which yield camera trajectories and a sparse point cloud for 3DGS training. The optimization of 3DGS parameters is supervised through motion event loss and exposure event loss.", "qtype": "Literature_Background" @@ -10880,9 +10880,9 @@ "Exposure Event Loss", "Adaptive Density Control" ], - "figure_path": "./MISSQA/figures/1_2410.16995v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2410.16995v1_figure_1.png", "paperid": "2410.16995v1", - "paper_path": "./MISSQA/papers/2410.16995v1.json", + "paper_path": "./MISS-QA/papers/2410.16995v1.json", "figure_id": "2410.16995v1_figure_1.png", "caption": "Figure 1: Overview of the proposed E-3DGS framework. This framework integrates motion and exposure events for training 3DGS to effectively handle diverse real-world conditions. We utilize Temporal-to-Intensity Mapping to convert exposure events into intensity images, which yield camera trajectories and a sparse point cloud for 3DGS training. 
The optimization of 3DGS parameters is supervised through motion event loss and exposure event loss.", "qtype": "Literature_Background" @@ -10904,9 +10904,9 @@ "Adaptive Density Control", "Differentiable Rasterization" ], - "figure_path": "./MISSQA/figures/2_2410.16995v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2410.16995v1_figure_1.png", "paperid": "2410.16995v1", - "paper_path": "./MISSQA/papers/2410.16995v1.json", + "paper_path": "./MISS-QA/papers/2410.16995v1.json", "figure_id": "2410.16995v1_figure_1.png", "caption": "Figure 1: Overview of the proposed E-3DGS framework. This framework integrates motion and exposure events for training 3DGS to effectively handle diverse real-world conditions. We utilize Temporal-to-Intensity Mapping to convert exposure events into intensity images, which yield camera trajectories and a sparse point cloud for 3DGS training. The optimization of 3DGS parameters is supervised through motion event loss and exposure event loss.", "qtype": "Experimental_Results" @@ -10936,9 +10936,9 @@ "Uncertainty Modeling", "Order Learning" ], - "figure_path": "./MISSQA/figures/0_2409.00603v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2409.00603v1_figure_2.png", "paperid": "2409.00603v1", - "paper_path": "./MISSQA/papers/2409.00603v1.json", + "paper_path": "./MISS-QA/papers/2409.00603v1.json", "figure_id": "2409.00603v1_figure_2.png", "caption": "Figure 2: (a) The training phase of UOL. The order of distributions is constrained by cross entropy loss and hinge loss, and the dispersion of the distributions is constrained by KL loss. (b) The estimation phase of UOL. In uncertainty modeling, the FB of a facial image is modeled by a multi-dimensional Gaussian distribution whose mean μ𝜇\\muitalic_μ and diagonal covariance ΣΣ\\Sigmaroman_Σ are learned by VGG from the image. In distribution comparison, we sample from both the distributions of test image and reference image to form a pair and predict its order by a comparator in order learning. After having the order relations of T𝑇Titalic_T pairs between reference images and the test image, the Bradley-Terry model is applied to estimate the score of the test image.", "qtype": "Experimental_Results" @@ -10963,9 +10963,9 @@ "masked_elements": [ "Bradley-Terry Estimation Module" ], - "figure_path": "./MISSQA/figures/1_2409.00603v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2409.00603v1_figure_2.png", "paperid": "2409.00603v1", - "paper_path": "./MISSQA/papers/2409.00603v1.json", + "paper_path": "./MISS-QA/papers/2409.00603v1.json", "figure_id": "2409.00603v1_figure_2.png", "caption": "Figure 2: (a) The training phase of UOL. The order of distributions is constrained by cross entropy loss and hinge loss, and the dispersion of the distributions is constrained by KL loss. (b) The estimation phase of UOL. In uncertainty modeling, the FB of a facial image is modeled by a multi-dimensional Gaussian distribution whose mean μ𝜇\\muitalic_μ and diagonal covariance ΣΣ\\Sigmaroman_Σ are learned by VGG from the image. In distribution comparison, we sample from both the distributions of test image and reference image to form a pair and predict its order by a comparator in order learning. 
After having the order relations of T𝑇Titalic_T pairs between reference images and the test image, the Bradley-Terry model is applied to estimate the score of the test image.", "qtype": "Experimental_Results" @@ -10985,9 +10985,9 @@ "masked_elements": [ "Monte Carlo sampling" ], - "figure_path": "./MISSQA/figures/2_2409.00603v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2409.00603v1_figure_2.png", "paperid": "2409.00603v1", - "paper_path": "./MISSQA/papers/2409.00603v1.json", + "paper_path": "./MISS-QA/papers/2409.00603v1.json", "figure_id": "2409.00603v1_figure_2.png", "caption": "Figure 2: (a) The training phase of UOL. The order of distributions is constrained by cross entropy loss and hinge loss, and the dispersion of the distributions is constrained by KL loss. (b) The estimation phase of UOL. In uncertainty modeling, the FB of a facial image is modeled by a multi-dimensional Gaussian distribution whose mean μ𝜇\\muitalic_μ and diagonal covariance ΣΣ\\Sigmaroman_Σ are learned by VGG from the image. In distribution comparison, we sample from both the distributions of test image and reference image to form a pair and predict its order by a comparator in order learning. After having the order relations of T𝑇Titalic_T pairs between reference images and the test image, the Bradley-Terry model is applied to estimate the score of the test image.", "qtype": "Others" @@ -11007,9 +11007,9 @@ "masked_elements": [ "reference set" ], - "figure_path": "./MISSQA/figures/3_2409.00603v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2409.00603v1_figure_2.png", "paperid": "2409.00603v1", - "paper_path": "./MISSQA/papers/2409.00603v1.json", + "paper_path": "./MISS-QA/papers/2409.00603v1.json", "figure_id": "2409.00603v1_figure_2.png", "caption": "Figure 2: (a) The training phase of UOL. The order of distributions is constrained by cross entropy loss and hinge loss, and the dispersion of the distributions is constrained by KL loss. (b) The estimation phase of UOL. In uncertainty modeling, the FB of a facial image is modeled by a multi-dimensional Gaussian distribution whose mean μ𝜇\\muitalic_μ and diagonal covariance ΣΣ\\Sigmaroman_Σ are learned by VGG from the image. In distribution comparison, we sample from both the distributions of test image and reference image to form a pair and predict its order by a comparator in order learning. After having the order relations of T𝑇Titalic_T pairs between reference images and the test image, the Bradley-Terry model is applied to estimate the score of the test image.", "qtype": "Others" @@ -11029,9 +11029,9 @@ "masked_elements": [ "MaxRényi-K%" ], - "figure_path": "./MISSQA/figures/0_2411.02902v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.02902v1_figure_1.png", "paperid": "2411.02902v1", - "paper_path": "./MISSQA/papers/2411.02902v1.json", + "paper_path": "./MISS-QA/papers/2411.02902v1.json", "figure_id": "2411.02902v1_figure_1.png", "caption": "Figure 1: MIAs against VLLMs. Top: Our image detection pipeline: In the generation stage, we feed the image and instruction to the target model to obtain a description; then during the inference stage, we input the image, instruction, and generated description to the model, and extract the logits slices to calculate metrics. 
Bottom: MaxRényi-K% metric: we first get the Rényi entropy of each token position, then select the largest k%percent𝑘k\\%italic_k % tokens and calculate the average Rényi entropy.", "qtype": "Others" @@ -11051,9 +11051,9 @@ "masked_elements": [ "description" ], - "figure_path": "./MISSQA/figures/1_2411.02902v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.02902v1_figure_1.png", "paperid": "2411.02902v1", - "paper_path": "./MISSQA/papers/2411.02902v1.json", + "paper_path": "./MISS-QA/papers/2411.02902v1.json", "figure_id": "2411.02902v1_figure_1.png", "caption": "Figure 1: MIAs against VLLMs. Top: Our image detection pipeline: In the generation stage, we feed the image and instruction to the target model to obtain a description; then during the inference stage, we input the image, instruction, and generated description to the model, and extract the logits slices to calculate metrics. Bottom: MaxRényi-K% metric: we first get the Rényi entropy of each token position, then select the largest k%percent𝑘k\\%italic_k % tokens and calculate the average Rényi entropy.", "qtype": "Others" @@ -11079,9 +11079,9 @@ "generation stage", "inference stage" ], - "figure_path": "./MISSQA/figures/2_2411.02902v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.02902v1_figure_1.png", "paperid": "2411.02902v1", - "paper_path": "./MISSQA/papers/2411.02902v1.json", + "paper_path": "./MISS-QA/papers/2411.02902v1.json", "figure_id": "2411.02902v1_figure_1.png", "caption": "Figure 1: MIAs against VLLMs. Top: Our image detection pipeline: In the generation stage, we feed the image and instruction to the target model to obtain a description; then during the inference stage, we input the image, instruction, and generated description to the model, and extract the logits slices to calculate metrics. Bottom: MaxRényi-K% metric: we first get the Rényi entropy of each token position, then select the largest k%percent𝑘k\\%italic_k % tokens and calculate the average Rényi entropy.", "qtype": "Design_Rationale" @@ -11102,9 +11102,9 @@ "masked_elements": [ "critic C" ], - "figure_path": "./MISSQA/figures/0_2409.09125v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2409.09125v1_figure_1.png", "paperid": "2409.09125v1", - "paper_path": "./MISSQA/papers/2409.09125v1.json", + "paper_path": "./MISS-QA/papers/2409.09125v1.json", "figure_id": "2409.09125v1_figure_1.png", "caption": "Figure 1: Illustration of the model architecture. (A) Architecture of the model, with generator G producing generated samples, and dataset D producing biological samples, which are both used as input for critic C. (B) Architecture of generator. In the upper left corner, the generator composed of several sub-generators is shown. The bottom part shows that each sub-generator is a quantum circuit following a re-uploading scheme. Here a noise-encoding layer and a parametrized layer are repeated for l𝑙litalic_l layers, with the parametrized layer ansatz of each parametrized layer shown in the top right side. 
After trained, the generator can be used to produce samples (D) similar to samples obtained from the biological dataset (C).", "qtype": "Implementation_Details" @@ -11125,9 +11125,9 @@ "generator G", "critic C" ], - "figure_path": "./MISSQA/figures/1_2409.09125v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2409.09125v1_figure_1.png", "paperid": "2409.09125v1", - "paper_path": "./MISSQA/papers/2409.09125v1.json", + "paper_path": "./MISS-QA/papers/2409.09125v1.json", "figure_id": "2409.09125v1_figure_1.png", "caption": "Figure 1: Illustration of the model architecture. (A) Architecture of the model, with generator G producing generated samples, and dataset D producing biological samples, which are both used as input for critic C. (B) Architecture of generator. In the upper left corner, the generator composed of several sub-generators is shown. The bottom part shows that each sub-generator is a quantum circuit following a re-uploading scheme. Here a noise-encoding layer and a parametrized layer are repeated for l𝑙litalic_l layers, with the parametrized layer ansatz of each parametrized layer shown in the top right side. After trained, the generator can be used to produce samples (D) similar to samples obtained from the biological dataset (C).", "qtype": "Literature_Background" @@ -11151,9 +11151,9 @@ "masked_elements": [ "Context Attention" ], - "figure_path": "./MISSQA/figures/0_2408.02271v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2408.02271v1_figure_2.png", "paperid": "2408.02271v1", - "paper_path": "./MISSQA/papers/2408.02271v1.json", + "paper_path": "./MISS-QA/papers/2408.02271v1.json", "figure_id": "2408.02271v1_figure_2.png", "caption": "Figure 2: The architecture of our proposed method that contains a multi-grained prefix encoder and personality reinforcement module.", "qtype": "Literature_Background" @@ -11181,9 +11181,9 @@ "masked_elements": [ "Multi-Grained Prefix Encoder" ], - "figure_path": "./MISSQA/figures/1_2408.02271v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2408.02271v1_figure_2.png", "paperid": "2408.02271v1", - "paper_path": "./MISSQA/papers/2408.02271v1.json", + "paper_path": "./MISS-QA/papers/2408.02271v1.json", "figure_id": "2408.02271v1_figure_2.png", "caption": "Figure 2: The architecture of our proposed method that contains a multi-grained prefix encoder and personality reinforcement module.", "qtype": "Experimental_Results" @@ -11210,9 +11210,9 @@ "Learnable Query", "Implicit Pose Indicator" ], - "figure_path": "./MISSQA/figures/0_2410.10306v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2410.10306v1_figure_2.png", "paperid": "2410.10306v1", - "paper_path": "./MISSQA/papers/2410.10306v1.json", + "paper_path": "./MISS-QA/papers/2410.10306v1.json", "figure_id": "2410.10306v1_figure_2.png", "caption": "Figure 2: (a) The overview of our Animate-X. 
Given a reference image Irsuperscript𝐼𝑟I^{r}italic_I start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT, we first extract CLIP image feature fφrsubscriptsuperscript𝑓𝑟𝜑f^{r}_{\\varphi}italic_f start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_φ end_POSTSUBSCRIPT and latent feature fersubscriptsuperscript𝑓𝑟𝑒f^{r}_{e}italic_f start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_e end_POSTSUBSCRIPT via CLIP image encoder ΦΦ\\Phiroman_Φ and VAE encoder ℰℰ\\mathcal{E}caligraphic_E.\nThe proposed Implicit Pose Indicator (IPI) and Explicit Pose Indicator (EPI)\nproduce motion feature fisubscript𝑓𝑖f_{i}italic_f start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT and pose feature fesubscript𝑓𝑒f_{e}italic_f start_POSTSUBSCRIPT italic_e end_POSTSUBSCRIPT, respectively. fesubscript𝑓𝑒f_{e}italic_f start_POSTSUBSCRIPT italic_e end_POSTSUBSCRIPT is concatenated with the noised input ϵitalic-ϵ\\epsilonitalic_ϵ along the channel dimension, then further concatenated with fersubscriptsuperscript𝑓𝑟𝑒f^{r}_{e}italic_f start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_e end_POSTSUBSCRIPT along the temporal dimension. This serves as the input to the diffusion model ϵθsubscriptitalic-ϵ𝜃\\epsilon_{\\theta}italic_ϵ start_POSTSUBSCRIPT italic_θ end_POSTSUBSCRIPT for progressive denoising. During the denoising process, fφrsubscriptsuperscript𝑓𝑟𝜑f^{r}_{\\varphi}italic_f start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_φ end_POSTSUBSCRIPT and fisubscript𝑓𝑖f_{i}italic_f start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT provide appearance condition from Irsuperscript𝐼𝑟I^{r}italic_I start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT and motion condition from I1:Fdsubscriptsuperscript𝐼𝑑:1𝐹I^{d}_{1:F}italic_I start_POSTSUPERSCRIPT italic_d end_POSTSUPERSCRIPT start_POSTSUBSCRIPT 1 : italic_F end_POSTSUBSCRIPT.\nAt last, a VAE decoder 𝒟𝒟\\mathcal{D}caligraphic_D is adopted to map the generated latent representation z0subscript𝑧0z_{0}italic_z start_POSTSUBSCRIPT 0 end_POSTSUBSCRIPT to the animation video. (b) The detailed structure of Implicit Pose Indicator. (c) The pipeline of pose transformation by Explicit Pose Indicator.", "qtype": "Experimental_Results" @@ -11238,9 +11238,9 @@ "masked_elements": [ "Explicit Pose Indicator" ], - "figure_path": "./MISSQA/figures/1_2410.10306v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2410.10306v1_figure_2.png", "paperid": "2410.10306v1", - "paper_path": "./MISSQA/papers/2410.10306v1.json", + "paper_path": "./MISS-QA/papers/2410.10306v1.json", "figure_id": "2410.10306v1_figure_2.png", "caption": "Figure 2: (a) The overview of our Animate-X. Given a reference image Irsuperscript𝐼𝑟I^{r}italic_I start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT, we first extract CLIP image feature fφrsubscriptsuperscript𝑓𝑟𝜑f^{r}_{\\varphi}italic_f start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_φ end_POSTSUBSCRIPT and latent feature fersubscriptsuperscript𝑓𝑟𝑒f^{r}_{e}italic_f start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_e end_POSTSUBSCRIPT via CLIP image encoder ΦΦ\\Phiroman_Φ and VAE encoder ℰℰ\\mathcal{E}caligraphic_E.\nThe proposed Implicit Pose Indicator (IPI) and Explicit Pose Indicator (EPI)\nproduce motion feature fisubscript𝑓𝑖f_{i}italic_f start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT and pose feature fesubscript𝑓𝑒f_{e}italic_f start_POSTSUBSCRIPT italic_e end_POSTSUBSCRIPT, respectively. 
fesubscript𝑓𝑒f_{e}italic_f start_POSTSUBSCRIPT italic_e end_POSTSUBSCRIPT is concatenated with the noised input ϵitalic-ϵ\\epsilonitalic_ϵ along the channel dimension, then further concatenated with fersubscriptsuperscript𝑓𝑟𝑒f^{r}_{e}italic_f start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_e end_POSTSUBSCRIPT along the temporal dimension. This serves as the input to the diffusion model ϵθsubscriptitalic-ϵ𝜃\\epsilon_{\\theta}italic_ϵ start_POSTSUBSCRIPT italic_θ end_POSTSUBSCRIPT for progressive denoising. During the denoising process, fφrsubscriptsuperscript𝑓𝑟𝜑f^{r}_{\\varphi}italic_f start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_φ end_POSTSUBSCRIPT and fisubscript𝑓𝑖f_{i}italic_f start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT provide appearance condition from Irsuperscript𝐼𝑟I^{r}italic_I start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT and motion condition from I1:Fdsubscriptsuperscript𝐼𝑑:1𝐹I^{d}_{1:F}italic_I start_POSTSUPERSCRIPT italic_d end_POSTSUPERSCRIPT start_POSTSUBSCRIPT 1 : italic_F end_POSTSUBSCRIPT.\nAt last, a VAE decoder 𝒟𝒟\\mathcal{D}caligraphic_D is adopted to map the generated latent representation z0subscript𝑧0z_{0}italic_z start_POSTSUBSCRIPT 0 end_POSTSUBSCRIPT to the animation video. (b) The detailed structure of Implicit Pose Indicator. (c) The pipeline of pose transformation by Explicit Pose Indicator.", "qtype": "Experimental_Results" @@ -11261,9 +11261,9 @@ "CLIP feature", "Implicit Pose Indicator" ], - "figure_path": "./MISSQA/figures/2_2410.10306v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2410.10306v1_figure_2.png", "paperid": "2410.10306v1", - "paper_path": "./MISSQA/papers/2410.10306v1.json", + "paper_path": "./MISS-QA/papers/2410.10306v1.json", "figure_id": "2410.10306v1_figure_2.png", "caption": "Figure 2: (a) The overview of our Animate-X. Given a reference image Irsuperscript𝐼𝑟I^{r}italic_I start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT, we first extract CLIP image feature fφrsubscriptsuperscript𝑓𝑟𝜑f^{r}_{\\varphi}italic_f start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_φ end_POSTSUBSCRIPT and latent feature fersubscriptsuperscript𝑓𝑟𝑒f^{r}_{e}italic_f start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_e end_POSTSUBSCRIPT via CLIP image encoder ΦΦ\\Phiroman_Φ and VAE encoder ℰℰ\\mathcal{E}caligraphic_E.\nThe proposed Implicit Pose Indicator (IPI) and Explicit Pose Indicator (EPI)\nproduce motion feature fisubscript𝑓𝑖f_{i}italic_f start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT and pose feature fesubscript𝑓𝑒f_{e}italic_f start_POSTSUBSCRIPT italic_e end_POSTSUBSCRIPT, respectively. fesubscript𝑓𝑒f_{e}italic_f start_POSTSUBSCRIPT italic_e end_POSTSUBSCRIPT is concatenated with the noised input ϵitalic-ϵ\\epsilonitalic_ϵ along the channel dimension, then further concatenated with fersubscriptsuperscript𝑓𝑟𝑒f^{r}_{e}italic_f start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_e end_POSTSUBSCRIPT along the temporal dimension. This serves as the input to the diffusion model ϵθsubscriptitalic-ϵ𝜃\\epsilon_{\\theta}italic_ϵ start_POSTSUBSCRIPT italic_θ end_POSTSUBSCRIPT for progressive denoising. 
During the denoising process, fφrsubscriptsuperscript𝑓𝑟𝜑f^{r}_{\\varphi}italic_f start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_φ end_POSTSUBSCRIPT and fisubscript𝑓𝑖f_{i}italic_f start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT provide appearance condition from Irsuperscript𝐼𝑟I^{r}italic_I start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT and motion condition from I1:Fdsubscriptsuperscript𝐼𝑑:1𝐹I^{d}_{1:F}italic_I start_POSTSUPERSCRIPT italic_d end_POSTSUPERSCRIPT start_POSTSUBSCRIPT 1 : italic_F end_POSTSUBSCRIPT.\nAt last, a VAE decoder 𝒟𝒟\\mathcal{D}caligraphic_D is adopted to map the generated latent representation z0subscript𝑧0z_{0}italic_z start_POSTSUBSCRIPT 0 end_POSTSUBSCRIPT to the animation video. (b) The detailed structure of Implicit Pose Indicator. (c) The pipeline of pose transformation by Explicit Pose Indicator.", "qtype": "Others" @@ -11284,9 +11284,9 @@ "VAE encoder", "Spatial Attention" ], - "figure_path": "./MISSQA/figures/3_2410.10306v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2410.10306v1_figure_2.png", "paperid": "2410.10306v1", - "paper_path": "./MISSQA/papers/2410.10306v1.json", + "paper_path": "./MISS-QA/papers/2410.10306v1.json", "figure_id": "2410.10306v1_figure_2.png", "caption": "Figure 2: (a) The overview of our Animate-X. Given a reference image Irsuperscript𝐼𝑟I^{r}italic_I start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT, we first extract CLIP image feature fφrsubscriptsuperscript𝑓𝑟𝜑f^{r}_{\\varphi}italic_f start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_φ end_POSTSUBSCRIPT and latent feature fersubscriptsuperscript𝑓𝑟𝑒f^{r}_{e}italic_f start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_e end_POSTSUBSCRIPT via CLIP image encoder ΦΦ\\Phiroman_Φ and VAE encoder ℰℰ\\mathcal{E}caligraphic_E.\nThe proposed Implicit Pose Indicator (IPI) and Explicit Pose Indicator (EPI)\nproduce motion feature fisubscript𝑓𝑖f_{i}italic_f start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT and pose feature fesubscript𝑓𝑒f_{e}italic_f start_POSTSUBSCRIPT italic_e end_POSTSUBSCRIPT, respectively. fesubscript𝑓𝑒f_{e}italic_f start_POSTSUBSCRIPT italic_e end_POSTSUBSCRIPT is concatenated with the noised input ϵitalic-ϵ\\epsilonitalic_ϵ along the channel dimension, then further concatenated with fersubscriptsuperscript𝑓𝑟𝑒f^{r}_{e}italic_f start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_e end_POSTSUBSCRIPT along the temporal dimension. This serves as the input to the diffusion model ϵθsubscriptitalic-ϵ𝜃\\epsilon_{\\theta}italic_ϵ start_POSTSUBSCRIPT italic_θ end_POSTSUBSCRIPT for progressive denoising. During the denoising process, fφrsubscriptsuperscript𝑓𝑟𝜑f^{r}_{\\varphi}italic_f start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT start_POSTSUBSCRIPT italic_φ end_POSTSUBSCRIPT and fisubscript𝑓𝑖f_{i}italic_f start_POSTSUBSCRIPT italic_i end_POSTSUBSCRIPT provide appearance condition from Irsuperscript𝐼𝑟I^{r}italic_I start_POSTSUPERSCRIPT italic_r end_POSTSUPERSCRIPT and motion condition from I1:Fdsubscriptsuperscript𝐼𝑑:1𝐹I^{d}_{1:F}italic_I start_POSTSUPERSCRIPT italic_d end_POSTSUPERSCRIPT start_POSTSUBSCRIPT 1 : italic_F end_POSTSUBSCRIPT.\nAt last, a VAE decoder 𝒟𝒟\\mathcal{D}caligraphic_D is adopted to map the generated latent representation z0subscript𝑧0z_{0}italic_z start_POSTSUBSCRIPT 0 end_POSTSUBSCRIPT to the animation video. (b) The detailed structure of Implicit Pose Indicator. 
(c) The pipeline of pose transformation by Explicit Pose Indicator.", "qtype": "Others" @@ -11305,9 +11305,9 @@ "masked_elements": [ "Concept Encoding Shelf" ], - "figure_path": "./MISSQA/figures/0_2408.16119v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2408.16119v1_figure_1.png", "paperid": "2408.16119v1", - "paper_path": "./MISSQA/papers/2408.16119v1.json", + "paper_path": "./MISS-QA/papers/2408.16119v1.json", "figure_id": "2408.16119v1_figure_1.png", "caption": "Figure 1. With Data Formulator 2, analysts can navigate the iteration history in Data Threads and select previous designs to be reused towards new ones; then, using Concept Encoding Shelf, analysts specify their chart design using blended UI and natural language inputs, delegating data transformation effort to AI. When new charts are created, data threads are updated for future reference. Data Formulator 2 is available at https://github.com/microsoft/data-formulator.", "qtype": "Others" @@ -11332,9 +11332,9 @@ "masked_elements": [ "Data Threads" ], - "figure_path": "./MISSQA/figures/1_2408.16119v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2408.16119v1_figure_1.png", "paperid": "2408.16119v1", - "paper_path": "./MISSQA/papers/2408.16119v1.json", + "paper_path": "./MISS-QA/papers/2408.16119v1.json", "figure_id": "2408.16119v1_figure_1.png", "caption": "Figure 1. With Data Formulator 2, analysts can navigate the iteration history in Data Threads and select previous designs to be reused towards new ones; then, using Concept Encoding Shelf, analysts specify their chart design using blended UI and natural language inputs, delegating data transformation effort to AI. When new charts are created, data threads are updated for future reference. Data Formulator 2 is available at https://github.com/microsoft/data-formulator.", "qtype": "Design_Rationale" @@ -11361,9 +11361,9 @@ "masked_elements": [ "Unified Prompts & Action Spaces" ], - "figure_path": "./MISSQA/figures/0_2408.06327v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2408.06327v1_figure_2.png", "paperid": "2408.06327v1", - "paper_path": "./MISSQA/papers/2408.06327v1.json", + "paper_path": "./MISS-QA/papers/2408.06327v1.json", "figure_id": "2408.06327v1_figure_2.png", "caption": "Figure 2: VisualAgentBench is the first systematic benchmark to evaluate LMM-as-Visual-Agent across a diverse set of practical challenges. Beside benchmarking, it is also the first to offer SFT trajectory data for behavior cloning training over all target environments, demonstrating the huge potential to improve open LMMs for serving as visual foundation agents.", "qtype": "Design_Rationale" @@ -11392,9 +11392,9 @@ "Behavior Cloning", "Proprietary LMM APIs" ], - "figure_path": "./MISSQA/figures/1_2408.06327v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2408.06327v1_figure_2.png", "paperid": "2408.06327v1", - "paper_path": "./MISSQA/papers/2408.06327v1.json", + "paper_path": "./MISS-QA/papers/2408.06327v1.json", "figure_id": "2408.06327v1_figure_2.png", "caption": "Figure 2: VisualAgentBench is the first systematic benchmark to evaluate LMM-as-Visual-Agent across a diverse set of practical challenges. 
Beside benchmarking, it is also the first to offer SFT trajectory data for behavior cloning training over all target environments, demonstrating the huge potential to improve open LMMs for serving as visual foundation agents.", "qtype": "Design_Rationale" @@ -11414,9 +11414,9 @@ "masked_elements": [ "Unified Prompts" ], - "figure_path": "./MISSQA/figures/2_2408.06327v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2408.06327v1_figure_2.png", "paperid": "2408.06327v1", - "paper_path": "./MISSQA/papers/2408.06327v1.json", + "paper_path": "./MISS-QA/papers/2408.06327v1.json", "figure_id": "2408.06327v1_figure_2.png", "caption": "Figure 2: VisualAgentBench is the first systematic benchmark to evaluate LMM-as-Visual-Agent across a diverse set of practical challenges. Beside benchmarking, it is also the first to offer SFT trajectory data for behavior cloning training over all target environments, demonstrating the huge potential to improve open LMMs for serving as visual foundation agents.", "qtype": "Implementation_Details" @@ -11444,9 +11444,9 @@ "masked_elements": [ "Behavior Cloning" ], - "figure_path": "./MISSQA/figures/3_2408.06327v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2408.06327v1_figure_2.png", "paperid": "2408.06327v1", - "paper_path": "./MISSQA/papers/2408.06327v1.json", + "paper_path": "./MISS-QA/papers/2408.06327v1.json", "figure_id": "2408.06327v1_figure_2.png", "caption": "Figure 2: VisualAgentBench is the first systematic benchmark to evaluate LMM-as-Visual-Agent across a diverse set of practical challenges. Beside benchmarking, it is also the first to offer SFT trajectory data for behavior cloning training over all target environments, demonstrating the huge potential to improve open LMMs for serving as visual foundation agents.", "qtype": "Implementation_Details" @@ -11472,9 +11472,9 @@ "CLIP Encoder", "UNet" ], - "figure_path": "./MISSQA/figures/0_2409.07452v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2409.07452v1_figure_2.png", "paperid": "2409.07452v1", - "paper_path": "./MISSQA/papers/2409.07452v1.json", + "paper_path": "./MISS-QA/papers/2409.07452v1.json", "figure_id": "2409.07452v1_figure_2.png", "caption": "Figure 2. An overview of our proposed Hi3D. Our Hi3D fully exploits the capabilities of large-scale pre-trained video diffusion models to effectively trigger high-resolution image-to-3D generation. Specifically, in the first stage of basic multi-view generation, Hi3D remoulds video diffusion model with additional camera pose condition, aiming to transform single-view image into low-resolution 3D-aware sequential images. Next, in the second stage of 3D-aware multi-view refinement, we feed this low-resolution orbit video into 3D-aware video-to-video refiner with additional depth condition, leading to high-resolution orbit video with highly detailed texture. Finally, we augment the resultant multi-view images with more novel views through 3D Gaussian Splatting and employ SDF-based reconstruction to extract high-quality 3D meshes.", "qtype": "Implementation_Details" @@ -11501,9 +11501,9 @@ "Depth Estimate", "VAE Encoder" ], - "figure_path": "./MISSQA/figures/1_2409.07452v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2409.07452v1_figure_2.png", "paperid": "2409.07452v1", - "paper_path": "./MISSQA/papers/2409.07452v1.json", + "paper_path": "./MISS-QA/papers/2409.07452v1.json", "figure_id": "2409.07452v1_figure_2.png", "caption": "Figure 2. An overview of our proposed Hi3D. 
Our Hi3D fully exploits the capabilities of large-scale pre-trained video diffusion models to effectively trigger high-resolution image-to-3D generation. Specifically, in the first stage of basic multi-view generation, Hi3D remoulds video diffusion model with additional camera pose condition, aiming to transform single-view image into low-resolution 3D-aware sequential images. Next, in the second stage of 3D-aware multi-view refinement, we feed this low-resolution orbit video into 3D-aware video-to-video refiner with additional depth condition, leading to high-resolution orbit video with highly detailed texture. Finally, we augment the resultant multi-view images with more novel views through 3D Gaussian Splatting and employ SDF-based reconstruction to extract high-quality 3D meshes.", "qtype": "Implementation_Details" @@ -11530,9 +11530,9 @@ "3D Gaussian Splatting", "SDF-based reconstruction" ], - "figure_path": "./MISSQA/figures/2_2409.07452v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2409.07452v1_figure_2.png", "paperid": "2409.07452v1", - "paper_path": "./MISSQA/papers/2409.07452v1.json", + "paper_path": "./MISS-QA/papers/2409.07452v1.json", "figure_id": "2409.07452v1_figure_2.png", "caption": "Figure 2. An overview of our proposed Hi3D. Our Hi3D fully exploits the capabilities of large-scale pre-trained video diffusion models to effectively trigger high-resolution image-to-3D generation. Specifically, in the first stage of basic multi-view generation, Hi3D remoulds video diffusion model with additional camera pose condition, aiming to transform single-view image into low-resolution 3D-aware sequential images. Next, in the second stage of 3D-aware multi-view refinement, we feed this low-resolution orbit video into 3D-aware video-to-video refiner with additional depth condition, leading to high-resolution orbit video with highly detailed texture. Finally, we augment the resultant multi-view images with more novel views through 3D Gaussian Splatting and employ SDF-based reconstruction to extract high-quality 3D meshes.", "qtype": "Literature_Background" @@ -11552,9 +11552,9 @@ "masked_elements": [ "camera elevation" ], - "figure_path": "./MISSQA/figures/3_2409.07452v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2409.07452v1_figure_2.png", "paperid": "2409.07452v1", - "paper_path": "./MISSQA/papers/2409.07452v1.json", + "paper_path": "./MISS-QA/papers/2409.07452v1.json", "figure_id": "2409.07452v1_figure_2.png", "caption": "Figure 2. An overview of our proposed Hi3D. Our Hi3D fully exploits the capabilities of large-scale pre-trained video diffusion models to effectively trigger high-resolution image-to-3D generation. Specifically, in the first stage of basic multi-view generation, Hi3D remoulds video diffusion model with additional camera pose condition, aiming to transform single-view image into low-resolution 3D-aware sequential images. Next, in the second stage of 3D-aware multi-view refinement, we feed this low-resolution orbit video into 3D-aware video-to-video refiner with additional depth condition, leading to high-resolution orbit video with highly detailed texture. 
Finally, we augment the resultant multi-view images with more novel views through 3D Gaussian Splatting and employ SDF-based reconstruction to extract high-quality 3D meshes.", "qtype": "Literature_Background" @@ -11581,9 +11581,9 @@ "masked_elements": [ "Feature Alignment" ], - "figure_path": "./MISSQA/figures/0_2410.14143v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2410.14143v1_figure_2.png", "paperid": "2410.14143v1", - "paper_path": "./MISSQA/papers/2410.14143v1.json", + "paper_path": "./MISS-QA/papers/2410.14143v1.json", "figure_id": "2410.14143v1_figure_2.png", "caption": "Figure 2: The overall framework of our proposed PCKD. We first augment samples, extract features and perform feature alignment (ℒF⁢Asubscriptℒ𝐹𝐴\\mathcal{L}_{FA}caligraphic_L start_POSTSUBSCRIPT italic_F italic_A end_POSTSUBSCRIPT), category center alignment (ℒC⁢Asubscriptℒ𝐶𝐴\\mathcal{L}_{CA}caligraphic_L start_POSTSUBSCRIPT italic_C italic_A end_POSTSUBSCRIPT), and category center contrast (ℒC⁢Csubscriptℒ𝐶𝐶\\mathcal{L}_{CC}caligraphic_L start_POSTSUBSCRIPT italic_C italic_C end_POSTSUBSCRIPT).\nThen our preview strategy can assign dynamic weights to each sample based on its difficulty score.", "qtype": "Literature_Background" @@ -11608,9 +11608,9 @@ "masked_elements": [ "Category Center Contrast" ], - "figure_path": "./MISSQA/figures/1_2410.14143v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2410.14143v1_figure_2.png", "paperid": "2410.14143v1", - "paper_path": "./MISSQA/papers/2410.14143v1.json", + "paper_path": "./MISS-QA/papers/2410.14143v1.json", "figure_id": "2410.14143v1_figure_2.png", "caption": "Figure 2: The overall framework of our proposed PCKD. We first augment samples, extract features and perform feature alignment (ℒF⁢Asubscriptℒ𝐹𝐴\\mathcal{L}_{FA}caligraphic_L start_POSTSUBSCRIPT italic_F italic_A end_POSTSUBSCRIPT), category center alignment (ℒC⁢Asubscriptℒ𝐶𝐴\\mathcal{L}_{CA}caligraphic_L start_POSTSUBSCRIPT italic_C italic_A end_POSTSUBSCRIPT), and category center contrast (ℒC⁢Csubscriptℒ𝐶𝐶\\mathcal{L}_{CC}caligraphic_L start_POSTSUBSCRIPT italic_C italic_C end_POSTSUBSCRIPT).\nThen our preview strategy can assign dynamic weights to each sample based on its difficulty score.", "qtype": "Literature_Background" @@ -11639,9 +11639,9 @@ "Preview-based Learning Strategy", "Difficulty Score" ], - "figure_path": "./MISSQA/figures/2_2410.14143v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2410.14143v1_figure_2.png", "paperid": "2410.14143v1", - "paper_path": "./MISSQA/papers/2410.14143v1.json", + "paper_path": "./MISS-QA/papers/2410.14143v1.json", "figure_id": "2410.14143v1_figure_2.png", "caption": "Figure 2: The overall framework of our proposed PCKD. 
We first augment samples, extract features and perform feature alignment (ℒF⁢Asubscriptℒ𝐹𝐴\\mathcal{L}_{FA}caligraphic_L start_POSTSUBSCRIPT italic_F italic_A end_POSTSUBSCRIPT), category center alignment (ℒC⁢Asubscriptℒ𝐶𝐴\\mathcal{L}_{CA}caligraphic_L start_POSTSUBSCRIPT italic_C italic_A end_POSTSUBSCRIPT), and category center contrast (ℒC⁢Csubscriptℒ𝐶𝐶\\mathcal{L}_{CC}caligraphic_L start_POSTSUBSCRIPT italic_C italic_C end_POSTSUBSCRIPT).\nThen our preview strategy can assign dynamic weights to each sample based on its difficulty score.", "qtype": "Experimental_Results" @@ -11668,9 +11668,9 @@ "Denoising Autoencoder", "Convolutional Neural Network" ], - "figure_path": "./MISSQA/figures/0_2411.08082v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.08082v1_figure_1.png", "paperid": "2411.08082v1", - "paper_path": "./MISSQA/papers/2411.08082v1.json", + "paper_path": "./MISS-QA/papers/2411.08082v1.json", "figure_id": "2411.08082v1_figure_1.png", "caption": "Figure 1: SERS deep learning framework development pipeline. Illustrated are the SERS measurement process applied (A), and the computational framework pipeline. Benchmark comparisons of alternative methodology are presented on the right. Preprocessing methods (B) are marked in orange and light red, quantification methods (C) in blue, and explainability methods (D) in dark red. Asymmetric least squares (ALS) baselining is applied to all spectra prior to assessing the framework or the benchmarks. SERS = surface-enhanced raman spectroscopy, AuNP = gold nanoparticle, CB[8] = cucurbit[8]uril, CNN = convolutional neural network, XGBoost = extreme gradient boosting trees, PLSR = partial least squares regression, SVM = support vector machines, CRIME = context representative interpretable model explanations, LEN = logic explained networks, SHAP = Shapley additive explanations.", "qtype": "Experimental_Results" @@ -11699,9 +11699,9 @@ "masked_elements": [ "CRIME" ], - "figure_path": "./MISSQA/figures/1_2411.08082v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.08082v1_figure_1.png", "paperid": "2411.08082v1", - "paper_path": "./MISSQA/papers/2411.08082v1.json", + "paper_path": "./MISS-QA/papers/2411.08082v1.json", "figure_id": "2411.08082v1_figure_1.png", "caption": "Figure 1: SERS deep learning framework development pipeline. Illustrated are the SERS measurement process applied (A), and the computational framework pipeline. Benchmark comparisons of alternative methodology are presented on the right. Preprocessing methods (B) are marked in orange and light red, quantification methods (C) in blue, and explainability methods (D) in dark red. Asymmetric least squares (ALS) baselining is applied to all spectra prior to assessing the framework or the benchmarks. 
SERS = surface-enhanced raman spectroscopy, AuNP = gold nanoparticle, CB[8] = cucurbit[8]uril, CNN = convolutional neural network, XGBoost = extreme gradient boosting trees, PLSR = partial least squares regression, SVM = support vector machines, CRIME = context representative interpretable model explanations, LEN = logic explained networks, SHAP = Shapley additive explanations.", "qtype": "Experimental_Results" @@ -11720,9 +11720,9 @@ "masked_elements": [ "Denoising Autoencoder" ], - "figure_path": "./MISSQA/figures/2_2411.08082v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.08082v1_figure_1.png", "paperid": "2411.08082v1", - "paper_path": "./MISSQA/papers/2411.08082v1.json", + "paper_path": "./MISS-QA/papers/2411.08082v1.json", "figure_id": "2411.08082v1_figure_1.png", "caption": "Figure 1: SERS deep learning framework development pipeline. Illustrated are the SERS measurement process applied (A), and the computational framework pipeline. Benchmark comparisons of alternative methodology are presented on the right. Preprocessing methods (B) are marked in orange and light red, quantification methods (C) in blue, and explainability methods (D) in dark red. Asymmetric least squares (ALS) baselining is applied to all spectra prior to assessing the framework or the benchmarks. SERS = surface-enhanced raman spectroscopy, AuNP = gold nanoparticle, CB[8] = cucurbit[8]uril, CNN = convolutional neural network, XGBoost = extreme gradient boosting trees, PLSR = partial least squares regression, SVM = support vector machines, CRIME = context representative interpretable model explanations, LEN = logic explained networks, SHAP = Shapley additive explanations.", "qtype": "Others" @@ -11741,9 +11741,9 @@ "masked_elements": [ "CRIME" ], - "figure_path": "./MISSQA/figures/3_2411.08082v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2411.08082v1_figure_1.png", "paperid": "2411.08082v1", - "paper_path": "./MISSQA/papers/2411.08082v1.json", + "paper_path": "./MISS-QA/papers/2411.08082v1.json", "figure_id": "2411.08082v1_figure_1.png", "caption": "Figure 1: SERS deep learning framework development pipeline. Illustrated are the SERS measurement process applied (A), and the computational framework pipeline. Benchmark comparisons of alternative methodology are presented on the right. Preprocessing methods (B) are marked in orange and light red, quantification methods (C) in blue, and explainability methods (D) in dark red. Asymmetric least squares (ALS) baselining is applied to all spectra prior to assessing the framework or the benchmarks. 
SERS = surface-enhanced raman spectroscopy, AuNP = gold nanoparticle, CB[8] = cucurbit[8]uril, CNN = convolutional neural network, XGBoost = extreme gradient boosting trees, PLSR = partial least squares regression, SVM = support vector machines, CRIME = context representative interpretable model explanations, LEN = logic explained networks, SHAP = Shapley additive explanations.", "qtype": "Others" @@ -11762,9 +11762,9 @@ "masked_elements": [ "Secure Input Formatting" ], - "figure_path": "./MISSQA/figures/0_2410.21492v2_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2410.21492v2_figure_1.png", "paperid": "2410.21492v2", - "paper_path": "./MISSQA/papers/2410.21492v2.json", + "paper_path": "./MISS-QA/papers/2410.21492v2.json", "figure_id": "2410.21492v2_figure_1.png", "caption": "Figure 1: An illustration of Formatting Authentication with Hash-based Tags.", "qtype": "Others" @@ -11783,9 +11783,9 @@ "masked_elements": [ "Authentication Verification" ], - "figure_path": "./MISSQA/figures/1_2410.21492v2_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2410.21492v2_figure_1.png", "paperid": "2410.21492v2", - "paper_path": "./MISSQA/papers/2410.21492v2.json", + "paper_path": "./MISS-QA/papers/2410.21492v2.json", "figure_id": "2410.21492v2_figure_1.png", "caption": "Figure 1: An illustration of Formatting Authentication with Hash-based Tags.", "qtype": "Others" @@ -11811,9 +11811,9 @@ "masked_elements": [ "secure input formatting" ], - "figure_path": "./MISSQA/figures/2_2410.21492v2_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2410.21492v2_figure_1.png", "paperid": "2410.21492v2", - "paper_path": "./MISSQA/papers/2410.21492v2.json", + "paper_path": "./MISS-QA/papers/2410.21492v2.json", "figure_id": "2410.21492v2_figure_1.png", "caption": "Figure 1: An illustration of Formatting Authentication with Hash-based Tags.", "qtype": "Design_Rationale" @@ -11842,9 +11842,9 @@ "reference multi-level feature", "anchor feature" ], - "figure_path": "./MISSQA/figures/0_2411.12787v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.12787v1_figure_1.png", "paperid": "2411.12787v1", - "paper_path": "./MISSQA/papers/2411.12787v1.json", + "paper_path": "./MISS-QA/papers/2411.12787v1.json", "figure_id": "2411.12787v1_figure_1.png", "caption": "Figure 1: The two training stages of our framework. In the Vision Projector Pretraining stage, we incorporate the proposed Visual Cue Enhancement module (VCE) to enhance the final vision feature map by capturing detailed local information from multi-level visual feature maps. In the Visual Instruction Fine-Tuning stage, we use the proposed Dual Low-Rank Adaptation (Dual-LoRA) as the LLM adapter to learn task-specific knowledge while mitigating data conflicts, which is set trainable alone with VCE and vision projector.", "qtype": "Design_Rationale" @@ -11874,9 +11874,9 @@ "LLM", "vision projector" ], - "figure_path": "./MISSQA/figures/1_2411.12787v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.12787v1_figure_1.png", "paperid": "2411.12787v1", - "paper_path": "./MISSQA/papers/2411.12787v1.json", + "paper_path": "./MISS-QA/papers/2411.12787v1.json", "figure_id": "2411.12787v1_figure_1.png", "caption": "Figure 1: The two training stages of our framework. In the Vision Projector Pretraining stage, we incorporate the proposed Visual Cue Enhancement module (VCE) to enhance the final vision feature map by capturing detailed local information from multi-level visual feature maps. 
In the Visual Instruction Fine-Tuning stage, we use the proposed Dual Low-Rank Adaptation (Dual-LoRA) as the LLM adapter to learn task-specific knowledge while mitigating data conflicts, which is set trainable alone with VCE and vision projector.", "qtype": "Design_Rationale" @@ -11903,9 +11903,9 @@ "masked_elements": [ "VCE", "vision projector" ], - "figure_path": "./MISSQA/figures/2_2411.12787v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.12787v1_figure_1.png", "paperid": "2411.12787v1", - "paper_path": "./MISSQA/papers/2411.12787v1.json", + "paper_path": "./MISS-QA/papers/2411.12787v1.json", "figure_id": "2411.12787v1_figure_1.png", "caption": "Figure 1: The two training stages of our framework. In the Vision Projector Pretraining stage, we incorporate the proposed Visual Cue Enhancement module (VCE) to enhance the final vision feature map by capturing detailed local information from multi-level visual feature maps. In the Visual Instruction Fine-Tuning stage, we use the proposed Dual Low-Rank Adaptation (Dual-LoRA) as the LLM adapter to learn task-specific knowledge while mitigating data conflicts, which is set trainable alone with VCE and vision projector.", "qtype": "Implementation_Details" @@ -11932,9 +11932,9 @@ "masked_elements": [ "LLM" ], - "figure_path": "./MISSQA/figures/3_2411.12787v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2411.12787v1_figure_1.png", "paperid": "2411.12787v1", - "paper_path": "./MISSQA/papers/2411.12787v1.json", + "paper_path": "./MISS-QA/papers/2411.12787v1.json", "figure_id": "2411.12787v1_figure_1.png", "caption": "Figure 1: The two training stages of our framework. In the Vision Projector Pretraining stage, we incorporate the proposed Visual Cue Enhancement module (VCE) to enhance the final vision feature map by capturing detailed local information from multi-level visual feature maps. In the Visual Instruction Fine-Tuning stage, we use the proposed Dual Low-Rank Adaptation (Dual-LoRA) as the LLM adapter to learn task-specific knowledge while mitigating data conflicts, which is set trainable alone with VCE and vision projector.", "qtype": "Implementation_Details" @@ -11960,9 +11960,9 @@ "masked_elements": [ "ControlNet" ], - "figure_path": "./MISSQA/figures/0_2410.07753v2_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2410.07753v2_figure_2.png", "paperid": "2410.07753v2", - "paper_path": "./MISSQA/papers/2410.07753v2.json", + "paper_path": "./MISS-QA/papers/2410.07753v2.json", "figure_id": "2410.07753v2_figure_2.png", "caption": "Figure 2: Overview of the diffusion approach to generate a multi-class dataset. Stage-1 involves training the SD inpainting model using the real images and masks for each organ separately. In stage-2, pre-trained ControlNet is plugged into the SSI model (SSI-CN) to precisely generate each anatomical structure using extracted edges from the segmentation mask. The image composition in stage-3 includes cutting out each organ from the generated image and combining them together to form the multi-class surgical dataset. Stage-4 (optional) includes an image refinement process using SDEdit [36] to rectify inconsistencies during the composition operation and generate the multi-class images. 
We skip stage-1 for the simulated masks and start directly with the inference stages to generate the synthetic datasets.", "qtype": "Implementation_Details" @@ -11986,9 +11986,9 @@ "masked_elements": [ "image composition" ], - "figure_path": "./MISSQA/figures/1_2410.07753v2_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2410.07753v2_figure_2.png", "paperid": "2410.07753v2", - "paper_path": "./MISSQA/papers/2410.07753v2.json", + "paper_path": "./MISS-QA/papers/2410.07753v2.json", "figure_id": "2410.07753v2_figure_2.png", "caption": "Figure 2: Overview of the diffusion approach to generate a multi-class dataset. Stage-1 involves training the SD inpainting model using the real images and masks for each organ separately. In stage-2, pre-trained ControlNet is plugged into the SSI model (SSI-CN) to precisely generate each anatomical structure using extracted edges from the segmentation mask. The image composition in stage-3 includes cutting out each organ from the generated image and combining them together to form the multi-class surgical dataset. Stage-4 (optional) includes an image refinement process using SDEdit [36] to rectify inconsistencies during the composition operation and generate the multi-class images. We skip stage-1 for the simulated masks and start directly with the inference stages to generate the synthetic datasets.", "qtype": "Implementation_Details" @@ -12015,9 +12015,9 @@ "masked_elements": [ "SSI", "ControlNet" ], - "figure_path": "./MISSQA/figures/2_2410.07753v2_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2410.07753v2_figure_2.png", "paperid": "2410.07753v2", - "paper_path": "./MISSQA/papers/2410.07753v2.json", + "paper_path": "./MISS-QA/papers/2410.07753v2.json", "figure_id": "2410.07753v2_figure_2.png", "caption": "Figure 2: Overview of the diffusion approach to generate a multi-class dataset. Stage-1 involves training the SD inpainting model using the real images and masks for each organ separately. In stage-2, pre-trained ControlNet is plugged into the SSI model (SSI-CN) to precisely generate each anatomical structure using extracted edges from the segmentation mask. The image composition in stage-3 includes cutting out each organ from the generated image and combining them together to form the multi-class surgical dataset. Stage-4 (optional) includes an image refinement process using SDEdit [36] to rectify inconsistencies during the composition operation and generate the multi-class images. We skip stage-1 for the simulated masks and start directly with the inference stages to generate the synthetic datasets.", "qtype": "Literature_Background" @@ -12043,9 +12043,9 @@ "masked_elements": [ "Image enhancement" ], - "figure_path": "./MISSQA/figures/3_2410.07753v2_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2410.07753v2_figure_2.png", "paperid": "2410.07753v2", - "paper_path": "./MISSQA/papers/2410.07753v2.json", + "paper_path": "./MISS-QA/papers/2410.07753v2.json", "figure_id": "2410.07753v2_figure_2.png", "caption": "Figure 2: Overview of the diffusion approach to generate a multi-class dataset. Stage-1 involves training the SD inpainting model using the real images and masks for each organ separately. In stage-2, pre-trained ControlNet is plugged into the SSI model (SSI-CN) to precisely generate each anatomical structure using extracted edges from the segmentation mask. 
The image composition in stage-3 includes cutting out each organ from the generated image and combining them together to form the multi-class surgical dataset. Stage-4 (optional) includes an image refinement process using SDEdit [36] to rectify inconsistencies during the composition operation and generate the multi-class images. We skip stage-1 for the simulated masks and start directly with the inference stages to generate the synthetic datasets.", "qtype": "Literature_Background" @@ -12066,9 +12066,9 @@ "masked_elements": [ "Code Verification Judge", "Simulation" ], - "figure_path": "./MISSQA/figures/0_2411.02317v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.02317v1_figure_1.png", "paperid": "2411.02317v1", - "paper_path": "./MISSQA/papers/2411.02317v1.json", + "paper_path": "./MISS-QA/papers/2411.02317v1.json", "figure_id": "2411.02317v1_figure_1.png", "caption": "Figure 1: Benchmarking LLM Physical Safety in Drone Control: Threats, Process, Datasets, and Results. Top: This figure categorizes safety threats for drones, including attacks on humans, objects, infrastructure, and FAA regulations, highlighting the process by which an LLM-controlled drone could be used to carry out physical harm and damage. Mid: Flowchart illustrating the benchmark process for drone control. Given a specific LLM, the evaluation process involves providing a test prompt and categorizing the output based on whether code is included in the response. The output is then assessed by two AI judges. The code is further tested in a simulation environment to detect potential collision risks, contributing to the final safety evaluation for the model. Bottom left: Composition of the benchmark’s evaluation datasets, which are primarily categorized into four main types: deliberate attacks, unintentional attacks, violation attacks, and utility. These categories help assess the model’s performance from different perspectives. Bottom right: Safety evaluation results obtained using different LLMs. It is observed that LLMs with higher scores in utility and code fidelity (indicating better control over the drone using code) tend to show higher safety risks than others. The safety metrics are formally defined in Materials and Methods. Please refer to the supplementary file for the video demo.", "qtype": "Literature_Background" @@ -12089,9 +12089,9 @@ "masked_elements": [ "Instruction from benchmark", "Safety Evaluation Judge" ], - "figure_path": "./MISSQA/figures/1_2411.02317v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.02317v1_figure_1.png", "paperid": "2411.02317v1", - "paper_path": "./MISSQA/papers/2411.02317v1.json", + "paper_path": "./MISS-QA/papers/2411.02317v1.json", "figure_id": "2411.02317v1_figure_1.png", "caption": "Figure 1: Benchmarking LLM Physical Safety in Drone Control: Threats, Process, Datasets, and Results. Top: This figure categorizes safety threats for drones, including attacks on humans, objects, infrastructure, and FAA regulations, highlighting the process by which an LLM-controlled drone could be used to carry out physical harm and damage. Mid: Flowchart illustrating the benchmark process for drone control. Given a specific LLM, the evaluation process involves providing a test prompt and categorizing the output based on whether code is included in the response. The output is then assessed by two AI judges. The code is further tested in a simulation environment to detect potential collision risks, contributing to the final safety evaluation for the model. 
Bottom left: Composition of the benchmark’s evaluation datasets, which are primarily categorized into four main types: deliberate attacks, unintentional attacks, violation attacks, and utility. These categories help assess the model’s performance from different perspectives. Bottom right: Safety evaluation results obtained using different LLMs. It is observed that LLMs with higher scores in utility and code fidelity (indicating better control over the drone using code) tend to show higher safety risks than others. The safety metrics are formally defined in Materials and Methods. Please refer to the supplementary file for the video demo.", "qtype": "Literature_Background" @@ -12117,9 +12117,9 @@ "masked_elements": [ "Response with code" ], - "figure_path": "./MISSQA/figures/2_2411.02317v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.02317v1_figure_1.png", "paperid": "2411.02317v1", - "paper_path": "./MISSQA/papers/2411.02317v1.json", + "paper_path": "./MISS-QA/papers/2411.02317v1.json", "figure_id": "2411.02317v1_figure_1.png", "caption": "Figure 1: Benchmarking LLM Physical Safety in Drone Control: Threats, Process, Datasets, and Results. Top: This figure categorizes safety threats for drones, including attacks on humans, objects, infrastructure, and FAA regulations, highlighting the process by which an LLM-controlled drone could be used to carry out physical harm and damage. Mid: Flowchart illustrating the benchmark process for drone control. Given a specific LLM, the evaluation process involves providing a test prompt and categorizing the output based on whether code is included in the response. The output is then assessed by two AI judges. The code is further tested in a simulation environment to detect potential collision risks, contributing to the final safety evaluation for the model. Bottom left: Composition of the benchmark’s evaluation datasets, which are primarily categorized into four main types: deliberate attacks, unintentional attacks, violation attacks, and utility. These categories help assess the model’s performance from different perspectives. Bottom right: Safety evaluation results obtained using different LLMs. It is observed that LLMs with higher scores in utility and code fidelity (indicating better control over the drone using code) tend to show higher safety risks than others. The safety metrics are formally defined in Materials and Methods. Please refer to the supplementary file for the video demo.", "qtype": "Experimental_Results" @@ -12145,9 +12145,9 @@ "Simulation", "Safety Evaluation Judge" ], - "figure_path": "./MISSQA/figures/3_2411.02317v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2411.02317v1_figure_1.png", "paperid": "2411.02317v1", - "paper_path": "./MISSQA/papers/2411.02317v1.json", + "paper_path": "./MISS-QA/papers/2411.02317v1.json", "figure_id": "2411.02317v1_figure_1.png", "caption": "Figure 1: Benchmarking LLM Physical Safety in Drone Control: Threats, Process, Datasets, and Results. Top: This figure categorizes safety threats for drones, including attacks on humans, objects, infrastructure, and FAA regulations, highlighting the process by which an LLM-controlled drone could be used to carry out physical harm and damage. Mid: Flowchart illustrating the benchmark process for drone control. Given a specific LLM, the evaluation process involves providing a test prompt and categorizing the output based on whether code is included in the response. The output is then assessed by two AI judges. 
The code is further tested in a simulation environment to detect potential collision risks, contributing to the final safety evaluation for the model. Bottom left: Composition of the benchmark’s evaluation datasets, which are primarily categorized into four main types: deliberate attacks, unintentional attacks, violation attacks, and utility. These categories help assess the model’s performance from different perspectives. Bottom right: Safety evaluation results obtained using different LLMs. It is observed that LLMs with higher scores in utility and code fidelity (indicating better control over the drone using code) tend to show higher safety risks than others. The safety metrics are formally defined in Materials and Methods. Please refer to the supplementary file for the video demo.", "qtype": "Experimental_Results" @@ -12174,9 +12174,9 @@ "joint tokens", "Temporal Transformer" ], - "figure_path": "./MISSQA/figures/0_2410.16337v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2410.16337v1_figure_2.png", "paperid": "2410.16337v1", - "paper_path": "./MISSQA/papers/2410.16337v1.json", + "paper_path": "./MISS-QA/papers/2410.16337v1.json", "figure_id": "2410.16337v1_figure_2.png", "caption": "Figure 2: Our method comprises two pivotal modules: (1) a Spatial Transformer (S-Trans) for normal map prediction and (2) a Temporal Transformer (T-Trans) for temporal information extraction from sequences of normal maps. Different procedures are employed for visible (orange box) and invisible (green box) point, with the primary distinction lying in the normal map prediction module. Two S-Trans with identical structures are employed to separately predict the front and back maps. The front-normal S-Trans uses the image as input, while the back-normal S-Trans utilizes the front normal map. The T-Trans utilizes the same module across both prediction processes. The Joint Tokens are employed to guide the correspondence between adjacent frames in network learning. The output joint positions serve as a supervisory signal for the network. In addition, to enhance the network’s learning in ambiguous areas, we introduced random mask during the training process in the second stage. Finally, the input of the implicit function consists of 2D features (Normal map), 3D features (SDF), and temporal features (T-Trans).", "qtype": "Experimental_Results" @@ -12202,9 +12202,9 @@ "random mask", "Implicit Function" ], - "figure_path": "./MISSQA/figures/1_2410.16337v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2410.16337v1_figure_2.png", "paperid": "2410.16337v1", - "paper_path": "./MISSQA/papers/2410.16337v1.json", + "paper_path": "./MISS-QA/papers/2410.16337v1.json", "figure_id": "2410.16337v1_figure_2.png", "caption": "Figure 2: Our method comprises two pivotal modules: (1) a Spatial Transformer (S-Trans) for normal map prediction and (2) a Temporal Transformer (T-Trans) for temporal information extraction from sequences of normal maps. Different procedures are employed for visible (orange box) and invisible (green box) point, with the primary distinction lying in the normal map prediction module. Two S-Trans with identical structures are employed to separately predict the front and back maps. The front-normal S-Trans uses the image as input, while the back-normal S-Trans utilizes the front normal map. The T-Trans utilizes the same module across both prediction processes. The Joint Tokens are employed to guide the correspondence between adjacent frames in network learning. 
The output joint positions serve as a supervisory signal for the network. In addition, to enhance the network’s learning in ambiguous areas, we introduced random mask during the training process in the second stage. Finally, the input of the implicit function consists of 2D features (Normal map), 3D features (SDF), and temporal features (T-Trans).", "qtype": "Experimental_Results" @@ -12229,9 +12229,9 @@ "masked_elements": [ "Spatial Transformer" ], - "figure_path": "./MISSQA/figures/2_2410.16337v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2410.16337v1_figure_2.png", "paperid": "2410.16337v1", - "paper_path": "./MISSQA/papers/2410.16337v1.json", + "paper_path": "./MISS-QA/papers/2410.16337v1.json", "figure_id": "2410.16337v1_figure_2.png", "caption": "Figure 2: Our method comprises two pivotal modules: (1) a Spatial Transformer (S-Trans) for normal map prediction and (2) a Temporal Transformer (T-Trans) for temporal information extraction from sequences of normal maps. Different procedures are employed for visible (orange box) and invisible (green box) point, with the primary distinction lying in the normal map prediction module. Two S-Trans with identical structures are employed to separately predict the front and back maps. The front-normal S-Trans uses the image as input, while the back-normal S-Trans utilizes the front normal map. The T-Trans utilizes the same module across both prediction processes. The Joint Tokens are employed to guide the correspondence between adjacent frames in network learning. The output joint positions serve as a supervisory signal for the network. In addition, to enhance the network’s learning in ambiguous areas, we introduced random mask during the training process in the second stage. Finally, the input of the implicit function consists of 2D features (Normal map), 3D features (SDF), and temporal features (T-Trans).", "qtype": "Others" @@ -12251,9 +12251,9 @@ "masked_elements": [ "Joint Tokens" ], - "figure_path": "./MISSQA/figures/3_2410.16337v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2410.16337v1_figure_2.png", "paperid": "2410.16337v1", - "paper_path": "./MISSQA/papers/2410.16337v1.json", + "paper_path": "./MISS-QA/papers/2410.16337v1.json", "figure_id": "2410.16337v1_figure_2.png", "caption": "Figure 2: Our method comprises two pivotal modules: (1) a Spatial Transformer (S-Trans) for normal map prediction and (2) a Temporal Transformer (T-Trans) for temporal information extraction from sequences of normal maps. Different procedures are employed for visible (orange box) and invisible (green box) point, with the primary distinction lying in the normal map prediction module. Two S-Trans with identical structures are employed to separately predict the front and back maps. The front-normal S-Trans uses the image as input, while the back-normal S-Trans utilizes the front normal map. The T-Trans utilizes the same module across both prediction processes. The Joint Tokens are employed to guide the correspondence between adjacent frames in network learning. The output joint positions serve as a supervisory signal for the network. In addition, to enhance the network’s learning in ambiguous areas, we introduced random mask during the training process in the second stage. 
Finally, the input of the implicit function consists of 2D features (Normal map), 3D features (SDF), and temporal features (T-Trans).", "qtype": "Others" @@ -12274,9 +12274,9 @@ "Specialist learning", "Pre-trained T2I model (SD)" ], - "figure_path": "./MISSQA/figures/0_2411.07199v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.07199v1_figure_2.png", "paperid": "2411.07199v1", - "paper_path": "./MISSQA/papers/2411.07199v1.json", + "paper_path": "./MISS-QA/papers/2411.07199v1.json", "figure_id": "2411.07199v1_figure_2.png", "caption": "Figure 2: Overview of the Omni-Edit training pipeline.", "qtype": "Others" @@ -12295,9 +12295,9 @@ "masked_elements": [ "Confidence Scoring function" ], - "figure_path": "./MISSQA/figures/1_2411.07199v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2411.07199v1_figure_2.png", "paperid": "2411.07199v1", - "paper_path": "./MISSQA/papers/2411.07199v1.json", + "paper_path": "./MISS-QA/papers/2411.07199v1.json", "figure_id": "2411.07199v1_figure_2.png", "caption": "Figure 2: Overview of the Omni-Edit training pipeline.", "qtype": "Others" @@ -12324,9 +12324,9 @@ "specialist models", "Omni-Edit" ], - "figure_path": "./MISSQA/figures/2_2411.07199v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2411.07199v1_figure_2.png", "paperid": "2411.07199v1", - "paper_path": "./MISSQA/papers/2411.07199v1.json", + "paper_path": "./MISS-QA/papers/2411.07199v1.json", "figure_id": "2411.07199v1_figure_2.png", "caption": "Figure 2: Overview of the Omni-Edit training pipeline.", "qtype": "Design_Rationale" @@ -12352,9 +12352,9 @@ "masked_elements": [ "FRCAB" ], - "figure_path": "./MISSQA/figures/0_2411.18409v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.18409v1_figure_1.png", "paperid": "2411.18409v1", - "paper_path": "./MISSQA/papers/2411.18409v1.json", + "paper_path": "./MISS-QA/papers/2411.18409v1.json", "figure_id": "2411.18409v1_figure_1.png", "caption": "Figure 1: The framework of our DFENet, which consists of: (a) a RGB and thermal stream encoder; (b) Modal-coordinated perception attention (MPA); (c) an edge-guided frequency pyramid decoder (EFPD) and Fourier residual channel attention block (FRCAB); and (d) Frequency-decomposed edge-aware module (FEM) and edge frequency extraction block (EFEB).", "qtype": "Implementation_Details" @@ -12379,9 +12379,9 @@ "masked_elements": [ "FEM" ], - "figure_path": "./MISSQA/figures/1_2411.18409v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.18409v1_figure_1.png", "paperid": "2411.18409v1", - "paper_path": "./MISSQA/papers/2411.18409v1.json", + "paper_path": "./MISS-QA/papers/2411.18409v1.json", "figure_id": "2411.18409v1_figure_1.png", "caption": "Figure 1: The framework of our DFENet, which consists of: (a) a RGB and thermal stream encoder; (b) Modal-coordinated perception attention (MPA); (c) an edge-guided frequency pyramid decoder (EFPD) and Fourier residual channel attention block (FRCAB); and (d) Frequency-decomposed edge-aware module (FEM) and edge frequency extraction block (EFEB).", "qtype": "Implementation_Details" @@ -12408,9 +12408,9 @@ "masked_elements": [ "Bayesian-guided Label Mapping" ], - "figure_path": "./MISSQA/figures/0_2410.24018v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2410.24018v1_figure_2.png", "paperid": "2410.24018v1", - "paper_path": "./MISSQA/papers/2410.24018v1.json", + "paper_path": "./MISS-QA/papers/2410.24018v1.json", "figure_id": "2410.24018v1_figure_2.png", "caption": "Figure 2: Learning strategy of BLM and BLM+. 
First, input images, incorporated with VR watermarking or padding patterns, are fed into a fixed pretrained model to obtain logits and predicted labels.\nThen, the true labels (of $y^{\\rm T}$) and predicted labels (of $y^{\\rm S}$) are used to estimate $\\omega_{\\rm BLM}$ or $\\omega_{\\rm BLM_{\\rm+}}$.\nNext, using $\\omega_{\\rm BLM}$ or $\\omega_{\\rm BLM_{\\rm+}}$ that reweights output logits of pretrained models for the downstream labels, the predicted results can be derived. Finally, backpropagation is performed to update the input VR.", "qtype": "Implementation_Details" @@ -12438,9 +12438,9 @@ "masked_elements": [ "Input Visual Reprogramming" ], - "figure_path": "./MISSQA/figures/1_2410.24018v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2410.24018v1_figure_2.png", "paperid": "2410.24018v1", - "paper_path": "./MISSQA/papers/2410.24018v1.json", + "paper_path": "./MISS-QA/papers/2410.24018v1.json", "figure_id": "2410.24018v1_figure_2.png", "caption": "Figure 2: Learning strategy of BLM and BLM+. First, input images, incorporated with VR watermarking or padding patterns, are fed into a fixed pretrained model to obtain logits and predicted labels.\nThen, the true labels (of $y^{\\rm T}$) and predicted labels (of $y^{\\rm S}$) are used to estimate $\\omega_{\\rm BLM}$ or $\\omega_{\\rm BLM_{\\rm+}}$.\nNext, using $\\omega_{\\rm BLM}$ or $\\omega_{\\rm BLM_{\\rm+}}$ that reweights output logits of pretrained models for the downstream labels, the predicted results can be derived. Finally, backpropagation is performed to update the input VR.", "qtype": "Literature_Background" @@ -12469,9 +12469,9 @@ "masked_elements": [ "Frequency Distribution", "Bayesian-guided Label Mapping" ], - "figure_path": "./MISSQA/figures/2_2410.24018v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2410.24018v1_figure_2.png", "paperid": "2410.24018v1", - "paper_path": "./MISSQA/papers/2410.24018v1.json", + "paper_path": "./MISS-QA/papers/2410.24018v1.json", "figure_id": "2410.24018v1_figure_2.png", "caption": "Figure 2: Learning strategy of BLM and BLM+. 
First, input images, incorporated with VR watermarking or padding patterns, are fed into a fixed pretrained model to obtain logits and predicted labels.\nThen, the true labels (of $y^{\\rm T}$) and predicted labels (of $y^{\\rm S}$) are used to estimate $\\omega_{\\rm BLM}$ or $\\omega_{\\rm BLM_{\\rm+}}$.\nNext, using $\\omega_{\\rm BLM}$ or $\\omega_{\\rm BLM_{\\rm+}}$ that reweights output logits of pretrained models for the downstream labels, the predicted results can be derived. Finally, backpropagation is performed to update the input VR.", "qtype": "Literature_Background" @@ -12497,9 +12497,9 @@ "masked_elements": [ "Extracting Facts from Wikidata" ], - "figure_path": "./MISSQA/figures/0_2408.12076v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2408.12076v1_figure_1.png", "paperid": "2408.12076v1", - "paper_path": "./MISSQA/papers/2408.12076v1.json", + "paper_path": "./MISS-QA/papers/2408.12076v1.json", "figure_id": "2408.12076v1_figure_1.png", "caption": "Figure 1: The pipeline of ConflictBank construction. (1) We extract facts from Wikidata and (2) transform them into conflict claims based on different causes, then (3) employ LLM to generate evidence in three text styles, and finally (4) apply three processes to control data quality: feature filtering, fact-evidence entailment checking, and conflict confirmation between evidence.", "qtype": "Literature_Background" @@ -12524,9 +12524,9 @@ "masked_elements": [ "Text2Param" ], - "figure_path": "./MISSQA/figures/0_2411.04788v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.04788v1_figure_1.png", "paperid": "2411.04788v1", - "paper_path": "./MISSQA/papers/2411.04788v1.json", + "paper_path": "./MISS-QA/papers/2411.04788v1.json", "figure_id": "2411.04788v1_figure_1.png", "caption": "Figure 1. Overview of proposed multi-agent collaboration framework with unified RAG & tool function calling.", "qtype": "Experimental_Results" @@ -12554,9 +12554,9 @@ "masked_elements": [ "Single", "Dual" ], - "figure_path": "./MISSQA/figures/1_2411.04788v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.04788v1_figure_1.png", "paperid": "2411.04788v1", - "paper_path": "./MISSQA/papers/2411.04788v1.json", + "paper_path": "./MISS-QA/papers/2411.04788v1.json", "figure_id": "2411.04788v1_figure_1.png", "caption": "Figure 1. Overview of proposed multi-agent collaboration framework with unified RAG & tool function calling.", "qtype": "Experimental_Results" @@ -12577,9 +12577,9 @@ "masked_elements": [ "retrieve_content", "Text2Param" ], - "figure_path": "./MISSQA/figures/2_2411.04788v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.04788v1_figure_1.png", "paperid": "2411.04788v1", - "paper_path": "./MISSQA/papers/2411.04788v1.json", + "paper_path": "./MISS-QA/papers/2411.04788v1.json", "figure_id": "2411.04788v1_figure_1.png", "caption": "Figure 1. 
Overview of proposed multi-agent collaboration framework with unified RAG & tool function calling.", "qtype": "Others" @@ -12600,9 +12600,9 @@ "transportation cost", "classifier consolidation" ], - "figure_path": "./MISSQA/figures/0_2410.00911v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2410.00911v1_figure_1.png", "paperid": "2410.00911v1", - "paper_path": "./MISSQA/papers/2410.00911v1.json", + "paper_path": "./MISS-QA/papers/2410.00911v1.json", "figure_id": "2410.00911v1_figure_1.png", "caption": "Figure 1: Illustration of Duct.\nTop: Representation consolidation. We utilize the pre-trained model as initialization and optimize it for each domain, obtaining the task vectors. Afterward, we combine the pre-trained model and all seen task vectors to build the unified embedding space. Bottom: Classifier consolidation. To align the classifiers with consolidated features, we design the new classifier retraining and old classifier transport to consolidate classifiers. Class-wise semantic information is utilized in classifier transport.", "qtype": "Others" @@ -12629,9 +12629,9 @@ "masked_elements": [ "transportation cost" ], - "figure_path": "./MISSQA/figures/1_2410.00911v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2410.00911v1_figure_1.png", "paperid": "2410.00911v1", - "paper_path": "./MISSQA/papers/2410.00911v1.json", + "paper_path": "./MISS-QA/papers/2410.00911v1.json", "figure_id": "2410.00911v1_figure_1.png", "caption": "Figure 1: Illustration of Duct.\nTop: Representation consolidation. We utilize the pre-trained model as initialization and optimize it for each domain, obtaining the task vectors. Afterward, we combine the pre-trained model and all seen task vectors to build the unified embedding space. Bottom: Classifier consolidation. To align the classifiers with consolidated features, we design the new classifier retraining and old classifier transport to consolidate classifiers. Class-wise semantic information is utilized in classifier transport.", "qtype": "Design_Rationale" @@ -12658,9 +12658,9 @@ "masked_elements": [ "pseudo open-set sampling" ], - "figure_path": "./MISSQA/figures/0_2411.09986v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.09986v1_figure_2.png", "paperid": "2411.09986v1", - "paper_path": "./MISSQA/papers/2411.09986v1.json", + "paper_path": "./MISS-QA/papers/2411.09986v1.json", "figure_id": "2411.09986v1_figure_2.png", "caption": "Figure 2: \nOverall training framework of OAL-OFL.\n(a) In Stage 1, the feature encoder and a learnable open-set prototype undergo distance-based meta-learning [37] with an additional class representing the open set. (b) In Stage 2, feature encoder and prototypes are further transfer-learned to the target task under an open-set-free condition. Open-set training examples can be alternatively drawn from the base training dataset (green) or from a subset of the closed-set categories that is randomly selected as a pseudo open set (purple).", "qtype": "Implementation_Details" @@ -12689,9 +12689,9 @@ "MLP", "Segmentation-Aware 3D Features" ], - "figure_path": "./MISSQA/figures/0_2411.07184v2_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.07184v2_figure_2.png", "paperid": "2411.07184v2", - "paper_path": "./MISSQA/papers/2411.07184v2.json", + "paper_path": "./MISS-QA/papers/2411.07184v2.json", "figure_id": "2411.07184v2_figure_2.png", "caption": "Figure 2: An overview pipeline of SAMPart3D. 
(a) We first pre-train 3D backbone PTv3-object on 3D large-scale data Objaverse, distilling visual features from FeatUp-DINOv2. (b) Next, we train light-weight MLPs to distill 2D masks to scale-conditioned grouping. (c) Finally, we cluster the feature of point clouds and highlight the consistent 2D part area with 2D-3D mapping on multi-view renderings, and then query semantics from MLLMs.", "qtype": "Implementation_Details" @@ -12720,9 +12720,9 @@ "DINOv2", "PTv3-object" ], - "figure_path": "./MISSQA/figures/1_2411.07184v2_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2411.07184v2_figure_2.png", "paperid": "2411.07184v2", - "paper_path": "./MISSQA/papers/2411.07184v2.json", + "paper_path": "./MISS-QA/papers/2411.07184v2.json", "figure_id": "2411.07184v2_figure_2.png", "caption": "Figure 2: An overview pipeline of SAMPart3D. (a) We first pre-train 3D backbone PTv3-object on 3D large-scale data Objaverse, distilling visual features from FeatUp-DINOv2. (b) Next, we train light-weight MLPs to distill 2D masks to scale-conditioned grouping. (c) Finally, we cluster the feature of point clouds and highlight the consistent 2D part area with 2D-3D mapping on multi-view renderings, and then query semantics from MLLMs.", "qtype": "Literature_Background" @@ -12749,9 +12749,9 @@ "MLP", "contrastive learning" ], - "figure_path": "./MISSQA/figures/2_2411.07184v2_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2411.07184v2_figure_2.png", "paperid": "2411.07184v2", - "paper_path": "./MISSQA/papers/2411.07184v2.json", + "paper_path": "./MISS-QA/papers/2411.07184v2.json", "figure_id": "2411.07184v2_figure_2.png", "caption": "Figure 2: An overview pipeline of SAMPart3D. (a) We first pre-train 3D backbone PTv3-object on 3D large-scale data Objaverse, distilling visual features from FeatUp-DINOv2. (b) Next, we train light-weight MLPs to distill 2D masks to scale-conditioned grouping. (c) Finally, we cluster the feature of point clouds and highlight the consistent 2D part area with 2D-3D mapping on multi-view renderings, and then query semantics from MLLMs.", "qtype": "Literature_Background" @@ -12776,9 +12776,9 @@ "masked_elements": [ "Data Synthesis" ], - "figure_path": "./MISSQA/figures/0_2408.13467v2_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2408.13467v2_figure_1.png", "paperid": "2408.13467v2", - "paper_path": "./MISSQA/papers/2408.13467v2.json", + "paper_path": "./MISS-QA/papers/2408.13467v2.json", "figure_id": "2408.13467v2_figure_1.png", "caption": "Figure 1: The LLMOps pipeline namely LlamaDuo for migrating from service LLMs to small-scale local LLMs involves three phases. In the Development/PoC phase, ① users manually engineer prompts to interact with service LLMs and ② collect satisfying (prompt, response) pairs into train and test datasets. In the Alignment phase, ③ local LLMs are aligned with the train dataset, ④ tested on the test dataset, and ⑤ evaluated by service LLMs. 
⑥ Synthetic data is generated iteratively until the performance of the aligned model meets a threshold.\nIn the Deployment phase, ⑦ the satisfactory model is deployed in constrained environments.", "qtype": "Literature_Background" @@ -12803,9 +12803,9 @@ "masked_elements": [ "Evaluation" ], - "figure_path": "./MISSQA/figures/1_2408.13467v2_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2408.13467v2_figure_1.png", "paperid": "2408.13467v2", - "paper_path": "./MISSQA/papers/2408.13467v2.json", + "paper_path": "./MISS-QA/papers/2408.13467v2.json", "figure_id": "2408.13467v2_figure_1.png", "caption": "Figure 1: The LLMOps pipeline namely LlamaDuo for migrating from service LLMs to small-scale local LLMs involves three phases. In the Development/PoC phase, ① users manually engineer prompts to interact with service LLMs and ② collect satisfying (prompt, response) pairs into train and test datasets. In the Alignment phase, ③ local LLMs are aligned with the train dataset, ④ tested on the test dataset, and ⑤ evaluated by service LLMs. ⑥ Synthetic data is generated iteratively until the performance of the aligned model meets a threshold.\nIn the Deployment phase, ⑦ the satisfactory model is deployed in constrained environments.", "qtype": "Literature_Background" @@ -12830,9 +12830,9 @@ "masked_elements": [ "Data Synthesis" ], - "figure_path": "./MISSQA/figures/2_2408.13467v2_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2408.13467v2_figure_1.png", "paperid": "2408.13467v2", - "paper_path": "./MISSQA/papers/2408.13467v2.json", + "paper_path": "./MISS-QA/papers/2408.13467v2.json", "figure_id": "2408.13467v2_figure_1.png", "caption": "Figure 1: The LLMOps pipeline namely LlamaDuo for migrating from service LLMs to small-scale local LLMs involves three phases. In the Development/PoC phase, ① users manually engineer prompts to interact with service LLMs and ② collect satisfying (prompt, response) pairs into train and test datasets. In the Alignment phase, ③ local LLMs are aligned with the train dataset, ④ tested on the test dataset, and ⑤ evaluated by service LLMs. ⑥ Synthetic data is generated iteratively until the performance of the aligned model meets a threshold.\nIn the Deployment phase, ⑦ the satisfactory model is deployed in constrained environments.", "qtype": "Literature_Background" @@ -12857,9 +12857,9 @@ "masked_elements": [ "Batch Inference" ], - "figure_path": "./MISSQA/figures/3_2408.13467v2_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2408.13467v2_figure_1.png", "paperid": "2408.13467v2", - "paper_path": "./MISSQA/papers/2408.13467v2.json", + "paper_path": "./MISS-QA/papers/2408.13467v2.json", "figure_id": "2408.13467v2_figure_1.png", "caption": "Figure 1: The LLMOps pipeline namely LlamaDuo for migrating from service LLMs to small-scale local LLMs involves three phases. In the Development/PoC phase, ① users manually engineer prompts to interact with service LLMs and ② collect satisfying (prompt, response) pairs into train and test datasets. In the Alignment phase, ③ local LLMs are aligned with the train dataset, ④ tested on the test dataset, and ⑤ evaluated by service LLMs. 
⑥ Synthetic data is generated iteratively until the performance of the aligned model meets a threshold.\nIn the Deployment phase, ⑦ the satisfactory model is deployed in constrained environments.", "qtype": "Experimental_Results" @@ -12883,9 +12883,9 @@ "masked_elements": [ "SWF Module" ], - "figure_path": "./MISSQA/figures/1_2409.14827v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2409.14827v1_figure_2.png", "paperid": "2409.14827v1", - "paper_path": "./MISSQA/papers/2409.14827v1.json", + "paper_path": "./MISS-QA/papers/2409.14827v1.json", "figure_id": "2409.14827v1_figure_2.png", "caption": "Figure 2: An overview of the proposed network. SC[25], SE[17], and ShuffleAttn[54] are plug-and-play attention modules. SWF and GA stand for Saliency-Weighted Feature Module and Gated Attention, respectively.", "qtype": "Implementation_Details" @@ -12905,9 +12905,9 @@ "masked_elements": [ "ShuffleAttn" ], - "figure_path": "./MISSQA/figures/2_2409.14827v1_figure_2.png", + "figure_path": "./MISS-QA/figures/2_2409.14827v1_figure_2.png", "paperid": "2409.14827v1", - "paper_path": "./MISSQA/papers/2409.14827v1.json", + "paper_path": "./MISS-QA/papers/2409.14827v1.json", "figure_id": "2409.14827v1_figure_2.png", "caption": "Figure 2: An overview of the proposed network. SC[25], SE[17], and ShuffleAttn[54] are plug-and-play attention modules. SWF and GA stand for Saliency-Weighted Feature Module and Gated Attention, respectively.", "qtype": "Literature_Background" @@ -12933,9 +12933,9 @@ "SWF Module", "SCAM" ], - "figure_path": "./MISSQA/figures/3_2409.14827v1_figure_2.png", + "figure_path": "./MISS-QA/figures/3_2409.14827v1_figure_2.png", "paperid": "2409.14827v1", - "paper_path": "./MISSQA/papers/2409.14827v1.json", + "paper_path": "./MISS-QA/papers/2409.14827v1.json", "figure_id": "2409.14827v1_figure_2.png", "caption": "Figure 2: An overview of the proposed network. SC[25], SE[17], and ShuffleAttn[54] are plug-and-play attention modules. SWF and GA stand for Saliency-Weighted Feature Module and Gated Attention, respectively.", "qtype": "Literature_Background" @@ -12963,9 +12963,9 @@ "CAN", "CGN" ], - "figure_path": "./MISSQA/figures/0_2407.11086v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2407.11086v1_figure_1.png", "paperid": "2407.11086v1", - "paper_path": "./MISSQA/papers/2407.11086v1.json", + "paper_path": "./MISS-QA/papers/2407.11086v1.json", "figure_id": "2407.11086v1_figure_1.png", "caption": "Figure 1: \nOverview of Fractional denoising (Frad). a. An illustration of the molecular conformational changes. The local structures can vibrate in small scale, while some single bonds can rotate flexibly.\nb. The noise-adding process in the Frad framework. A two-phase hybrid random noise is applied to the equilibrium conformation, including the chemical-aware noise (CAN) that describes the molecular conformational changes and coordinate Gaussian noise (CGN). We present two versions of CAN.\nc. Pre-training process of Frad. The unlabeled molecular data is processed by adding noise and then utilized as the input of the graph neural networks to predict the CGN. This task is proved to be equivalent to learning the approximate atomic forces in the molecule.\nd. Fine-tuning process of Frad. The GNN model inherits the pre-trained weights and continues to be updated together with a prediction head for specific downstream tasks.\ne. 
Advancements of Frad over with coordinate denoising methods (Coord) [28, 33, 30, 31, 29, 32] through the perspective of chemical priors and physical interpretations. The noise of Frad is customizable, enabling capturing both rotations and vibrations in molecular conformation changes. Frad s superior modeling of molecular distribution further enabling larger sampling coverage and more accurate force targets in the equivalent force learning task, resulting in effective pre-training and improved downstream performance.\n\\added[id=2] f. An illustration of model architecture. The model primarily follows the TorchMD-NET framework, with our minor modifications highlighted in dotted orange boxes.", "qtype": "Literature_Background" @@ -12990,9 +12990,9 @@ "masked_elements": [ "update layer" ], - "figure_path": "./MISSQA/figures/1_2407.11086v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2407.11086v1_figure_1.png", "paperid": "2407.11086v1", - "paper_path": "./MISSQA/papers/2407.11086v1.json", + "paper_path": "./MISS-QA/papers/2407.11086v1.json", "figure_id": "2407.11086v1_figure_1.png", "caption": "Figure 1: \nOverview of Fractional denoising (Frad). a. An illustration of the molecular conformational changes. The local structures can vibrate in small scale, while some single bonds can rotate flexibly.\nb. The noise-adding process in the Frad framework. A two-phase hybrid random noise is applied to the equilibrium conformation, including the chemical-aware noise (CAN) that describes the molecular conformational changes and coordinate Gaussian noise (CGN). We present two versions of CAN.\nc. Pre-training process of Frad. The unlabeled molecular data is processed by adding noise and then utilized as the input of the graph neural networks to predict the CGN. This task is proved to be equivalent to learning the approximate atomic forces in the molecule.\nd. Fine-tuning process of Frad. The GNN model inherits the pre-trained weights and continues to be updated together with a prediction head for specific downstream tasks.\ne. Advancements of Frad over with coordinate denoising methods (Coord) [28, 33, 30, 31, 29, 32] through the perspective of chemical priors and physical interpretations. The noise of Frad is customizable, enabling capturing both rotations and vibrations in molecular conformation changes. Frad s superior modeling of molecular distribution further enabling larger sampling coverage and more accurate force targets in the equivalent force learning task, resulting in effective pre-training and improved downstream performance.\n\\added[id=2] f. An illustration of model architecture. The model primarily follows the TorchMD-NET framework, with our minor modifications highlighted in dotted orange boxes.", "qtype": "Literature_Background" @@ -13021,9 +13021,9 @@ "Chemical-Aware Noise (CAN)", "Coordinate Gaussian Noise (CGN)" ], - "figure_path": "./MISSQA/figures/2_2407.11086v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2407.11086v1_figure_1.png", "paperid": "2407.11086v1", - "paper_path": "./MISSQA/papers/2407.11086v1.json", + "paper_path": "./MISS-QA/papers/2407.11086v1.json", "figure_id": "2407.11086v1_figure_1.png", "caption": "Figure 1: \nOverview of Fractional denoising (Frad). a. An illustration of the molecular conformational changes. The local structures can vibrate in small scale, while some single bonds can rotate flexibly.\nb. The noise-adding process in the Frad framework. 
A two-phase hybrid random noise is applied to the equilibrium conformation, including the chemical-aware noise (CAN) that describes the molecular conformational changes and coordinate Gaussian noise (CGN). We present two versions of CAN.\nc. Pre-training process of Frad. The unlabeled molecular data is processed by adding noise and then utilized as the input of the graph neural networks to predict the CGN. This task is proved to be equivalent to learning the approximate atomic forces in the molecule.\nd. Fine-tuning process of Frad. The GNN model inherits the pre-trained weights and continues to be updated together with a prediction head for specific downstream tasks.\ne. Advancements of Frad over with coordinate denoising methods (Coord) [28, 33, 30, 31, 29, 32] through the perspective of chemical priors and physical interpretations. The noise of Frad is customizable, enabling capturing both rotations and vibrations in molecular conformation changes. Frad s superior modeling of molecular distribution further enabling larger sampling coverage and more accurate force targets in the equivalent force learning task, resulting in effective pre-training and improved downstream performance.\n\\added[id=2] f. An illustration of model architecture. The model primarily follows the TorchMD-NET framework, with our minor modifications highlighted in dotted orange boxes.", "qtype": "Experimental_Results" @@ -13050,9 +13050,9 @@ "masked_elements": [ "Self Attention" ], - "figure_path": "./MISSQA/figures/0_2409.05884v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2409.05884v1_figure_1.png", "paperid": "2409.05884v1", - "paper_path": "./MISSQA/papers/2409.05884v1.json", + "paper_path": "./MISS-QA/papers/2409.05884v1.json", "figure_id": "2409.05884v1_figure_1.png", "caption": "Figure 1: \nIllustration of the proposed load forecasting framework using the Railway dataset.\nPanel a displays the ”expected future”, including timetables,\nschedules and gross ton-kilometers (GTKM) estimates derived\nfrom the operational planning of the railway operator.\nThis information is provided for the next day\nacross all of Switzerland in the Railway-agg dataset\nand on a regional level (e.g. East) in the Railway dataset.\nTraditionally, methods such as pure timeseries forecasting a.1 and\nregression models a.2 were employed for load forecasting.\nOur proposed approach introduces the use of transformer architecture to learn a unified representation\nof the time series regression task (c).\nTo efficiently integrate both past and future information for this task,\nwe propose dividing the input data at the current time point t𝑡titalic_t (the present) (b). 
We then apply distinct embedding strategies for past data (b.1) and\nfuture contextual information (b.2) .", "qtype": "Experimental_Results" @@ -13081,9 +13081,9 @@ "masked_elements": [ "Cross Attention" ], - "figure_path": "./MISSQA/figures/1_2409.05884v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2409.05884v1_figure_1.png", "paperid": "2409.05884v1", - "paper_path": "./MISSQA/papers/2409.05884v1.json", + "paper_path": "./MISS-QA/papers/2409.05884v1.json", "figure_id": "2409.05884v1_figure_1.png", "caption": "Figure 1: \nIllustration of the proposed load forecasting framework using the Railway dataset.\nPanel a displays the ”expected future”, including timetables,\nschedules and gross ton-kilometers (GTKM) estimates derived\nfrom the operational planning of the railway operator.\nThis information is provided for the next day\nacross all of Switzerland in the Railway-agg dataset\nand on a regional level (e.g. East) in the Railway dataset.\nTraditionally, methods such as pure timeseries forecasting a.1 and\nregression models a.2 were employed for load forecasting.\nOur proposed approach introduces the use of transformer architecture to learn a unified representation\nof the time series regression task (c).\nTo efficiently integrate both past and future information for this task,\nwe propose dividing the input data at the current time point t𝑡titalic_t (the present) (b). We then apply distinct embedding strategies for past data (b.1) and\nfuture contextual information (b.2) .", "qtype": "Experimental_Results" @@ -13104,9 +13104,9 @@ "Non-Causal Attention", "Future Contextual Information" ], - "figure_path": "./MISSQA/figures/2_2409.05884v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2409.05884v1_figure_1.png", "paperid": "2409.05884v1", - "paper_path": "./MISSQA/papers/2409.05884v1.json", + "paper_path": "./MISS-QA/papers/2409.05884v1.json", "figure_id": "2409.05884v1_figure_1.png", "caption": "Figure 1: \nIllustration of the proposed load forecasting framework using the Railway dataset.\nPanel a displays the ”expected future”, including timetables,\nschedules and gross ton-kilometers (GTKM) estimates derived\nfrom the operational planning of the railway operator.\nThis information is provided for the next day\nacross all of Switzerland in the Railway-agg dataset\nand on a regional level (e.g. East) in the Railway dataset.\nTraditionally, methods such as pure timeseries forecasting a.1 and\nregression models a.2 were employed for load forecasting.\nOur proposed approach introduces the use of transformer architecture to learn a unified representation\nof the time series regression task (c).\nTo efficiently integrate both past and future information for this task,\nwe propose dividing the input data at the current time point t𝑡titalic_t (the present) (b). 
We then apply distinct embedding strategies for past data (b.1) and\nfuture contextual information (b.2) .", "qtype": "Others" @@ -13126,9 +13126,9 @@ "masked_elements": [ "Embedding Strategy" ], - "figure_path": "./MISSQA/figures/3_2409.05884v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2409.05884v1_figure_1.png", "paperid": "2409.05884v1", - "paper_path": "./MISSQA/papers/2409.05884v1.json", + "paper_path": "./MISS-QA/papers/2409.05884v1.json", "figure_id": "2409.05884v1_figure_1.png", "caption": "Figure 1: \nIllustration of the proposed load forecasting framework using the Railway dataset.\nPanel a displays the ”expected future”, including timetables,\nschedules and gross ton-kilometers (GTKM) estimates derived\nfrom the operational planning of the railway operator.\nThis information is provided for the next day\nacross all of Switzerland in the Railway-agg dataset\nand on a regional level (e.g. East) in the Railway dataset.\nTraditionally, methods such as pure timeseries forecasting a.1 and\nregression models a.2 were employed for load forecasting.\nOur proposed approach introduces the use of transformer architecture to learn a unified representation\nof the time series regression task (c).\nTo efficiently integrate both past and future information for this task,\nwe propose dividing the input data at the current time point t𝑡titalic_t (the present) (b). We then apply distinct embedding strategies for past data (b.1) and\nfuture contextual information (b.2) .", "qtype": "Others" @@ -13147,9 +13147,9 @@ "masked_elements": [ "Scribble expansion" ], - "figure_path": "./MISSQA/figures/0_2411.11636v1_figure_2.png", + "figure_path": "./MISS-QA/figures/0_2411.11636v1_figure_2.png", "paperid": "2411.11636v1", - "paper_path": "./MISSQA/papers/2411.11636v1.json", + "paper_path": "./MISS-QA/papers/2411.11636v1.json", "figure_id": "2411.11636v1_figure_2.png", "caption": "Figure 2: a) An overview of the proposed method. b) Illustration of superpixel-based scribble expansion. c) Illustration of pseudo-label refinement with superpixel filtered by dynamic threshold. d) Illustration of superpixel uncertainty assessment.", "qtype": "Others" @@ -13174,9 +13174,9 @@ "dynamic thresholding", "Refined by superpixel" ], - "figure_path": "./MISSQA/figures/1_2411.11636v1_figure_2.png", + "figure_path": "./MISS-QA/figures/1_2411.11636v1_figure_2.png", "paperid": "2411.11636v1", - "paper_path": "./MISSQA/papers/2411.11636v1.json", + "paper_path": "./MISS-QA/papers/2411.11636v1.json", "figure_id": "2411.11636v1_figure_2.png", "caption": "Figure 2: a) An overview of the proposed method. b) Illustration of superpixel-based scribble expansion. c) Illustration of pseudo-label refinement with superpixel filtered by dynamic threshold. d) Illustration of superpixel uncertainty assessment.", "qtype": "Design_Rationale" @@ -13202,9 +13202,9 @@ "Laplacian Smoothing", "Laplacian Sharpening" ], - "figure_path": "./MISSQA/figures/0_2411.02692v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.02692v1_figure_1.png", "paperid": "2411.02692v1", - "paper_path": "./MISSQA/papers/2411.02692v1.json", + "paper_path": "./MISS-QA/papers/2411.02692v1.json", "figure_id": "2411.02692v1_figure_1.png", "caption": "Figure 1. 
The Model Framework for JPEC", "qtype": "Design_Rationale" @@ -13232,9 +13232,9 @@ "masked_elements": [ "Laplacian Eigenmap" ], - "figure_path": "./MISSQA/figures/1_2411.02692v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.02692v1_figure_1.png", "paperid": "2411.02692v1", - "paper_path": "./MISSQA/papers/2411.02692v1.json", + "paper_path": "./MISS-QA/papers/2411.02692v1.json", "figure_id": "2411.02692v1_figure_1.png", "caption": "Figure 1. The Model Framework for JPEC", "qtype": "Design_Rationale" @@ -13260,9 +13260,9 @@ "Laplacian smoothing", "Supply Chain" ], - "figure_path": "./MISSQA/figures/2_2411.02692v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.02692v1_figure_1.png", "paperid": "2411.02692v1", - "paper_path": "./MISSQA/papers/2411.02692v1.json", + "paper_path": "./MISS-QA/papers/2411.02692v1.json", "figure_id": "2411.02692v1_figure_1.png", "caption": "Figure 1. The Model Framework for JPEC", "qtype": "Implementation_Details" @@ -13281,9 +13281,9 @@ "masked_elements": [ "Doppler Driven Block" ], - "figure_path": "./MISSQA/figures/0_2411.12676v1_figure_1.png", + "figure_path": "./MISS-QA/figures/0_2411.12676v1_figure_1.png", "paperid": "2411.12676v1", - "paper_path": "./MISSQA/papers/2411.12676v1.json", + "paper_path": "./MISS-QA/papers/2411.12676v1.json", "figure_id": "2411.12676v1_figure_1.png", "caption": "Figure 1: Overall flow chart of IE-PONet Model Structure.", "qtype": "Implementation_Details" @@ -13304,9 +13304,9 @@ "C3D Block", "Doppler Driven Block" ], - "figure_path": "./MISSQA/figures/1_2411.12676v1_figure_1.png", + "figure_path": "./MISS-QA/figures/1_2411.12676v1_figure_1.png", "paperid": "2411.12676v1", - "paper_path": "./MISSQA/papers/2411.12676v1.json", + "paper_path": "./MISS-QA/papers/2411.12676v1.json", "figure_id": "2411.12676v1_figure_1.png", "caption": "Figure 1: Overall flow chart of IE-PONet Model Structure.", "qtype": "Implementation_Details" @@ -13326,9 +13326,9 @@ "masked_elements": [ "Doppler Driven Block" ], - "figure_path": "./MISSQA/figures/2_2411.12676v1_figure_1.png", + "figure_path": "./MISS-QA/figures/2_2411.12676v1_figure_1.png", "paperid": "2411.12676v1", - "paper_path": "./MISSQA/papers/2411.12676v1.json", + "paper_path": "./MISS-QA/papers/2411.12676v1.json", "figure_id": "2411.12676v1_figure_1.png", "caption": "Figure 1: Overall flow chart of IE-PONet Model Structure.", "qtype": "Literature_Background" @@ -13349,9 +13349,9 @@ "C3D Block", "2D Pose Extraction Block" ], - "figure_path": "./MISSQA/figures/3_2411.12676v1_figure_1.png", + "figure_path": "./MISS-QA/figures/3_2411.12676v1_figure_1.png", "paperid": "2411.12676v1", - "paper_path": "./MISSQA/papers/2411.12676v1.json", + "paper_path": "./MISS-QA/papers/2411.12676v1.json", "figure_id": "2411.12676v1_figure_1.png", "caption": "Figure 1: Overall flow chart of IE-PONet Model Structure.", "qtype": "Literature_Background"