diff --git a/.obsidian/workspace.json b/.obsidian/workspace.json
index 484adf2..91ffad9 100644
--- a/.obsidian/workspace.json
+++ b/.obsidian/workspace.json
@@ -13,7 +13,7 @@
           "state": {
             "type": "markdown",
             "state": {
-              "file": "Paper/CLIP/未命名.md",
+              "file": "Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md",
               "mode": "source",
               "source": false
             }
@@ -57,14 +57,6 @@
               }
             }
           },
-          {
-            "id": "c8c6dfe89e01b54d",
-            "type": "leaf",
-            "state": {
-              "type": "starred",
-              "state": {}
-            }
-          },
           {
             "id": "467ca686d8cb4c95",
             "type": "leaf",
@@ -73,8 +65,7 @@
               "state": {}
             }
           }
-        ],
-        "currentTab": 2
+        ]
       }
   ],
   "direction": "horizontal",
@@ -94,7 +85,7 @@
           "state": {
             "type": "backlink",
             "state": {
-              "file": "Paper/CLIP/未命名.md",
+              "file": "Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md",
               "collapseAll": false,
               "extraContext": false,
               "sortOrder": "alphabetical",
@@ -111,7 +102,7 @@
           "state": {
             "type": "outgoing-link",
             "state": {
-              "file": "Paper/CLIP/未命名.md",
+              "file": "Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md",
               "linksCollapsed": false,
               "unlinkedCollapsed": true
             }
@@ -134,7 +125,7 @@
           "state": {
             "type": "outline",
             "state": {
-              "file": "Paper/CLIP/未命名.md"
+              "file": "Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md"
             }
           }
         },
@@ -166,9 +157,9 @@
       "notion-like-tables:Create loom": false
     }
   },
-  "active": "c8c6dfe89e01b54d",
+  "active": "e144afbc26630891",
   "lastOpenFiles": [
-    "Paper/CLIP/未命名.md",
+    "Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md",
     "Paper/CLIP/PromptSRC:Foundational Model Adaptation without Forgetting.md",
     "Paper/CLIP/MaPLe:Multi-modal Prompt Learning.md",
     "Paper/CLIP/Learning Hierarchical Prompt with Structured Linguistic Knowledge for Vision-Language Models.md",
diff --git a/Paper/CLIP/未命名.md b/Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md
similarity index 100%
rename from Paper/CLIP/未命名.md
rename to Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md