diff --git a/.obsidian/core-plugins.json b/.obsidian/core-plugins.json
index bbe96ed..c128689 100644
--- a/.obsidian/core-plugins.json
+++ b/.obsidian/core-plugins.json
@@ -1,18 +1,31 @@
-[
-  "file-explorer",
-  "global-search",
-  "switcher",
-  "graph",
-  "backlink",
-  "outgoing-link",
-  "tag-pane",
-  "page-preview",
-  "templates",
-  "note-composer",
-  "command-palette",
-  "editor-status",
-  "bookmarks",
-  "outline",
-  "word-count",
-  "file-recovery"
-]
\ No newline at end of file
+{
+  "file-explorer": true,
+  "global-search": true,
+  "switcher": true,
+  "graph": true,
+  "backlink": true,
+  "canvas": false,
+  "outgoing-link": true,
+  "tag-pane": true,
+  "page-preview": true,
+  "daily-notes": false,
+  "templates": true,
+  "note-composer": true,
+  "command-palette": true,
+  "slash-command": false,
+  "editor-status": true,
+  "starred": true,
+  "markdown-importer": false,
+  "zk-prefixer": false,
+  "random-note": false,
+  "outline": true,
+  "word-count": true,
+  "slides": false,
+  "audio-recorder": false,
+  "workspaces": false,
+  "file-recovery": true,
+  "publish": false,
+  "sync": false,
+  "bookmarks": true,
+  "properties": false
+}
\ No newline at end of file
diff --git a/.obsidian/workspace.json b/.obsidian/workspace.json
index 91ffad9..ca03a81 100644
--- a/.obsidian/workspace.json
+++ b/.obsidian/workspace.json
@@ -16,10 +16,25 @@
                 "file": "Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md",
                 "mode": "source",
                 "source": false
-              }
+              },
+              "icon": "lucide-file",
+              "title": "Cross-Modal Few-Shot Learning with Multimodal Models"
+            }
+          },
+          {
+            "id": "fa3fbc811b622e4c",
+            "type": "leaf",
+            "state": {
+              "type": "release-notes",
+              "state": {
+                "currentVersion": "1.7.4"
+              },
+              "icon": "lucide-file",
+              "title": "Release Notes 1.7.4"
             }
           }
-        ]
+        ],
+        "currentTab": 1
       }
     ],
     "direction": "vertical"
@@ -39,7 +54,9 @@
         "type": "file-explorer",
         "state": {
           "sortOrder": "alphabetical"
-        }
+        },
+        "icon": "lucide-folder-closed",
+        "title": "文件列表"
       }
     },
     {
@@ -54,7 +71,9 @@
           "collapseAll": false,
           "extraContext": false,
           "sortOrder": "alphabetical"
-        }
+        },
+        "icon": "lucide-search",
+        "title": "搜索"
       }
     },
     {
@@ -62,7 +81,9 @@
       "type": "leaf",
       "state": {
         "type": "bookmarks",
-        "state": {}
+        "state": {},
+        "icon": "lucide-bookmark",
+        "title": "书签"
       }
     }
   ]
@@ -93,7 +114,9 @@
           "searchQuery": "",
           "backlinkCollapsed": false,
           "unlinkedCollapsed": true
-        }
+        },
+        "icon": "links-coming-in",
+        "title": "Cross-Modal Few-Shot Learning with Multimodal Models 的反向链接列表"
       }
     },
     {
@@ -105,7 +128,9 @@
           "file": "Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md",
           "linksCollapsed": false,
           "unlinkedCollapsed": true
-        }
+        },
+        "icon": "links-going-out",
+        "title": "Cross-Modal Few-Shot Learning with Multimodal Models 的出链列表"
       }
     },
     {
@@ -116,7 +141,9 @@
        "state": {
          "sortOrder": "frequency",
          "useHierarchy": true
-        }
+        },
+        "icon": "lucide-tags",
+        "title": "标签"
       }
     },
     {
@@ -126,7 +153,9 @@
         "type": "outline",
         "state": {
           "file": "Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md"
-        }
+        },
+        "icon": "lucide-list",
+        "title": "Cross-Modal Few-Shot Learning with Multimodal Models 的大纲"
       }
     },
     {
@@ -134,7 +163,9 @@
       "type": "leaf",
       "state": {
         "type": "advanced-tables-toolbar",
-        "state": {}
+        "state": {},
+        "icon": "spreadsheet",
+        "title": "Advanced Tables"
       }
     }
   ]
@@ -157,7 +188,7 @@
       "notion-like-tables:Create loom": false
     }
   },
-  "active": "e144afbc26630891",
+  "active": "fa3fbc811b622e4c",
  "lastOpenFiles": [
    "Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md",
    "Paper/CLIP/PromptSRC:Foundational Model Adaptation without Forgetting.md",