vault backup: 2024-11-09 19:05:38

2024-11-09 19:05:38 +08:00
parent 6ae9c45a9a
commit 034ba3290e
2 changed files with 73 additions and 29 deletions

core-plugins.json

@@ -1,18 +1,31 @@
-[
-"file-explorer",
-"global-search",
-"switcher",
-"graph",
-"backlink",
-"outgoing-link",
-"tag-pane",
-"page-preview",
-"templates",
-"note-composer",
-"command-palette",
-"editor-status",
-"bookmarks",
-"outline",
-"word-count",
-"file-recovery"
-]
+{
+"file-explorer": true,
+"global-search": true,
+"switcher": true,
+"graph": true,
+"backlink": true,
+"canvas": false,
+"outgoing-link": true,
+"tag-pane": true,
+"page-preview": true,
+"daily-notes": false,
+"templates": true,
+"note-composer": true,
+"command-palette": true,
+"slash-command": false,
+"editor-status": true,
+"starred": true,
+"markdown-importer": false,
+"zk-prefixer": false,
+"random-note": false,
+"outline": true,
+"word-count": true,
+"slides": false,
+"audio-recorder": false,
+"workspaces": false,
+"file-recovery": true,
+"publish": false,
+"sync": false,
+"bookmarks": true,
+"properties": false
+}
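This hunk replaces the old array of enabled core-plugin IDs with a map from plugin ID to an enabled flag, recording previously untracked plugins (canvas, daily-notes, slash-command, etc.) explicitly as disabled. Below is a rough sketch of how such a conversion could be scripted; it is not Obsidian's own migration code. The file path and the DISABLED_BY_DEFAULT list are assumptions taken from the hunk above, and Obsidian's real migration also introduces entries (e.g. "starred") that were not in the old array.

```python
import json
from pathlib import Path

# Assumed default location; adjust if the vault uses a custom config directory.
CORE_PLUGINS = Path(".obsidian/core-plugins.json")

# Plugin IDs that appear only in the new map above, recorded as disabled.
DISABLED_BY_DEFAULT = [
    "canvas", "daily-notes", "slash-command", "markdown-importer",
    "zk-prefixer", "random-note", "slides", "audio-recorder",
    "workspaces", "publish", "sync", "properties",
]

def migrate(path: Path) -> None:
    data = json.loads(path.read_text(encoding="utf-8"))
    if isinstance(data, dict):
        return  # already in the new map format
    enabled = {plugin: True for plugin in data}               # old array entries stay enabled
    disabled = {plugin: False for plugin in DISABLED_BY_DEFAULT}
    path.write_text(json.dumps({**enabled, **disabled}, indent=2), encoding="utf-8")

if __name__ == "__main__":
    migrate(CORE_PLUGINS)
```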

workspace.json

@@ -16,10 +16,25 @@
"file": "Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md", "file": "Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md",
"mode": "source", "mode": "source",
"source": false "source": false
} },
"icon": "lucide-file",
"title": "Cross-Modal Few-Shot Learning with Multimodal Models"
}
},
{
"id": "fa3fbc811b622e4c",
"type": "leaf",
"state": {
"type": "release-notes",
"state": {
"currentVersion": "1.7.4"
},
"icon": "lucide-file",
"title": "Release Notes 1.7.4"
} }
} }
] ],
"currentTab": 1
} }
], ],
"direction": "vertical" "direction": "vertical"
@@ -39,7 +54,9 @@
"type": "file-explorer", "type": "file-explorer",
"state": { "state": {
"sortOrder": "alphabetical" "sortOrder": "alphabetical"
} },
"icon": "lucide-folder-closed",
"title": "文件列表"
} }
}, },
{ {
@@ -54,7 +71,9 @@
"collapseAll": false, "collapseAll": false,
"extraContext": false, "extraContext": false,
"sortOrder": "alphabetical" "sortOrder": "alphabetical"
} },
"icon": "lucide-search",
"title": "搜索"
} }
}, },
{ {
@@ -62,7 +81,9 @@
"type": "leaf", "type": "leaf",
"state": { "state": {
"type": "bookmarks", "type": "bookmarks",
"state": {} "state": {},
"icon": "lucide-bookmark",
"title": "书签"
} }
} }
] ]
@@ -93,7 +114,9 @@
"searchQuery": "", "searchQuery": "",
"backlinkCollapsed": false, "backlinkCollapsed": false,
"unlinkedCollapsed": true "unlinkedCollapsed": true
} },
"icon": "links-coming-in",
"title": "Cross-Modal Few-Shot Learning with Multimodal Models 的反向链接列表"
} }
}, },
{ {
@@ -105,7 +128,9 @@
"file": "Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md", "file": "Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md",
"linksCollapsed": false, "linksCollapsed": false,
"unlinkedCollapsed": true "unlinkedCollapsed": true
} },
"icon": "links-going-out",
"title": "Cross-Modal Few-Shot Learning with Multimodal Models 的出链列表"
} }
}, },
{ {
@@ -116,7 +141,9 @@
"state": { "state": {
"sortOrder": "frequency", "sortOrder": "frequency",
"useHierarchy": true "useHierarchy": true
} },
"icon": "lucide-tags",
"title": "标签"
} }
}, },
{ {
@@ -126,7 +153,9 @@
"type": "outline", "type": "outline",
"state": { "state": {
"file": "Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md" "file": "Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md"
} },
"icon": "lucide-list",
"title": "Cross-Modal Few-Shot Learning with Multimodal Models 的大纲"
} }
}, },
{ {
@@ -134,7 +163,9 @@
"type": "leaf", "type": "leaf",
"state": { "state": {
"type": "advanced-tables-toolbar", "type": "advanced-tables-toolbar",
"state": {} "state": {},
"icon": "spreadsheet",
"title": "Advanced Tables"
} }
} }
] ]
@@ -157,7 +188,7 @@
"notion-like-tables:Create loom": false "notion-like-tables:Create loom": false
} }
}, },
"active": "e144afbc26630891", "active": "fa3fbc811b622e4c",
"lastOpenFiles": [ "lastOpenFiles": [
"Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md", "Paper/CLIP/Cross-Modal Few-Shot Learning with Multimodal Models.md",
"Paper/CLIP/PromptSRCFoundational Model Adaptation without Forgetting.md", "Paper/CLIP/PromptSRCFoundational Model Adaptation without Forgetting.md",