❤️ Fully open-source the UI and kernel https://github.com/siyuan-note/siyuan/issues/5013
parent e650b8100c · commit f40ed985e1
1214 changed files with 345766 additions and 9 deletions
345  kernel/model/appearance.go  (normal file)
@@ -0,0 +1,345 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package model

import (
    "errors"
    "fmt"
    "os"
    "path/filepath"
    "strings"
    "sync"
    "time"

    "github.com/88250/gulu"
    "github.com/fsnotify/fsnotify"
    "github.com/siyuan-note/siyuan/kernel/util"
)

func InitAppearance() {
    util.SetBootDetails("Initializing appearance...")
    if err := os.Mkdir(util.AppearancePath, 0755); nil != err && !os.IsExist(err) {
        util.LogFatalf("create appearance folder [%s] failed: %s", util.AppearancePath, err)
    }

    unloadThemes()
    from := filepath.Join(util.WorkingDir, "appearance")
    if err := gulu.File.Copy(from, util.AppearancePath); nil != err {
        util.LogFatalf("copy appearance resources from [%s] to [%s] failed: %s", from, util.AppearancePath, err)
    }
    loadThemes()

    if !gulu.Str.Contains(Conf.Appearance.ThemeDark, Conf.Appearance.DarkThemes) {
        Conf.Appearance.ThemeDark = "midnight"
        Conf.Appearance.ThemeJS = false
    }
    if !gulu.Str.Contains(Conf.Appearance.ThemeLight, Conf.Appearance.LightThemes) {
        Conf.Appearance.ThemeLight = "daylight"
        Conf.Appearance.ThemeJS = false
    }

    loadIcons()
    if !gulu.Str.Contains(Conf.Appearance.Icon, Conf.Appearance.Icons) {
        Conf.Appearance.Icon = "material"
    }

    Conf.Save()
}

var themeWatchers = sync.Map{} // [string]*fsnotify.Watcher{}

func closeThemeWatchers() {
    themeWatchers.Range(func(key, value interface{}) bool {
        if err := value.(*fsnotify.Watcher).Close(); nil != err {
            util.LogErrorf("close file watcher failed: %s", err)
        }
        return true
    })
}

func unloadThemes() {
    if !gulu.File.IsDir(util.ThemesPath) {
        return
    }

    dir, err := os.Open(util.ThemesPath)
    if nil != err {
        util.LogErrorf("open appearance themes folder [%s] failed: %s", util.ThemesPath, err)
        return
    }
    themeDirs, err := dir.Readdir(-1)
    if nil != err {
        util.LogErrorf("read appearance themes folder failed: %s", err)
        return
    }
    dir.Close()

    for _, themeDir := range themeDirs {
        if !themeDir.IsDir() {
            continue
        }
        unwatchTheme(filepath.Join(util.ThemesPath, themeDir.Name()))
    }
}

func loadThemes() {
    dir, err := os.Open(util.ThemesPath)
    if nil != err {
        util.LogFatalf("open appearance themes folder [%s] failed: %s", util.ThemesPath, err)
    }
    themeDirs, err := dir.Readdir(-1)
    if nil != err {
        util.LogFatalf("read appearance themes folder failed: %s", err)
    }
    dir.Close()

    Conf.Appearance.DarkThemes = nil
    Conf.Appearance.LightThemes = nil
    for _, themeDir := range themeDirs {
        if !themeDir.IsDir() {
            continue
        }
        name := themeDir.Name()
        themeConf, err := themeJSON(name)
        if nil != err || nil == themeConf {
            continue
        }

        modes := themeConf["modes"].([]interface{})
        for _, mode := range modes {
            if "dark" == mode {
                Conf.Appearance.DarkThemes = append(Conf.Appearance.DarkThemes, name)
            } else if "light" == mode {
                Conf.Appearance.LightThemes = append(Conf.Appearance.LightThemes, name)
            }
        }

        if 0 == Conf.Appearance.Mode {
            if Conf.Appearance.ThemeLight == name {
                Conf.Appearance.ThemeVer = themeConf["version"].(string)
                Conf.Appearance.ThemeJS = gulu.File.IsExist(filepath.Join(util.ThemesPath, name, "theme.js"))
            }
        } else {
            if Conf.Appearance.ThemeDark == name {
                Conf.Appearance.ThemeVer = themeConf["version"].(string)
                Conf.Appearance.ThemeJS = gulu.File.IsExist(filepath.Join(util.ThemesPath, name, "theme.js"))
            }
        }

        go watchTheme(filepath.Join(util.ThemesPath, name))
    }
}

func themeJSON(themeName string) (ret map[string]interface{}, err error) {
    p := filepath.Join(util.ThemesPath, themeName, "theme.json")
    if !gulu.File.IsExist(p) {
        err = os.ErrNotExist
        return
    }
    data, err := os.ReadFile(p)
    if nil != err {
        util.LogErrorf("read theme.json [%s] failed: %s", p, err)
        return
    }
    if err = gulu.JSON.UnmarshalJSON(data, &ret); nil != err {
        util.LogErrorf("parse theme.json [%s] failed: %s", p, err)
        return
    }
    if 5 > len(ret) {
        util.LogWarnf("invalid theme.json [%s]", p)
        return nil, errors.New("invalid theme.json")
    }
    return
}

func iconJSON(iconName string) (ret map[string]interface{}, err error) {
    p := filepath.Join(util.IconsPath, iconName, "icon.json")
    if !gulu.File.IsExist(p) {
        err = os.ErrNotExist
        return
    }
    data, err := os.ReadFile(p)
    if nil != err {
        util.LogErrorf("read icon.json [%s] failed: %s", p, err)
        return
    }
    if err = gulu.JSON.UnmarshalJSON(data, &ret); nil != err {
        util.LogErrorf("parse icon.json [%s] failed: %s", p, err)
        return
    }
    if 4 > len(ret) {
        util.LogWarnf("invalid icon.json [%s]", p)
        return nil, errors.New("invalid icon.json")
    }
    return
}

func templateJSON(templateName string) (ret map[string]interface{}, err error) {
    p := filepath.Join(util.DataDir, "templates", templateName, "template.json")
    if !gulu.File.IsExist(p) {
        err = os.ErrNotExist
        return
    }
    data, err := os.ReadFile(p)
    if nil != err {
        util.LogErrorf("read template.json [%s] failed: %s", p, err)
        return
    }
    if err = gulu.JSON.UnmarshalJSON(data, &ret); nil != err {
        util.LogErrorf("parse template.json [%s] failed: %s", p, err)
        return
    }
    if 4 > len(ret) {
        util.LogWarnf("invalid template.json [%s]", p)
        return nil, errors.New("invalid template.json")
    }
    return
}

func widgetJSON(widgetName string) (ret map[string]interface{}, err error) {
    p := filepath.Join(util.DataDir, "widgets", widgetName, "widget.json")
    if !gulu.File.IsExist(p) {
        err = os.ErrNotExist
        return
    }
    data, err := os.ReadFile(p)
    if nil != err {
        util.LogErrorf("read widget.json [%s] failed: %s", p, err)
        return
    }
    if err = gulu.JSON.UnmarshalJSON(data, &ret); nil != err {
        util.LogErrorf("parse widget.json [%s] failed: %s", p, err)
        return
    }
    if 4 > len(ret) {
        util.LogWarnf("invalid widget.json [%s]", p)
        return nil, errors.New("invalid widget.json")
    }
    return
}

func loadIcons() {
    dir, err := os.Open(util.IconsPath)
    if nil != err {
        util.LogFatalf("open appearance icons folder [%s] failed: %s", util.IconsPath, err)
    }
    iconDirs, err := dir.Readdir(-1)
    if nil != err {
        util.LogFatalf("read appearance icons folder failed: %s", err)
    }
    dir.Close()

    Conf.Appearance.Icons = nil
    for _, iconDir := range iconDirs {
        if !iconDir.IsDir() {
            continue
        }
        name := iconDir.Name()
        iconConf, err := iconJSON(name)
        if nil != err || nil == iconConf {
            continue
        }
        Conf.Appearance.Icons = append(Conf.Appearance.Icons, name)
        if Conf.Appearance.Icon == name {
            Conf.Appearance.IconVer = iconConf["version"].(string)
        }
    }
}

func unwatchTheme(folder string) {
    val, _ := themeWatchers.Load(folder)
    if nil != val {
        themeWatcher := val.(*fsnotify.Watcher)
        themeWatcher.Close()
    }
}

func watchTheme(folder string) {
    val, _ := themeWatchers.Load(folder)
    var themeWatcher *fsnotify.Watcher
    if nil != val {
        themeWatcher = val.(*fsnotify.Watcher)
        themeWatcher.Close()
    }

    var err error
    if themeWatcher, err = fsnotify.NewWatcher(); nil != err {
        util.LogErrorf("add theme file watcher for folder [%s] failed: %s", folder, err)
        return
    }
    themeWatchers.Store(folder, themeWatcher)

    done := make(chan bool)
    go func() {
        for {
            select {
            case event, ok := <-themeWatcher.Events:
                if !ok {
                    return
                }

                //util.LogInfof(event.String())
                if event.Op&fsnotify.Write == fsnotify.Write &&
                    (strings.HasSuffix(event.Name, "theme.css") || strings.HasSuffix(event.Name, "custom.css")) {
                    var themeName string
                    if themeName = isCurrentUseTheme(event.Name); "" == themeName {
                        break
                    }

                    if strings.HasSuffix(event.Name, "theme.css") {
                        util.BroadcastByType("main", "refreshtheme", 0, "", map[string]interface{}{
                            "theme": "/appearance/themes/" + themeName + "/theme.css?" + fmt.Sprintf("%d", time.Now().Unix()),
                        })
                        break
                    }

                    if strings.HasSuffix(event.Name, "custom.css") {
                        util.BroadcastByType("main", "refreshtheme", 0, "", map[string]interface{}{
                            "theme": "/appearance/themes/" + themeName + "/custom.css?" + fmt.Sprintf("%d", time.Now().Unix()),
                        })
                        break
                    }
                }
            case err, ok := <-themeWatcher.Errors:
                if !ok {
                    return
                }
                util.LogErrorf("watch theme file failed: %s", err)
            }
        }
    }()

    //util.LogInfof("add file watcher [%s]", folder)
    if err := themeWatcher.Add(folder); err != nil {
        util.LogErrorf("add theme files watcher for folder [%s] failed: %s", folder, err)
    }
    <-done // blocks forever: nothing sends on done, so the watcher goroutine stays active for the process lifetime
}

func isCurrentUseTheme(themePath string) string {
    themeName := filepath.Base(filepath.Dir(themePath))
    if 0 == Conf.Appearance.Mode { // light
        if Conf.Appearance.ThemeLight == themeName {
            return themeName
        }
    } else if 1 == Conf.Appearance.Mode { // dark
        if Conf.Appearance.ThemeDark == themeName {
            return themeName
        }
    }
    return ""
}
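
Note (not part of this commit): loadThemes only registers a theme whose theme.json parses into at least five top-level keys and whose "modes" array lists "light" and/or "dark"; themeJSON rejects anything smaller. A minimal standalone sketch of that check using only the standard library — the manifest fields other than "modes" and "version" are illustrative, not taken from this commit:

package main

import (
    "encoding/json"
    "fmt"
)

// sampleThemeJSON mirrors the checks in themeJSON()/loadThemes(): the manifest
// must parse, carry at least five keys, and declare its modes and version.
const sampleThemeJSON = `{
    "name": "midnight",
    "author": "example",
    "url": "https://example.com",
    "version": "1.0.0",
    "modes": ["dark"]
}`

func main() {
    var conf map[string]interface{}
    if err := json.Unmarshal([]byte(sampleThemeJSON), &conf); err != nil {
        fmt.Println("parse theme.json failed:", err)
        return
    }
    if 5 > len(conf) {
        fmt.Println("invalid theme.json: fewer than 5 fields")
        return
    }
    for _, mode := range conf["modes"].([]interface{}) {
        fmt.Printf("theme %s (v%s) supports %s mode\n", conf["name"], conf["version"], mode)
    }
}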
677  kernel/model/assets.go  (normal file)
@@ -0,0 +1,677 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package model

import (
    "bytes"
    "errors"
    "fmt"
    "io"
    "io/fs"
    "mime"
    "net/http"
    "net/url"
    "os"
    "path"
    "path/filepath"
    "sort"
    "strings"

    "github.com/88250/gulu"
    "github.com/88250/lute/ast"
    "github.com/88250/lute/parse"
    "github.com/siyuan-note/siyuan/kernel/filesys"
    "github.com/siyuan-note/siyuan/kernel/search"
    "github.com/siyuan-note/siyuan/kernel/sql"
    "github.com/siyuan-note/siyuan/kernel/treenode"
    "github.com/siyuan-note/siyuan/kernel/util"
)

func DocImageAssets(rootID string) (ret []string, err error) {
    tree, err := loadTreeByBlockID(rootID)
    if nil != err {
        return
    }

    ast.Walk(tree.Root, func(n *ast.Node, entering bool) ast.WalkStatus {
        if !entering {
            return ast.WalkContinue
        }
        if ast.NodeImage == n.Type {
            linkDest := n.ChildByType(ast.NodeLinkDest)
            dest := linkDest.Tokens
            ret = append(ret, gulu.Str.FromBytes(dest))
        }
        return ast.WalkContinue
    })
    return
}

func NetImg2LocalAssets(rootID string) (err error) {
    tree, err := loadTreeByBlockID(rootID)
    if nil != err {
        return
    }

    var files int
    ast.Walk(tree.Root, func(n *ast.Node, entering bool) ast.WalkStatus {
        if !entering {
            return ast.WalkContinue
        }
        if ast.NodeImage == n.Type {
            linkDest := n.ChildByType(ast.NodeLinkDest)
            dest := linkDest.Tokens
            if !sql.IsAssetLinkDest(dest) && (bytes.HasPrefix(bytes.ToLower(dest), []byte("https://")) || bytes.HasPrefix(bytes.ToLower(dest), []byte("http://"))) {
                u := string(dest)
                util.PushMsg(fmt.Sprintf(Conf.Language(119), u), 15000)
                request := util.NewBrowserRequest(Conf.System.NetworkProxy.String())
                resp, reqErr := request.Get(u)
                if nil != reqErr {
                    util.LogErrorf("download net img [%s] failed: %s", u, reqErr)
                    return ast.WalkSkipChildren
                }
                if 200 != resp.StatusCode {
                    util.LogErrorf("download net img [%s] failed: %d", u, resp.StatusCode)
                    return ast.WalkSkipChildren
                }
                data, repErr := resp.ToBytes()
                if nil != repErr {
                    util.LogErrorf("download net img [%s] failed: %s", u, repErr)
                    return ast.WalkSkipChildren
                }
                var name string
                if strings.Contains(u, "?") {
                    name = u[:strings.Index(u, "?")]
                    name = path.Base(name)
                } else {
                    name = path.Base(u)
                }
                name, _ = url.PathUnescape(name)
                ext := path.Ext(name)
                if "" == ext {
                    contentType := resp.Header.Get("Content-Type")
                    exts, _ := mime.ExtensionsByType(contentType)
                    if 0 < len(exts) {
                        ext = exts[0]
                    }
                }
                name = strings.TrimSuffix(name, ext)
                name = gulu.Str.SubStr(name, 64)
                name = util.FilterFileName(name)
                name = "net-img-" + name + "-" + ast.NewNodeID() + ext
                writePath := filepath.Join(util.DataDir, "assets", name)
                if err = gulu.File.WriteFileSafer(writePath, data, 0644); nil != err {
                    util.LogErrorf("write downloaded net img [%s] to local assets [%s] failed: %s", u, writePath, err)
                    return ast.WalkSkipChildren
                }

                linkDest.Tokens = []byte("assets/" + name)
                files++
            }
            return ast.WalkSkipChildren
        }
        return ast.WalkContinue
    })

    if 0 < files {
        util.PushMsg(Conf.Language(113), 5000)
        if err = writeJSONQueue(tree); nil != err {
            return
        }
        sql.WaitForWritingDatabase()
        util.PushMsg(fmt.Sprintf(Conf.Language(120), files), 5000)
    } else {
        util.PushMsg(Conf.Language(121), 3000)
    }
    return
}

type Asset struct {
    HName string `json:"hName"`
    Name  string `json:"name"`
    Path  string `json:"path"`
}

func SearchAssetsByName(keyword string) (ret []*Asset) {
    ret = []*Asset{}
    sqlAssets := sql.QueryAssetsByName(keyword)
    for _, sqlAsset := range sqlAssets {
        hName := util.RemoveID(sqlAsset.Name)
        _, hName = search.MarkText(hName, keyword, 64, Conf.Search.CaseSensitive)
        asset := &Asset{
            HName: hName,
            Name:  sqlAsset.Name,
            Path:  sqlAsset.Path,
        }
        ret = append(ret, asset)
    }
    return
}

func GetAssetAbsPath(relativePath string) (absPath string, err error) {
    relativePath = strings.TrimSpace(relativePath)
    notebooks, err := ListNotebooks()
    if nil != err {
        err = errors.New(Conf.Language(0))
        return
    }

    // Search under each notebook
    for _, notebook := range notebooks {
        notebookAbsPath := filepath.Join(util.DataDir, notebook.ID)
        filepath.Walk(notebookAbsPath, func(path string, info fs.FileInfo, _ error) error {
            if isSkipFile(info.Name()) {
                if info.IsDir() {
                    return filepath.SkipDir
                }
                return nil
            }
            if p := filepath.ToSlash(path); strings.HasSuffix(p, relativePath) {
                if gulu.File.IsExist(path) {
                    absPath = path
                    return io.EOF
                }
            }
            return nil
        })
        if "" != absPath {
            return
        }
    }

    // Search under the global assets path
    p := filepath.Join(util.DataDir, relativePath)
    if gulu.File.IsExist(p) {
        absPath = p
        return
    }
    return "", errors.New(fmt.Sprintf(Conf.Language(12), relativePath))
}

func UploadAssets2Cloud(rootID string) (err error) {
    if !IsSubscriber() {
        return
    }

    sqlAssets := sql.QueryRootBlockAssets(rootID)
    err = uploadCloud(sqlAssets)
    return
}

func uploadCloud(sqlAssets []*sql.Asset) (err error) {
    syncedAssets := readWorkspaceAssets()
    var unSyncAssets []string
    for _, sqlAsset := range sqlAssets {
        if !gulu.Str.Contains(sqlAsset.Path, syncedAssets) && strings.Contains(sqlAsset.Path, "assets/") {
            unSyncAssets = append(unSyncAssets, sqlAsset.Path)
        }
    }

    if 1 > len(unSyncAssets) {
        return
    }

    var uploadAbsAssets []string
    for _, asset := range unSyncAssets {
        var absPath string
        absPath, err = GetAssetAbsPath(asset)
        if nil != err {
            util.LogWarnf("get asset [%s] abs path failed: %s", asset, err)
            return
        }
        if "" == absPath {
            util.LogErrorf("not found asset [%s]", asset)
            err = errors.New(fmt.Sprintf(Conf.Language(12), asset))
            return
        }

        uploadAbsAssets = append(uploadAbsAssets, absPath)
    }

    if 1 > len(uploadAbsAssets) {
        return
    }

    uploadAbsAssets = util.RemoveDuplicatedElem(uploadAbsAssets)

    util.LogInfof("uploading [%d] assets", len(uploadAbsAssets))
    if loadErr := LoadUploadToken(); nil != loadErr {
        util.PushMsg(loadErr.Error(), 5000)
        return
    }

    var completedUploadAssets []string
    for _, absAsset := range uploadAbsAssets {
        if fi, statErr := os.Stat(absAsset); nil != statErr {
            util.LogErrorf("stat file [%s] failed: %s", absAsset, statErr)
            return statErr
        } else if util.CloudSingleFileMaxSizeLimit/10 <= fi.Size() {
            util.LogWarnf("file [%s] larger than 10MB, ignore uploading it", absAsset)
            continue
        }

        requestResult := gulu.Ret.NewResult()
        request := util.NewCloudFileRequest2m(Conf.System.NetworkProxy.String())
        resp, reqErr := request.
            SetResult(requestResult).
            SetFile("file[]", absAsset).
            SetCookies(&http.Cookie{Name: "symphony", Value: uploadToken}).
            Post(util.AliyunServer + "/apis/siyuan/upload?ver=" + util.Ver)
        if nil != reqErr {
            util.LogErrorf("upload assets failed: %s", reqErr)
            return ErrFailedToConnectCloudServer
        }

        if 401 == resp.StatusCode {
            err = errors.New(Conf.Language(31))
            return
        }

        if 0 != requestResult.Code {
            util.LogErrorf("upload assets failed: %s", requestResult.Msg)
            err = errors.New(fmt.Sprintf(Conf.Language(94), requestResult.Msg))
            return
        }

        absAsset = filepath.ToSlash(absAsset)
        relAsset := absAsset[strings.Index(absAsset, "assets/"):]
        completedUploadAssets = append(completedUploadAssets, relAsset)
        util.LogInfof("uploaded asset [%s]", relAsset)
    }

    if 0 < len(completedUploadAssets) {
        syncedAssets = readWorkspaceAssets()
        util.LogInfof("uploaded [%d] assets", len(completedUploadAssets))
        for _, completedSyncAsset := range completedUploadAssets {
            syncedAssets = append(syncedAssets, completedSyncAsset)
        }
        saveWorkspaceAssets(syncedAssets)
    }
    return
}

func readWorkspaceAssets() (ret []string) {
    ret = []string{}
    confDir := filepath.Join(util.DataDir, "assets", ".siyuan")
    if err := os.MkdirAll(confDir, 0755); nil != err {
        util.LogErrorf("create assets conf dir [%s] failed: %s", confDir, err)
        return
    }
    confPath := filepath.Join(confDir, "assets.json")
    if !gulu.File.IsExist(confPath) {
        return
    }

    data, err := os.ReadFile(confPath)
    if nil != err {
        util.LogErrorf("read assets conf failed: %s", err)
        return
    }
    if err = gulu.JSON.UnmarshalJSON(data, &ret); nil != err {
        util.LogErrorf("parse assets conf failed: %s, re-init it", err)
        return
    }
    return
}

func saveWorkspaceAssets(assets []string) {
    confDir := filepath.Join(util.DataDir, "assets", ".siyuan")
    if err := os.MkdirAll(confDir, 0755); nil != err {
        util.LogErrorf("create assets conf dir [%s] failed: %s", confDir, err)
        return
    }
    confPath := filepath.Join(confDir, "assets.json")

    assets = util.RemoveDuplicatedElem(assets)
    sort.Strings(assets)
    data, err := gulu.JSON.MarshalIndentJSON(assets, "", " ")
    if nil != err {
        util.LogErrorf("create assets conf failed: %s", err)
        return
    }
    if err = gulu.File.WriteFileSafer(confPath, data, 0644); nil != err {
        util.LogErrorf("write assets conf failed: %s", err)
        return
    }
}

func RemoveUnusedAssets() (ret []string) {
    util.PushMsg(Conf.Language(100), 30*1000)
    defer util.PushMsg(Conf.Language(99), 3000)

    ret = []string{}
    unusedAssets := UnusedAssets()

    historyDir, err := util.GetHistoryDir("delete")
    if nil != err {
        util.LogErrorf("get history dir failed: %s", err)
        return
    }

    for _, p := range unusedAssets {
        historyPath := filepath.Join(historyDir, p)
        if p = filepath.Join(util.DataDir, p); gulu.File.IsExist(p) {
            if err = gulu.File.Copy(p, historyPath); nil != err {
                return
            }
        }
    }

    for _, unusedAsset := range unusedAssets {
        if unusedAsset = filepath.Join(util.DataDir, unusedAsset); gulu.File.IsExist(unusedAsset) {
            if err := os.RemoveAll(unusedAsset); nil != err {
                util.LogErrorf("remove unused asset [%s] failed: %s", unusedAsset, err)
            }
        }
        ret = append(ret, unusedAsset)
    }
    if 0 < len(ret) {
        IncWorkspaceDataVer()
    }
    return
}

func RemoveUnusedAsset(p string) (ret string) {
    p = filepath.Join(util.DataDir, p)
    if !gulu.File.IsExist(p) {
        return p
    }

    historyDir, err := util.GetHistoryDir("delete")
    if nil != err {
        util.LogErrorf("get history dir failed: %s", err)
        return
    }

    newP := strings.TrimPrefix(p, util.DataDir)
    historyPath := filepath.Join(historyDir, newP)
    if err = gulu.File.Copy(p, historyPath); nil != err {
        return
    }

    if err = os.RemoveAll(p); nil != err {
        util.LogErrorf("remove unused asset [%s] failed: %s", p, err)
    }
    ret = p
    IncWorkspaceDataVer()
    return
}

func UnusedAssets() (ret []string) {
    ret = []string{}

    assetsPathMap, err := allAssetAbsPaths()
    if nil != err {
        return
    }
    linkDestMap := map[string]bool{}
    notebooks, err := ListNotebooks()
    if nil != err {
        return
    }
    for _, notebook := range notebooks {
        notebookAbsPath := filepath.Join(util.DataDir, notebook.ID)
        trees := loadTrees(notebookAbsPath)
        dests := map[string]bool{}
        for _, tree := range trees {
            for _, d := range assetsLinkDestsInTree(tree) {
                dests[d] = true
            }

            if titleImgPath := treenode.GetDocTitleImgPath(tree.Root); "" != titleImgPath {
                // Count the document title image as used
                if !sql.IsAssetLinkDest([]byte(titleImgPath)) {
                    continue
                }
                dests[titleImgPath] = true
            }
        }

        var linkDestFolderPaths, linkDestFilePaths []string
        for dest, _ := range dests {
            if !strings.HasPrefix(dest, "assets/") {
                continue
            }

            if "" == assetsPathMap[dest] {
                continue
            }
            if strings.HasSuffix(dest, "/") {
                linkDestFolderPaths = append(linkDestFolderPaths, dest)
            } else {
                linkDestFilePaths = append(linkDestFilePaths, dest)
            }
        }

        // Exclude folder links
        var toRemoves []string
        for asset, _ := range assetsPathMap {
            for _, linkDestFolder := range linkDestFolderPaths {
                if strings.HasPrefix(asset, linkDestFolder) {
                    toRemoves = append(toRemoves, asset)
                }
            }
            for _, linkDestPath := range linkDestFilePaths {
                if strings.HasPrefix(linkDestPath, asset) {
                    toRemoves = append(toRemoves, asset)
                }
            }
        }
        for _, toRemove := range toRemoves {
            delete(assetsPathMap, toRemove)
        }

        for _, dest := range linkDestFilePaths {
            linkDestMap[dest] = true
        }
    }

    // Exclude file annotations (.sya)
    var toRemoves []string
    for asset, _ := range assetsPathMap {
        if strings.HasSuffix(asset, ".sya") {
            toRemoves = append(toRemoves, asset)
        }
    }
    for _, toRemove := range toRemoves {
        delete(assetsPathMap, toRemove)
    }

    for _, assetAbsPath := range assetsPathMap {
        if _, ok := linkDestMap[assetAbsPath]; ok {
            continue
        }

        var p string
        if strings.HasPrefix(filepath.Join(util.DataDir, "assets"), assetAbsPath) {
            p = assetAbsPath[strings.Index(assetAbsPath, "assets"):]
        } else {
            p = strings.TrimPrefix(assetAbsPath, util.DataDir)[1:]
        }
        p = filepath.ToSlash(p)
        ret = append(ret, p)
    }
    sort.Strings(ret)
    return
}

func assetsLinkDestsInTree(tree *parse.Tree) (ret []string) {
    ret = []string{}
    ast.Walk(tree.Root, func(n *ast.Node, entering bool) ast.WalkStatus {
        // When changing the code below, also update the database inline-element construction to add the necessary node types
        if !entering || (ast.NodeLinkDest != n.Type && ast.NodeHTMLBlock != n.Type && ast.NodeInlineHTML != n.Type &&
            ast.NodeIFrame != n.Type && ast.NodeWidget != n.Type && ast.NodeAudio != n.Type && ast.NodeVideo != n.Type) {
            return ast.WalkContinue
        }

        if ast.NodeLinkDest == n.Type {
            if !isRelativePath(n.Tokens) {
                return ast.WalkContinue
            }

            dest := strings.TrimSpace(string(n.Tokens))
            ret = append(ret, dest)
        } else {
            if ast.NodeWidget == n.Type {
                dataAssets := n.IALAttr("data-assets")
                if "" == dataAssets || !isRelativePath([]byte(dataAssets)) {
                    return ast.WalkContinue
                }
                ret = append(ret, dataAssets)
            } else { // HTMLBlock/InlineHTML/IFrame/Audio/Video
                if index := bytes.Index(n.Tokens, []byte("src=\"")); 0 < index {
                    src := n.Tokens[index+len("src=\""):]
                    src = src[:bytes.Index(src, []byte("\""))]
                    if !isRelativePath(src) {
                        return ast.WalkContinue
                    }

                    dest := strings.TrimSpace(string(src))
                    ret = append(ret, dest)
                }
            }
        }
        return ast.WalkContinue
    })
    return
}

func isRelativePath(dest []byte) bool {
    if 1 > len(dest) {
        return false
    }
    if '/' == dest[0] {
        return false
    }
    return !bytes.Contains(dest, []byte(":"))
}

// allAssetAbsPaths returns a map from asset relative paths (assets/xxx) to absolute paths (F:\SiYuan\data\assets\xxx).
func allAssetAbsPaths() (assetsAbsPathMap map[string]string, err error) {
    notebooks, err := ListNotebooks()
    if nil != err {
        return
    }

    assetsAbsPathMap = map[string]string{}
    // Notebook-level assets
    for _, notebook := range notebooks {
        notebookAbsPath := filepath.Join(util.DataDir, notebook.ID)
        filepath.Walk(notebookAbsPath, func(path string, info fs.FileInfo, err error) error {
            if notebookAbsPath == path {
                return nil
            }
            if isSkipFile(info.Name()) {
                if info.IsDir() {
                    return filepath.SkipDir
                }
                return nil
            }

            if info.IsDir() && "assets" == info.Name() {
                filepath.Walk(path, func(assetPath string, info fs.FileInfo, err error) error {
                    if path == assetPath {
                        return nil
                    }
                    if isSkipFile(info.Name()) {
                        if info.IsDir() {
                            return filepath.SkipDir
                        }
                        return nil
                    }
                    relPath := filepath.ToSlash(assetPath)
                    relPath = relPath[strings.Index(relPath, "assets/"):]
                    if info.IsDir() {
                        relPath += "/"
                    }
                    assetsAbsPathMap[relPath] = assetPath
                    return nil
                })
                return filepath.SkipDir
            }
            return nil
        })
    }
    // Global assets
    assets := filepath.Join(util.DataDir, "assets")
    filepath.Walk(assets, func(assetPath string, info fs.FileInfo, err error) error {
        if assets == assetPath {
            return nil
        }

        if isSkipFile(info.Name()) {
            if info.IsDir() {
                return filepath.SkipDir
            }
            return nil
        }
        relPath := filepath.ToSlash(assetPath)
        relPath = relPath[strings.Index(relPath, "assets/"):]
        if info.IsDir() {
            relPath += "/"
        }
        assetsAbsPathMap[relPath] = assetPath
        return nil
    })
    return
}

// copyBoxAssetsToDataAssets copies all assets under the notebook path (including subdocuments) into data/assets.
func copyBoxAssetsToDataAssets(boxID string) {
    boxLocalPath := filepath.Join(util.DataDir, boxID)
    copyAssetsToDataAssets(boxLocalPath)
}

// copyDocAssetsToDataAssets copies all assets under the document path (including subdocuments) into data/assets.
func copyDocAssetsToDataAssets(boxID, parentDocPath string) {
    boxLocalPath := filepath.Join(util.DataDir, boxID)
    parentDocDirAbsPath := filepath.Dir(filepath.Join(boxLocalPath, parentDocPath))
    copyAssetsToDataAssets(parentDocDirAbsPath)
}

func copyAssetsToDataAssets(rootPath string) {
    filesys.ReleaseFileLocks(rootPath)

    var assetsDirPaths []string
    filepath.Walk(rootPath, func(path string, info fs.FileInfo, err error) error {
        if rootPath == path || nil == info {
            return nil
        }

        isDir := info.IsDir()
        name := info.Name()

        if isSkipFile(name) {
            if isDir {
                return filepath.SkipDir
            }
            return nil
        }

        if "assets" == name && isDir {
            assetsDirPaths = append(assetsDirPaths, path)
        }
        return nil
    })

    dataAssetsPath := filepath.Join(util.DataDir, "assets")
    for _, assetsDirPath := range assetsDirPaths {
        if err := gulu.File.Copy(assetsDirPath, dataAssetsPath); nil != err {
            util.LogErrorf("copy tree assets from [%s] to [%s] failed: %s", assetsDirPaths, dataAssetsPath, err)
        }
    }
}
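
Note (not part of this commit): isRelativePath is the rule that decides whether a link destination counts as a workspace asset in assetsLinkDestsInTree and UnusedAssets — non-empty, not starting with '/', and containing no ':'. A self-contained check of that rule, with made-up sample destinations:

package main

import (
    "bytes"
    "fmt"
)

// isRelativePath reproduces the rule from assets.go: non-empty, not rooted at
// '/', and containing no ':' (which would indicate a URL scheme or drive letter).
func isRelativePath(dest []byte) bool {
    if 1 > len(dest) {
        return false
    }
    if '/' == dest[0] {
        return false
    }
    return !bytes.Contains(dest, []byte(":"))
}

func main() {
    for _, dest := range []string{
        "assets/net-img-example-20220501120000-abcdefg.png", // kept: workspace asset
        "https://example.com/a.png",                         // skipped: has a scheme
        "/appearance/themes/midnight/theme.css",             // skipped: absolute path
        "",                                                  // skipped: empty
    } {
        fmt.Printf("%-55q relative asset: %v\n", dest, isRelativePath([]byte(dest)))
    }
}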
95  kernel/model/assets_watcher.go  (normal file)
@@ -0,0 +1,95 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//go:build !darwin

package model

import (
    "path/filepath"
    "time"

    "github.com/fsnotify/fsnotify"
    "github.com/siyuan-note/siyuan/kernel/util"
)

var assetsWatcher *fsnotify.Watcher

func WatchAssets() {
    if "android" == util.Container {
        return
    }

    go func() {
        watchAssets()
    }()
}

func watchAssets() {
    assetsDir := filepath.Join(util.DataDir, "assets")
    if nil != assetsWatcher {
        assetsWatcher.Close()
    }

    var err error
    if assetsWatcher, err = fsnotify.NewWatcher(); nil != err {
        util.LogErrorf("add assets watcher for folder [%s] failed: %s", assetsDir, err)
        return
    }

    go func() {
        var (
            timer     *time.Timer
            lastEvent fsnotify.Event
        )
        timer = time.NewTimer(100 * time.Millisecond)
        <-timer.C // timer should be expired at first

        for {
            select {
            case event, ok := <-assetsWatcher.Events:
                if !ok {
                    return
                }

                lastEvent = event
                timer.Reset(time.Millisecond * 100)
            case err, ok := <-assetsWatcher.Errors:
                if !ok {
                    return
                }
                util.LogErrorf("watch assets failed: %s", err)
            case <-timer.C:
                //util.LogInfof("assets changed: %s", lastEvent)
                if lastEvent.Op&fsnotify.Write == fsnotify.Write {
                    // Pick up existing asset files modified externally for cloud sync https://github.com/siyuan-note/siyuan/issues/4694
                    IncWorkspaceDataVer()
                }
            }
        }
    }()

    if err = assetsWatcher.Add(assetsDir); err != nil {
        util.LogErrorf("add assets watcher for folder [%s] failed: %s", assetsDir, err)
    }
    //util.LogInfof("added file watcher [%s]", assetsDir)
}

func CloseWatchAssets() {
    if nil != assetsWatcher {
        assetsWatcher.Close()
    }
}
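
Note (not part of this commit): watchAssets coalesces bursts of file events by resetting a 100 ms timer on every event and acting only when the timer fires. The same debounce pattern in isolation, with a plain channel standing in for assetsWatcher.Events:

package main

import (
    "fmt"
    "time"
)

func main() {
    events := make(chan string)

    go func() {
        // Simulate a burst of writes followed by quiet time.
        for i := 0; i < 5; i++ {
            events <- fmt.Sprintf("write %d", i)
            time.Sleep(10 * time.Millisecond)
        }
        time.Sleep(300 * time.Millisecond)
        close(events)
    }()

    timer := time.NewTimer(100 * time.Millisecond)
    <-timer.C // drain so the first Reset starts from an expired timer

    var lastEvent string
    for {
        select {
        case event, ok := <-events:
            if !ok {
                return
            }
            lastEvent = event
            timer.Reset(100 * time.Millisecond) // debounce: restart the quiet-period timer
        case <-timer.C:
            // No new events for 100 ms: handle the whole burst once.
            fmt.Println("handling burst, last event:", lastEvent)
        }
    }
}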
88  kernel/model/assets_watcher_darwin.go  (normal file)
@@ -0,0 +1,88 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

//go:build darwin

package model

import (
    "path/filepath"
    "time"

    "github.com/radovskyb/watcher"
    "github.com/siyuan-note/siyuan/kernel/util"
)

var assetsWatcher *watcher.Watcher

func WatchAssets() {
    if "iOS" == util.Container {
        return
    }

    go func() {
        watchAssets()
    }()
}

func watchAssets() {
    if nil != assetsWatcher {
        assetsWatcher.Close()
    }
    assetsWatcher = watcher.New()

    assetsDir := filepath.Join(util.DataDir, "assets")

    go func() {
        for {
            select {
            case event, ok := <-assetsWatcher.Event:
                if !ok {
                    return
                }

                //util.LogInfof("assets changed: %s", event)
                if watcher.Write == event.Op {
                    IncWorkspaceDataVer()
                }
            case err, ok := <-assetsWatcher.Error:
                if !ok {
                    return
                }
                util.LogErrorf("watch assets failed: %s", err)
            case <-assetsWatcher.Closed:
                return
            }
        }
    }()

    if err := assetsWatcher.Add(assetsDir); nil != err {
        util.LogErrorf("add assets watcher for folder [%s] failed: %s", assetsDir, err)
        return
    }

    //util.LogInfof("added file watcher [%s]", assetsDir)
    if err := assetsWatcher.Start(10 * time.Second); nil != err {
        util.LogErrorf("start assets watcher for folder [%s] failed: %s", assetsDir, err)
        return
    }
}

func CloseWatchAssets() {
    if nil != assetsWatcher {
        assetsWatcher.Close()
    }
}
509  kernel/model/backlink.go  (normal file)
@@ -0,0 +1,509 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package model

import (
    "bytes"
    "fmt"
    "path"
    "regexp"
    "sort"
    "strconv"
    "strings"

    "github.com/88250/gulu"
    "github.com/88250/lute/ast"
    "github.com/88250/lute/parse"
    "github.com/emirpasic/gods/sets/hashset"
    "github.com/siyuan-note/siyuan/kernel/search"
    "github.com/siyuan-note/siyuan/kernel/sql"
    "github.com/siyuan-note/siyuan/kernel/treenode"
    "github.com/siyuan-note/siyuan/kernel/util"
)

func RefreshBacklink(id string) {
    WaitForWritingFiles()

    tx, err := sql.BeginTx()
    if nil != err {
        return
    }
    defer sql.CommitTx(tx)

    refs := sql.QueryRefsByDefID(id, false)
    trees := map[string]*parse.Tree{}
    for _, ref := range refs {
        tree := trees[ref.RootID]
        if nil == tree {
            tree, err = loadTreeByBlockID(ref.RootID)
            if nil != err {
                util.LogErrorf("refresh tree refs failed: %s", err)
                continue
            }
            trees[ref.RootID] = tree
            sql.UpsertRefs(tx, tree)
        }
    }
}

func CreateBacklink(defID, refID, refText string, isDynamic bool) (refRootID string, err error) {
    refTree, err := loadTreeByBlockID(refID)
    if nil != err {
        return "", err
    }
    refNode := treenode.GetNodeInTree(refTree, refID)
    if nil == refNode {
        return
    }
    refRootID = refTree.Root.ID

    defBlockTree := treenode.GetBlockTree(defID)
    if nil == defBlockTree {
        return
    }
    defRoot := sql.GetBlock(defBlockTree.RootID)
    if nil == defRoot {
        return
    }

    refTextLower := strings.ToLower(refText)
    defBlock := sql.QueryBlockByNameOrAlias(defRoot.ID, refText)
    if nil == defBlock {
        if strings.ToLower(defRoot.Content) == refTextLower {
            // If no name/alias matched but the document name matches the mention keyword, use the document as the definition block
            defBlock = defRoot
        }
        if nil == defBlock {
            // Search by anchor text and take the first matching definition block
            if defIDs := sql.QueryBlockDefIDsByRefText(refTextLower, nil); 0 < len(defIDs) {
                if defBlock = sql.GetBlock(defIDs[0]); nil != defBlock {
                    goto OK
                }
            }
        }
        if nil == defBlock {
            defBlock = sql.GetBlock(defBlockTree.ID)
        }
        if nil == defBlock {
            return
        }
        if strings.ToLower(defBlock.Content) != refTextLower {
            return
        }
    }

OK:
    luteEngine := NewLute()
    found := false
    var toRemove []*ast.Node
    ast.Walk(refNode, func(n *ast.Node, entering bool) ast.WalkStatus {
        if !entering {
            return ast.WalkContinue
        }

        if ast.NodeText != n.Type {
            return ast.WalkContinue
        }

        text := gulu.Str.FromBytes(n.Tokens)
        re := regexp.MustCompile("(?i)" + refText)
        if strings.Contains(strings.ToLower(text), refTextLower) {
            if isDynamic {
                text = re.ReplaceAllString(text, "(("+defBlock.ID+" '"+refText+"'))")
            } else {
                text = re.ReplaceAllString(text, "(("+defBlock.ID+" \""+refText+"\"))")
            }
            found = true
            subTree := parse.Inline("", []byte(text), luteEngine.ParseOptions)
            var toInsert []*ast.Node
            for newNode := subTree.Root.FirstChild.FirstChild; nil != newNode; newNode = newNode.Next {
                toInsert = append(toInsert, newNode)
            }
            for _, insert := range toInsert {
                n.InsertBefore(insert)
            }
            toRemove = append(toRemove, n)
        }
        return ast.WalkContinue
    })

    for _, n := range toRemove {
        n.Unlink()
    }

    if found {
        refTree.Root.SetIALAttr("updated", util.CurrentTimeSecondsStr())
        if err = indexWriteJSONQueue(refTree); nil != err {
            return "", err
        }
        IncWorkspaceDataVer()
    }
    sql.WaitForWritingDatabase()
    return
}

func BuildTreeBacklink(id, keyword, mentionKeyword string, beforeLen int) (boxID string, linkPaths, mentionPaths []*Path, linkRefsCount, mentionsCount int) {
    linkPaths = []*Path{}
    mentionPaths = []*Path{}

    sqlBlock := sql.GetBlock(id)
    if nil == sqlBlock {
        return
    }
    rootID := sqlBlock.RootID
    boxID = sqlBlock.Box

    var links []*Block
    refs := sql.QueryRefsByDefID(id, true)

    // To reduce queries, assemble the IDs first and fetch them in a single query
    defSQLBlockIDs, refSQLBlockIDs := map[string]bool{}, map[string]bool{}
    var queryBlockIDs []string
    for _, ref := range refs {
        defSQLBlockIDs[ref.DefBlockID] = true
        refSQLBlockIDs[ref.BlockID] = true
        queryBlockIDs = append(queryBlockIDs, ref.DefBlockID)
        queryBlockIDs = append(queryBlockIDs, ref.BlockID)
    }
    querySQLBlocks := sql.GetBlocks(queryBlockIDs)
    defSQLBlocksCache := map[string]*sql.Block{}
    for _, defSQLBlock := range querySQLBlocks {
        if nil != defSQLBlock && defSQLBlockIDs[defSQLBlock.ID] {
            defSQLBlocksCache[defSQLBlock.ID] = defSQLBlock
        }
    }
    refSQLBlocksCache := map[string]*sql.Block{}
    for _, refSQLBlock := range querySQLBlocks {
        if nil != refSQLBlock && refSQLBlockIDs[refSQLBlock.ID] {
            refSQLBlocksCache[refSQLBlock.ID] = refSQLBlock
        }
    }

    excludeBacklinkIDs := hashset.New()
    for _, ref := range refs {
        defSQLBlock := defSQLBlocksCache[(ref.DefBlockID)]
        if nil == defSQLBlock {
            continue
        }

        refSQLBlock := refSQLBlocksCache[ref.BlockID]
        if nil == refSQLBlock {
            continue
        }
        refBlock := fromSQLBlock(refSQLBlock, "", beforeLen)
        if rootID == refBlock.RootID { // exclude refs and mentions within the current document
            excludeBacklinkIDs.Add(refBlock.RootID, refBlock.ID)
        }
        defBlock := fromSQLBlock(defSQLBlock, "", beforeLen)
        if defBlock.RootID == rootID { // definition blocks in the current document
            links = append(links, defBlock)
            if ref.DefBlockID == defBlock.ID {
                defBlock.Refs = append(defBlock.Refs, refBlock)
            }
        }
    }

    for _, link := range links {
        for _, ref := range link.Refs {
            excludeBacklinkIDs.Add(ref.RootID, ref.ID)
        }
        linkRefsCount += len(link.Refs)
    }

    var linkRefs []*Block
    processedParagraphs := hashset.New()
    var paragraphParentIDs []string
    for _, link := range links {
        for _, ref := range link.Refs {
            if "NodeParagraph" == ref.Type {
                paragraphParentIDs = append(paragraphParentIDs, ref.ParentID)
            }
        }
    }
    paragraphParents := sql.GetBlocks(paragraphParentIDs)
    for _, p := range paragraphParents {
        if "i" == p.Type {
            linkRefs = append(linkRefs, fromSQLBlock(p, keyword, beforeLen))
            processedParagraphs.Add(p.ID)
        }
    }
    for _, link := range links {
        for _, ref := range link.Refs {
            if "NodeParagraph" == ref.Type {
                if processedParagraphs.Contains(ref.ParentID) {
                    continue
                }
            }

            ref.DefID = link.ID
            ref.DefPath = link.Path

            content := ref.Content
            if "" != keyword {
                _, content = search.MarkText(content, keyword, beforeLen, Conf.Search.CaseSensitive)
                ref.Content = content
            }
            linkRefs = append(linkRefs, ref)
        }
    }
    linkPaths = toSubTree(linkRefs, keyword)

    mentions := buildTreeBackmention(sqlBlock, linkRefs, mentionKeyword, excludeBacklinkIDs, beforeLen)
    mentionsCount = len(mentions)
    mentionPaths = toFlatTree(mentions, 0, "backlink")
    return
}

func buildTreeBackmention(defSQLBlock *sql.Block, refBlocks []*Block, keyword string, excludeBacklinkIDs *hashset.Set, beforeLen int) (ret []*Block) {
    ret = []*Block{}

    var names, aliases []string
    var fName, rootID string
    if "d" == defSQLBlock.Type {
        if Conf.Search.BacklinkMentionName {
            names = sql.QueryBlockNamesByRootID(defSQLBlock.ID)
        }
        if Conf.Search.BacklinkMentionAlias {
            aliases = sql.QueryBlockAliases(defSQLBlock.ID)
        }
        if Conf.Search.BacklinkMentionDoc {
            fName = path.Base(defSQLBlock.HPath)
        }
        rootID = defSQLBlock.ID
    } else {
        if Conf.Search.BacklinkMentionName {
            if "" != defSQLBlock.Name {
                names = append(names, defSQLBlock.Name)
            }
        }
        if Conf.Search.BacklinkMentionAlias {
            if "" != defSQLBlock.Alias {
                aliases = strings.Split(defSQLBlock.Alias, ",")
            }
        }
        root := treenode.GetBlockTree(defSQLBlock.RootID)
        rootID = root.ID
    }

    set := hashset.New()
    for _, name := range names {
        set.Add(name)
    }
    for _, alias := range aliases {
        set.Add(alias)
    }
    if "" != fName {
        set.Add(fName)
    }

    if Conf.Search.BacklinkMentionAnchor {
        for _, refBlock := range refBlocks {
            refs := sql.QueryRefsByDefIDRefID(refBlock.DefID, refBlock.ID)
            for _, ref := range refs {
                set.Add(ref.Content)
            }
        }
    }

    var mentionKeywords []string
    for _, v := range set.Values() {
        mentionKeywords = append(mentionKeywords, v.(string))
    }
    ret = searchBackmention(mentionKeywords, keyword, excludeBacklinkIDs, rootID, beforeLen)
    return
}

func searchBackmention(mentionKeywords []string, keyword string, excludeBacklinkIDs *hashset.Set, rootID string, beforeLen int) (ret []*Block) {
    ret = []*Block{}

    if 1 > len(mentionKeywords) {
        return
    }
    sort.SliceStable(mentionKeywords, func(i, j int) bool {
        return len(mentionKeywords[i]) < len(mentionKeywords[j])
    })

    table := "blocks_fts" // case-sensitive
    if !Conf.Search.CaseSensitive {
        table = "blocks_fts_case_insensitive"
    }

    buf := bytes.Buffer{}
    buf.WriteString("SELECT * FROM " + table + " WHERE " + table + " MATCH '{content}:(")
    for i, mentionKeyword := range mentionKeywords {
        if 511 < i { // upper limit for mention search https://github.com/siyuan-note/siyuan/issues/3715
            util.PushMsg(fmt.Sprintf(Conf.Language(38), len(mentionKeywords)), 5000)
            mentionKeyword = strings.ReplaceAll(mentionKeyword, "\"", "\"\"")
            buf.WriteString("\"" + mentionKeyword + "\"")
            break
        }

        mentionKeyword = strings.ReplaceAll(mentionKeyword, "\"", "\"\"")
        buf.WriteString("\"" + mentionKeyword + "\"")
        if i < len(mentionKeywords)-1 {
            buf.WriteString(" OR ")
        }
    }
    buf.WriteString(")'")
    if "" != keyword {
        buf.WriteString(" AND MATCH '{content}:'")
        buf.WriteString("\"" + keyword + "\"")
        keyword = strings.ReplaceAll(keyword, "\"", "\"\"")
    }
    buf.WriteString(" AND root_id != '" + rootID + "'") // do not search within the document that contains the definition block
    buf.WriteString(" AND type IN ('d', 'h', 'p', 't')")
    buf.WriteString(" ORDER BY id DESC LIMIT " + strconv.Itoa(Conf.Search.Limit))
    query := buf.String()

    sqlBlocks := sql.SelectBlocksRawStmt(query, Conf.Search.Limit)
    blocks := fromSQLBlocks(&sqlBlocks, strings.Join(mentionKeywords, search.TermSep), beforeLen)

    // Exclude link text https://github.com/siyuan-note/siyuan/issues/1542
    luteEngine := NewLute()
    var tmp []*Block
    for _, b := range blocks {
        tree := parse.Parse("", gulu.Str.ToBytes(b.Markdown), luteEngine.ParseOptions)
        if nil == tree {
            continue
        }

        textBuf := &bytes.Buffer{}
        ast.Walk(tree.Root, func(n *ast.Node, entering bool) ast.WalkStatus {
            if !entering || n.IsBlock() {
                return ast.WalkContinue
            }
            if ast.NodeText == n.Type || ast.NodeLinkText == n.Type {
                textBuf.Write(n.Tokens)
            }
            return ast.WalkContinue
        })

        text := textBuf.String()
        text = strings.ToLower(text)
        var contain bool
        for _, mentionKeyword := range mentionKeywords {
            if strings.Contains(text, strings.ToLower(mentionKeyword)) {
                contain = true
                break
            }
        }
        if contain {
            tmp = append(tmp, b)
        }
    }
    blocks = tmp

    mentionBlockMap := map[string]*Block{}
    for _, block := range blocks {
        mentionBlockMap[block.ID] = block

        refText := getContainStr(block.Content, mentionKeywords)
        block.RefText = refText
    }

    for _, mentionBlock := range mentionBlockMap {
        if !excludeBacklinkIDs.Contains(mentionBlock.ID) {
            ret = append(ret, mentionBlock)
        }
    }

    sort.SliceStable(ret, func(i, j int) bool {
        return ret[i].ID > ret[j].ID
    })
    return
}

func getContainStr(str string, strs []string) string {
    str = strings.ToLower(str)
    for _, s := range strs {
        if strings.Contains(str, strings.ToLower(s)) {
            return s
        }
    }
    return ""
}

// buildFullLinks builds the forward link and backlink lists.
// forwardlinks: forward link relations (refs)
// backlinks: backlink relations (defs)
func buildFullLinks(condition string) (forwardlinks, backlinks []*Block) {
    forwardlinks, backlinks = []*Block{}, []*Block{}
    defs := buildDefsAndRefs(condition)
    backlinks = append(backlinks, defs...)
    for _, def := range defs {
        for _, ref := range def.Refs {
            forwardlinks = append(forwardlinks, ref)
        }
    }
    return
}

func buildDefsAndRefs(condition string) (defBlocks []*Block) {
    defBlockMap := map[string]*Block{}
    refBlockMap := map[string]*Block{}
    defRefs := sql.DefRefs(condition)

    // Convert sql blocks to blocks
    for _, row := range defRefs {
        for def, ref := range row {
            if nil == ref {
                continue
            }

            refBlock := refBlockMap[ref.ID]
            if nil == refBlock {
                refBlock = fromSQLBlock(ref, "", 0)
                refBlockMap[ref.ID] = refBlock
            }

            // The ref block itself also needs to act as a definition block, otherwise the graph has no node for it
            if defBlock := defBlockMap[ref.ID]; nil == defBlock {
                defBlockMap[ref.ID] = refBlock
            }

            if defBlock := defBlockMap[def.ID]; nil == defBlock {
                defBlock = fromSQLBlock(def, "", 0)
                defBlockMap[def.ID] = defBlock
            }
        }
    }

    // Assemble the block.Defs and block.Refs fields
    for _, row := range defRefs {
        for def, ref := range row {
            if nil == ref {
                defBlock := fromSQLBlock(def, "", 0)
                defBlockMap[def.ID] = defBlock
                continue
            }

            refBlock := refBlockMap[ref.ID]
            defBlock := defBlockMap[def.ID]
            if refBlock.ID == defBlock.ID { // self reference
                continue
            }

            refBlock.Defs = append(refBlock.Defs, defBlock)
            defBlock.Refs = append(defBlock.Refs, refBlock)
        }
    }

    for _, def := range defBlockMap {
        defBlocks = append(defBlocks, def)
    }
    return
}
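
Note (not part of this commit): searchBackmention builds an SQLite FTS MATCH expression by quoting every mention keyword as a phrase, doubling embedded double quotes, and joining the phrases with OR (capped at 512 keywords). The string-building step in isolation, with invented sample keywords:

package main

import (
    "fmt"
    "strings"
)

// buildMatchExpr mirrors the quoting used in searchBackmention: each keyword
// becomes an FTS phrase "..." with inner double quotes doubled, joined by OR.
func buildMatchExpr(keywords []string) string {
    var b strings.Builder
    b.WriteString("{content}:(")
    for i, kw := range keywords {
        kw = strings.ReplaceAll(kw, "\"", "\"\"")
        b.WriteString("\"" + kw + "\"")
        if i < len(keywords)-1 {
            b.WriteString(" OR ")
        }
    }
    b.WriteString(")")
    return b.String()
}

func main() {
    fmt.Println(buildMatchExpr([]string{`digital garden`, `say "hello"`}))
    // Output: {content}:("digital garden" OR "say ""hello""")
}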
601  kernel/model/backup.go  (normal file)
@@ -0,0 +1,601 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package model

import (
    "bytes"
    "crypto/md5"
    "crypto/sha256"
    "encoding/hex"
    "errors"
    "fmt"
    "io"
    "io/fs"
    "os"
    "path/filepath"
    "strings"
    "time"

    "github.com/88250/gulu"
    "github.com/dustin/go-humanize"
    "github.com/siyuan-note/encryption"
    "github.com/siyuan-note/siyuan/kernel/filesys"
    "github.com/siyuan-note/siyuan/kernel/util"
)

type Backup struct {
    Size    int64  `json:"size"`
    HSize   string `json:"hSize"`
    Updated string `json:"updated"`
    SaveDir string `json:"saveDir"` // local directory path where backup data is stored
}

type Sync struct {
    Size      int64  `json:"size"`
    HSize     string `json:"hSize"`
    Updated   string `json:"updated"`
    CloudName string `json:"cloudName"` // name of the cloud directory holding sync data
    SaveDir   string `json:"saveDir"`   // local directory path where sync data is stored
}

func RemoveCloudBackup() (err error) {
    err = removeCloudDirPath("backup")
    return
}

func getCloudAvailableBackupSize() (size int64, err error) {
    var sync map[string]interface{}
    var assetSize int64
    sync, _, assetSize, err = getCloudSpaceOSS()
    if nil != err {
        return
    }

    var syncSize int64
    if nil != sync {
        syncSize = int64(sync["size"].(float64))
    }
    size = int64(Conf.User.UserSiYuanRepoSize) - syncSize - assetSize
    return
}

func GetCloudSpace() (s *Sync, b *Backup, hSize, hAssetSize, hTotalSize string, err error) {
    var sync, backup map[string]interface{}
    var assetSize int64
    sync, backup, assetSize, err = getCloudSpaceOSS()
    if nil != err {
        return nil, nil, "", "", "", errors.New(Conf.Language(30) + " " + err.Error())
    }

    var totalSize, syncSize, backupSize int64
    var syncUpdated, backupUpdated string
    if nil != sync {
        syncSize = int64(sync["size"].(float64))
        syncUpdated = sync["updated"].(string)
    }
    s = &Sync{
        Size:    syncSize,
        HSize:   humanize.Bytes(uint64(syncSize)),
        Updated: syncUpdated,
    }

    if nil != backup {
        backupSize = int64(backup["size"].(float64))
        backupUpdated = backup["updated"].(string)
    }
    b = &Backup{
        Size:    backupSize,
        HSize:   humanize.Bytes(uint64(backupSize)),
        Updated: backupUpdated,
    }
    totalSize = syncSize + backupSize + assetSize
    hAssetSize = humanize.Bytes(uint64(assetSize))
    hSize = humanize.Bytes(uint64(totalSize))
    hTotalSize = byteCountSI(int64(Conf.User.UserSiYuanRepoSize))
    return
}

func byteCountSI(b int64) string {
    const unit = 1000
    if b < unit {
        return fmt.Sprintf("%d B", b)
    }
    div, exp := int64(unit), 0
    for n := b / unit; n >= unit; n /= unit {
        div *= unit
        exp++
    }
    return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "kMGTPE"[exp])
}
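
// Editor's note: a quick illustration, not in the original commit, of the decimal (SI)
// formatting byteCountSI produces for the repo quota figure; humanize.Bytes used above for
// the measured sizes yields the same SI-style units.
func exampleByteCountSI() {
    fmt.Println(byteCountSI(999))     // "999 B"
    fmt.Println(byteCountSI(1234567)) // "1.2 MB"
}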

func GetLocalBackup() (ret *Backup, err error) {
    backupDir := Conf.Backup.GetSaveDir()
    if err = os.MkdirAll(backupDir, 0755); nil != err {
        return
    }

    backup, err := os.Stat(backupDir)
    ret = &Backup{
        Updated: backup.ModTime().Format("2006-01-02 15:04:05"),
        SaveDir: Conf.Backup.GetSaveDir(),
    }
    return
}

func RecoverLocalBackup() (err error) {
    if "" == Conf.E2EEPasswd {
        return errors.New(Conf.Language(11))
    }

    data := util.AESDecrypt(Conf.E2EEPasswd)
    data, _ = hex.DecodeString(string(data))
    passwd := string(data)

    syncLock.Lock()
    defer syncLock.Unlock()
    CloseWatchAssets()
    defer WatchAssets()

    // Automatically pause syncing while recovering from a backup so the just-recovered data is not overwritten by a sync https://github.com/siyuan-note/siyuan/issues/4773
    syncEnabled := Conf.Sync.Enabled
    Conf.Sync.Enabled = false
    Conf.Save()

    filesys.ReleaseAllFileLocks()

    util.PushEndlessProgress(Conf.Language(63))
    util.LogInfof("starting recovery...")
    start := time.Now()

    decryptedDataDir, err := decryptDataDir(passwd)
    if nil != err {
        return
    }

    newDataDir := filepath.Join(util.WorkspaceDir, "data.new")
    os.RemoveAll(newDataDir)
    if err = os.MkdirAll(newDataDir, 0755); nil != err {
        util.ClearPushProgress(100)
        return
    }

    if err = stableCopy(decryptedDataDir, newDataDir); nil != err {
        util.ClearPushProgress(100)
        return
    }

    oldDataDir := filepath.Join(util.WorkspaceDir, "data.old")
    if err = os.RemoveAll(oldDataDir); nil != err {
        util.ClearPushProgress(100)
        return
    }

    // Generate a history entry when recovering from a backup https://github.com/siyuan-note/siyuan/issues/4752
    if gulu.File.IsExist(util.DataDir) {
        var historyDir string
        historyDir, err = util.GetHistoryDir("backup")
        if nil != err {
            util.LogErrorf("get history dir failed: %s", err)
            util.ClearPushProgress(100)
            return
        }

        var dirs []os.DirEntry
        dirs, err = os.ReadDir(util.DataDir)
        if nil != err {
            util.LogErrorf("read dir [%s] failed: %s", util.DataDir, err)
            util.ClearPushProgress(100)
            return
        }
        for _, dir := range dirs {
            from := filepath.Join(util.DataDir, dir.Name())
            to := filepath.Join(historyDir, dir.Name())
            if err = os.Rename(from, to); nil != err {
                util.LogErrorf("rename [%s] to [%s] failed: %s", from, to, err)
                util.ClearPushProgress(100)
                return
            }
        }
    }

    if gulu.File.IsExist(util.DataDir) {
        if err = os.RemoveAll(util.DataDir); nil != err {
            util.LogErrorf("remove [%s] failed: %s", util.DataDir, err)
            util.ClearPushProgress(100)
            return
        }
    }

    if err = os.Rename(newDataDir, util.DataDir); nil != err {
        util.ClearPushProgress(100)
        util.LogErrorf("rename data dir from [%s] to [%s] failed: %s", newDataDir, util.DataDir, err)
        return
    }

    elapsed := time.Now().Sub(start).Seconds()
    size, _ := util.SizeOfDirectory(util.DataDir, false)
    sizeStr := humanize.Bytes(uint64(size))
    util.LogInfof("recovered backup [size=%s] in [%.2fs]", sizeStr, elapsed)

    util.PushEndlessProgress(Conf.Language(62))
    time.Sleep(2 * time.Second)
    refreshFileTree()
    if syncEnabled {
        func() {
            time.Sleep(5 * time.Second)
            util.PushMsg(Conf.Language(134), 7000)
        }()
    }
    return
}

func CreateLocalBackup() (err error) {
    if "" == Conf.E2EEPasswd {
        return errors.New(Conf.Language(11))
    }

    defer util.ClearPushProgress(100)
    util.PushEndlessProgress(Conf.Language(22))

    WaitForWritingFiles()
    syncLock.Lock()
    defer syncLock.Unlock()

    filesys.ReleaseAllFileLocks()

    util.LogInfof("creating backup...")
    start := time.Now()
    data := util.AESDecrypt(Conf.E2EEPasswd)
    data, _ = hex.DecodeString(string(data))
    passwd := string(data)
    encryptedDataDir, err := encryptDataDir(passwd)
    if nil != err {
        util.LogErrorf("encrypt data dir failed: %s", err)
        err = errors.New(fmt.Sprintf(Conf.Language(23), formatErrorMsg(err)))
        return
    }

    newBackupDir := Conf.Backup.GetSaveDir() + ".new"
    os.RemoveAll(newBackupDir)
    if err = os.MkdirAll(newBackupDir, 0755); nil != err {
        err = errors.New(fmt.Sprintf(Conf.Language(23), formatErrorMsg(err)))
        return
    }

    if err = stableCopy(encryptedDataDir, newBackupDir); nil != err {
        util.LogErrorf("copy encrypted data dir from [%s] to [%s] failed: %s", encryptedDataDir, newBackupDir, err)
        err = errors.New(fmt.Sprintf(Conf.Language(23), formatErrorMsg(err)))
        return
    }

    err = genCloudIndex(newBackupDir, map[string]bool{})
    if nil != err {
        return
    }

    conf := map[string]interface{}{"updated": time.Now().UnixMilli()}
    data, err = gulu.JSON.MarshalJSON(conf)
    if nil != err {
        util.LogErrorf("marshal backup conf.json failed: %s", err)
    } else {
        confPath := filepath.Join(newBackupDir, "conf.json")
        if err = os.WriteFile(confPath, data, 0644); nil != err {
            util.LogErrorf("write backup conf.json [%s] failed: %s", confPath, err)
        }
    }

    oldBackupDir := Conf.Backup.GetSaveDir() + ".old"
    os.RemoveAll(oldBackupDir)

    backupDir := Conf.Backup.GetSaveDir()
    if gulu.File.IsExist(backupDir) {
        if err = os.Rename(backupDir, oldBackupDir); nil != err {
            util.LogErrorf("rename backup dir from [%s] to [%s] failed: %s", backupDir, oldBackupDir, err)
            err = errors.New(fmt.Sprintf(Conf.Language(23), formatErrorMsg(err)))
            return
        }
    }
    if err = os.Rename(newBackupDir, backupDir); nil != err {
        util.LogErrorf("rename backup dir from [%s] to [%s] failed: %s", newBackupDir, backupDir, err)
        err = errors.New(fmt.Sprintf(Conf.Language(23), formatErrorMsg(err)))
        return
    }
    os.RemoveAll(oldBackupDir)
    elapsed := time.Now().Sub(start).Seconds()
    size, _ := util.SizeOfDirectory(backupDir, false)
    sizeStr := humanize.Bytes(uint64(size))
    util.LogInfof("created backup [size=%s] in [%.2fs]", sizeStr, elapsed)

    util.PushEndlessProgress(Conf.Language(21))
    time.Sleep(2 * time.Second)
    return
}
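
// Editor's note: both RecoverLocalBackup and CreateLocalBackup above rely on the same
// write-to-".new", park-the-old, rename-into-place sequence so a failed run never leaves a
// half-written directory at the final path. A minimal standalone sketch of that pattern
// (swapDirIntoPlace is a hypothetical helper, stdlib plus gulu only):
func swapDirIntoPlace(newDir, target string) error {
    old := target + ".old"
    if err := os.RemoveAll(old); nil != err {
        return err
    }
    if gulu.File.IsExist(target) {
        if err := os.Rename(target, old); nil != err {
            return err
        }
    }
    if err := os.Rename(newDir, target); nil != err {
        return err
    }
    return os.RemoveAll(old)
}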

func DownloadBackup() (err error) {
    syncLock.Lock()
    defer syncLock.Unlock()

    // Use the index file to verify decryption https://github.com/siyuan-note/siyuan/issues/3789
    var tmpFetchedFiles int
    var tmpTransferSize uint64
    err = ossDownload0(util.TempDir+"/backup", "backup", "/"+pathJSON, &tmpFetchedFiles, &tmpTransferSize, false)
    if nil != err {
        return
    }
    data, err := os.ReadFile(filepath.Join(util.TempDir, "/backup/"+pathJSON))
    if nil != err {
        return
    }
    passwdData, _ := hex.DecodeString(string(util.AESDecrypt(Conf.E2EEPasswd)))
    passwd := string(passwdData)
    data, err = encryption.AESGCMDecryptBinBytes(data, passwd)
    if nil != err {
        err = errors.New(Conf.Language(28))
        return
    }

    localDirPath := Conf.Backup.GetSaveDir()
    util.PushEndlessProgress(Conf.Language(68))
    start := time.Now()
    fetchedFiles, transferSize, err := ossDownload(localDirPath, "backup", false)
    if nil == err {
        elapsed := time.Now().Sub(start).Seconds()
        util.LogInfof("downloaded backup [fetchedFiles=%d, transferSize=%s] in [%.2fs]", fetchedFiles, humanize.Bytes(transferSize), elapsed)
        util.PushEndlessProgress(Conf.Language(69))
    }
    return
}

func UploadBackup() (err error) {
    defer util.ClearPushProgress(100)

    if err = checkUploadBackup(); nil != err {
        return
    }

    syncLock.Lock()
    defer syncLock.Unlock()

    localDirPath := Conf.Backup.GetSaveDir()
    util.PushEndlessProgress(Conf.Language(61))
    util.LogInfof("uploading backup...")
    start := time.Now()
    wroteFiles, transferSize, err := ossUpload(localDirPath, "backup", "", false)
    if nil == err {
        elapsed := time.Now().Sub(start).Seconds()
        util.LogInfof("uploaded backup [wroteFiles=%d, transferSize=%s] in [%.2fs]", wroteFiles, humanize.Bytes(transferSize), elapsed)
        util.PushEndlessProgress(Conf.Language(41))
        time.Sleep(2 * time.Second)
        return
    }
    err = errors.New(formatErrorMsg(err))
    return
}

var pathJSON = fmt.Sprintf("%x", md5.Sum([]byte("paths.json"))) // 6952277a5a37c17aa6a7c6d86cd507b1

func encryptDataDir(passwd string) (encryptedDataDir string, err error) {
    encryptedDataDir = filepath.Join(util.WorkspaceDir, "incremental", "backup-encrypt")
    if err = os.RemoveAll(encryptedDataDir); nil != err {
        return
    }
    if err = os.MkdirAll(encryptedDataDir, 0755); nil != err {
        return
    }

    ctime := map[string]time.Time{}
    metaJSON := map[string]string{}
    filepath.Walk(util.DataDir, func(path string, info fs.FileInfo, _ error) error {
        if util.DataDir == path {
            return nil
        }

        if isCloudSkipFile(path, info) {
            if info.IsDir() {
                return filepath.SkipDir
            }
            return nil
        }

        plainP := strings.TrimPrefix(path, util.DataDir+string(os.PathSeparator))
        p := plainP
        parts := strings.Split(p, string(os.PathSeparator))
        buf := bytes.Buffer{}
        for i, part := range parts {
            buf.WriteString(fmt.Sprintf("%x", sha256.Sum256([]byte(part)))[:7])
            if i < len(parts)-1 {
                buf.WriteString(string(os.PathSeparator))
            }
        }
        p = buf.String()
        metaJSON[filepath.ToSlash(p)] = filepath.ToSlash(plainP)
        p = encryptedDataDir + string(os.PathSeparator) + p

        if info.IsDir() {
            if err = os.MkdirAll(p, 0755); nil != err {
                return io.EOF
            }
            if fi, err0 := os.Stat(path); nil == err0 {
                ctime[p] = fi.ModTime()
            }
        } else {
            if err = os.MkdirAll(filepath.Dir(p), 0755); nil != err {
                return io.EOF
            }

            f, err0 := os.Create(p)
            if nil != err0 {
                util.LogErrorf("create file [%s] failed: %s", p, err0)
                err = err0
                return io.EOF
            }
            data, err0 := os.ReadFile(path)
            if nil != err0 {
                util.LogErrorf("read file [%s] failed: %s", path, err0)
                err = err0
                return io.EOF
            }
            data, err0 = encryption.AESGCMEncryptBinBytes(data, passwd)
            if nil != err0 {
                util.LogErrorf("encrypt file [%s] failed: %s", path, err0)
                err = errors.New("encrypt file failed")
                return io.EOF
            }
            if _, err0 = f.Write(data); nil != err0 {
                util.LogErrorf("write file [%s] failed: %s", p, err0)
                err = err0
                return io.EOF
            }
            if err0 = f.Close(); nil != err0 {
                util.LogErrorf("close file [%s] failed: %s", p, err0)
                err = err0
                return io.EOF
            }

            fi, err0 := os.Stat(path)
            if nil != err0 {
                util.LogErrorf("stat file [%s] failed: %s", path, err0)
                err = err0
                return io.EOF
            }
            ctime[p] = fi.ModTime()
        }
        return nil
    })
    if nil != err {
        return
    }

    for p, t := range ctime {
        if err = os.Chtimes(p, t, t); nil != err {
            return
        }
    }

    // Check that every file has been indexed
    err = filepath.Walk(encryptedDataDir, func(path string, info fs.FileInfo, _ error) error {
        if encryptedDataDir == path {
            return nil
        }

        path = strings.TrimPrefix(path, encryptedDataDir+string(os.PathSeparator))
        path = filepath.ToSlash(path)
        if _, ok := metaJSON[path]; !ok {
            util.LogErrorf("not found backup path in meta [%s]", path)
            return errors.New(Conf.Language(27))
        }
        return nil
    })

    if nil != err {
        return
    }

    data, err := gulu.JSON.MarshalJSON(metaJSON)
    if nil != err {
        return
    }
    data, err = encryption.AESGCMEncryptBinBytes(data, passwd)
    if nil != err {
        return "", errors.New("encrypt file failed")
    }
    meta := filepath.Join(encryptedDataDir, pathJSON)
    if err = gulu.File.WriteFileSafer(meta, data, 0644); nil != err {
        return
    }
    return
}
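
// Editor's note: an illustrative restatement, not part of the original commit, of the path
// obfuscation used in encryptDataDir above: every path segment is replaced by the first 7 hex
// characters of its SHA-256 digest, so the directory layout is preserved while names are
// hidden, and the (itself encrypted) paths.json maps obfuscated paths back to plain ones.
func obfuscatePath(plain string) string {
    parts := strings.Split(plain, string(os.PathSeparator))
    for i, part := range parts {
        parts[i] = fmt.Sprintf("%x", sha256.Sum256([]byte(part)))[:7]
    }
    return strings.Join(parts, string(os.PathSeparator))
}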

func decryptDataDir(passwd string) (decryptedDataDir string, err error) {
    decryptedDataDir = filepath.Join(util.WorkspaceDir, "incremental", "backup-decrypt")
    if err = os.RemoveAll(decryptedDataDir); nil != err {
        return
    }

    backupDir := Conf.Backup.GetSaveDir()
    meta := filepath.Join(backupDir, pathJSON)
    data, err := os.ReadFile(meta)
    if nil != err {
        return
    }
    data, err = encryption.AESGCMDecryptBinBytes(data, passwd)
    if nil != err {
        return "", errors.New(Conf.Language(40))
    }

    metaJSON := map[string]string{}
    if err = gulu.JSON.UnmarshalJSON(data, &metaJSON); nil != err {
        return
    }

    modTimes := map[string]time.Time{}
    err = filepath.Walk(backupDir, func(path string, info fs.FileInfo, _ error) error {
        if backupDir == path || pathJSON == info.Name() || strings.HasSuffix(info.Name(), ".json") {
            return nil
        }

        encryptedP := strings.TrimPrefix(path, backupDir+string(os.PathSeparator))
        encryptedP = filepath.ToSlash(encryptedP)
        plainP := filepath.Join(decryptedDataDir, metaJSON[encryptedP])
        plainP = filepath.FromSlash(plainP)

        if info.IsDir() {
            if err = os.MkdirAll(plainP, 0755); nil != err {
                return io.EOF
            }
        } else {
            if err = os.MkdirAll(filepath.Dir(plainP), 0755); nil != err {
                return io.EOF
            }

            var err0 error
            data, err0 = os.ReadFile(path)
            if nil != err0 {
                util.LogErrorf("read file [%s] failed: %s", path, err0)
                err = err0
                return io.EOF
            }
            data, err0 = encryption.AESGCMDecryptBinBytes(data, passwd)
            if nil != err0 {
                util.LogErrorf("decrypt file [%s] failed: %s", path, err0)
                err = errors.New(Conf.Language(40))
                return io.EOF
            }
            if err0 = os.WriteFile(plainP, data, 0644); nil != err0 {
                util.LogErrorf("write file [%s] failed: %s", plainP, err0)
                err = err0
                return io.EOF
            }
        }

        fi, err0 := os.Stat(path)
        if nil != err0 {
            util.LogErrorf("stat file [%s] failed: %s", path, err0)
            err = err0
            return io.EOF
        }
        modTimes[plainP] = fi.ModTime()
        return nil
    })

    for plainP, modTime := range modTimes {
        if err = os.Chtimes(plainP, modTime, modTime); nil != err {
            return
        }
    }
    return
}

222
kernel/model/bazzar.go
Normal file
@@ -0,0 +1,222 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package model

import (
    "errors"
    "fmt"
    "path/filepath"

    "github.com/88250/gulu"
    "github.com/siyuan-note/siyuan/kernel/util"

    "github.com/siyuan-note/siyuan/kernel/bazaar"
)

func GetPackageREADME(repoURL, repoHash string) (ret string) {
    ret = bazaar.GetPackageREADME(repoURL, repoHash, Conf.System.NetworkProxy.String(), IsSubscriber(), Conf.System.ID)
    return
}

func BazaarWidgets() (widgets []*bazaar.Widget) {
    widgets = bazaar.Widgets(Conf.System.NetworkProxy.String())
    for _, widget := range widgets {
        widget.Installed = gulu.File.IsDir(filepath.Join(util.DataDir, "widgets", widget.Name))
        if widget.Installed {
            if widgetConf, err := widgetJSON(widget.Name); nil == err && nil != widgetConf {
                if widget.Version != widgetConf["version"].(string) {
                    widget.Outdated = true
                }
            }
        }
    }
    return
}

func InstallBazaarWidget(repoURL, repoHash, widgetName string) error {
    syncLock.Lock()
    defer syncLock.Unlock()

    installPath := filepath.Join(util.DataDir, "widgets", widgetName)
    err := bazaar.InstallWidget(repoURL, repoHash, installPath, Conf.System.NetworkProxy.String(), IsSubscriber(), Conf.System.ID)
    if nil != err {
        return errors.New(fmt.Sprintf(Conf.Language(46), widgetName))
    }
    return nil
}

func UninstallBazaarWidget(widgetName string) error {
    syncLock.Lock()
    defer syncLock.Unlock()

    installPath := filepath.Join(util.DataDir, "widgets", widgetName)
    err := bazaar.UninstallWidget(installPath)
    if nil != err {
        return errors.New(fmt.Sprintf(Conf.Language(47), err.Error()))
    }
    return nil
}

func BazaarIcons() (icons []*bazaar.Icon) {
    icons = bazaar.Icons(Conf.System.NetworkProxy.String())
    for _, installed := range Conf.Appearance.Icons {
        for _, icon := range icons {
            if installed == icon.Name {
                icon.Installed = true
                if themeConf, err := iconJSON(icon.Name); nil == err {
                    if icon.Version != themeConf["version"].(string) {
                        icon.Outdated = true
                    }
                }
            }
            icon.Current = icon.Name == Conf.Appearance.Icon
        }
    }
    return
}

func InstallBazaarIcon(repoURL, repoHash, iconName string) error {
    syncLock.Lock()
    defer syncLock.Unlock()

    installPath := filepath.Join(util.IconsPath, iconName)
    err := bazaar.InstallIcon(repoURL, repoHash, installPath, Conf.System.NetworkProxy.String(), IsSubscriber(), Conf.System.ID)
    if nil != err {
        return errors.New(fmt.Sprintf(Conf.Language(46), iconName))
    }
    Conf.Appearance.Icon = iconName
    Conf.Save()
    InitAppearance()
    return nil
}

func UninstallBazaarIcon(iconName string) error {
    syncLock.Lock()
    defer syncLock.Unlock()

    installPath := filepath.Join(util.IconsPath, iconName)
    err := bazaar.UninstallIcon(installPath)
    if nil != err {
        return errors.New(fmt.Sprintf(Conf.Language(47), err.Error()))
    }

    InitAppearance()
    return nil
}

func BazaarThemes() (ret []*bazaar.Theme) {
    ret = bazaar.Themes(Conf.System.NetworkProxy.String())
    installs := Conf.Appearance.DarkThemes
    installs = append(installs, Conf.Appearance.LightThemes...)
    for _, installed := range installs {
        for _, theme := range ret {
            if installed == theme.Name {
                theme.Installed = true
                if themeConf, err := themeJSON(theme.Name); nil == err {
                    theme.Outdated = theme.Version != themeConf["version"].(string)
                }
                theme.Current = theme.Name == Conf.Appearance.ThemeDark || theme.Name == Conf.Appearance.ThemeLight
            }
        }
    }
    return
}

func InstallBazaarTheme(repoURL, repoHash, themeName string, mode int, update bool) error {
    syncLock.Lock()
    defer syncLock.Unlock()

    closeThemeWatchers()

    installPath := filepath.Join(util.ThemesPath, themeName)
    err := bazaar.InstallTheme(repoURL, repoHash, installPath, Conf.System.NetworkProxy.String(), IsSubscriber(), Conf.System.ID)
    if nil != err {
        return errors.New(fmt.Sprintf(Conf.Language(46), themeName))
    }

    if !update {
        // No need to switch to the theme after merely updating it https://github.com/siyuan-note/siyuan/issues/4966
        if 0 == mode {
            Conf.Appearance.ThemeLight = themeName
        } else {
            Conf.Appearance.ThemeDark = themeName
        }
        Conf.Appearance.Mode = mode
        Conf.Appearance.ThemeJS = gulu.File.IsExist(filepath.Join(installPath, "theme.js"))
        Conf.Save()
    }

    InitAppearance()
    return nil
}

func UninstallBazaarTheme(themeName string) error {
    syncLock.Lock()
    defer syncLock.Unlock()

    closeThemeWatchers()

    installPath := filepath.Join(util.ThemesPath, themeName)
    err := bazaar.UninstallTheme(installPath)
    if nil != err {
        return errors.New(fmt.Sprintf(Conf.Language(47), err.Error()))
    }

    InitAppearance()
    return nil
}

func BazaarTemplates() (templates []*bazaar.Template) {
    templates = bazaar.Templates(Conf.System.NetworkProxy.String())
    for _, template := range templates {
        template.Installed = gulu.File.IsExist(filepath.Join(util.DataDir, "templates", template.Name))
        if template.Installed {
            if themeConf, err := templateJSON(template.Name); nil == err && nil != themeConf {
                if template.Version != themeConf["version"].(string) {
                    template.Outdated = true
                }
            }
        }
    }
    return
}

func InstallBazaarTemplate(repoURL, repoHash, templateName string) error {
    syncLock.Lock()
    defer syncLock.Unlock()

    installPath := filepath.Join(util.DataDir, "templates", templateName)
    err := bazaar.InstallTemplate(repoURL, repoHash, installPath, Conf.System.NetworkProxy.String(), IsSubscriber(), Conf.System.ID)
    if nil != err {
        return errors.New(fmt.Sprintf(Conf.Language(46), templateName))
    }
    return nil
}

func UninstallBazaarTemplate(templateName string) error {
    syncLock.Lock()
    defer syncLock.Unlock()

    installPath := filepath.Join(util.DataDir, "templates", templateName)
    err := bazaar.UninstallTemplate(installPath)
    if nil != err {
        return errors.New(fmt.Sprintf(Conf.Language(47), err.Error()))
    }
    return nil
}
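
// Editor's note: the Outdated checks above assert conf["version"].(string) directly, which
// would panic if a package ever shipped a non-string "version" in its JSON. A hedged sketch
// of a more defensive comparison (isOutdated is a hypothetical helper, not in the original
// commit):
func isOutdated(bazaarVersion string, conf map[string]interface{}) bool {
    installed, ok := conf["version"].(string)
    if !ok {
        return false // malformed or missing version: treat as up to date rather than panic
    }
    return bazaarVersion != installed
}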

179
kernel/model/block.go
Normal file
@@ -0,0 +1,179 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package model

import (
    "github.com/88250/lute"
    "github.com/88250/lute/ast"
    "github.com/siyuan-note/siyuan/kernel/sql"
    "github.com/siyuan-note/siyuan/kernel/treenode"
)

// Block describes a content block.
type Block struct {
    Box      string            `json:"box"`
    Path     string            `json:"path"`
    HPath    string            `json:"hPath"`
    ID       string            `json:"id"`
    RootID   string            `json:"rootID"`
    ParentID string            `json:"parentID"`
    Name     string            `json:"name"`
    Alias    string            `json:"alias"`
    Memo     string            `json:"memo"`
    Tag      string            `json:"tag"`
    Content  string            `json:"content"`
    FContent string            `json:"fcontent"`
    Markdown string            `json:"markdown"`
    Folded   bool              `json:"folded"`
    Type     string            `json:"type"`
    SubType  string            `json:"subType"`
    RefText  string            `json:"refText"`
    Defs     []*Block          `json:"-"`    // blocks referenced by this block; excluded from JSON to avoid circular references when serializing
    Refs     []*Block          `json:"refs"` // blocks that reference this block
    DefID    string            `json:"defID"`
    DefPath  string            `json:"defPath"`
    IAL      map[string]string `json:"ial"`
    Children []*Block          `json:"children"`
    Depth    int               `json:"depth"`
    Count    int               `json:"count"`
}

func (block *Block) IsContainerBlock() bool {
    switch block.Type {
    case "NodeDocument", "NodeBlockquote", "NodeList", "NodeListItem", "NodeSuperBlock":
        return true
    }
    return false
}

type Path struct {
    ID       string   `json:"id"`       // block ID
    Box      string   `json:"box"`      // block box
    Name     string   `json:"name"`     // current path
    Full     string   `json:"full"`     // full path
    Type     string   `json:"type"`     // "path"
    NodeType string   `json:"nodeType"` // node type
    SubType  string   `json:"subType"`  // node subtype
    Blocks   []*Block `json:"blocks"`   // child block nodes
    Children []*Path  `json:"children"` // child path nodes
    Depth    int      `json:"depth"`    // nesting depth
    Count    int      `json:"count"`    // child block count
}

func RecentUpdatedBlocks() (ret []*Block) {
    ret = []*Block{}

    sqlBlocks := sql.QueryRecentUpdatedBlocks()
    if 1 > len(sqlBlocks) {
        return
    }

    ret = fromSQLBlocks(&sqlBlocks, "", 0)
    return
}

func GetBlockDOM(id string) (ret string) {
    if "" == id {
        return
    }

    tree, err := loadTreeByBlockID(id)
    if nil != err {
        return
    }
    node := treenode.GetNodeInTree(tree, id)
    luteEngine := NewLute()
    ret = lute.RenderNodeBlockDOM(node, luteEngine.ParseOptions, luteEngine.RenderOptions)
    return
}

func GetBlock(id string) (ret *Block, err error) {
    ret, err = getBlock(id)
    return
}

func getBlock(id string) (ret *Block, err error) {
    if "" == id {
        return
    }

    tree, err := loadTreeByBlockID(id)
    if nil != err {
        return
    }

    node := treenode.GetNodeInTree(tree, id)
    sqlBlock := sql.BuildBlockFromNode(node, tree)
    if nil == sqlBlock {
        return
    }
    ret = fromSQLBlock(sqlBlock, "", 0)
    return
}

func getBlockRendered(id string, headingMode int) (ret *Block) {
    tree, _ := loadTreeByBlockID(id)
    if nil == tree {
        return
    }
    def := treenode.GetNodeInTree(tree, id)
    if nil == def {
        return
    }

    var unlinks, nodes []*ast.Node
    ast.Walk(def, func(n *ast.Node, entering bool) ast.WalkStatus {
        if !entering {
            return ast.WalkContinue
        }

        if ast.NodeHeading == n.Type {
            if "1" == n.IALAttr("fold") {
                children := treenode.FoldedHeadingChildren(n)
                for _, c := range children {
                    unlinks = append(unlinks, c)
                }
            }
        }
        return ast.WalkContinue
    })
    for _, n := range unlinks {
        n.Unlink()
    }
    nodes = append(nodes, def)
    if 0 == headingMode && ast.NodeHeading == def.Type && "1" != def.IALAttr("fold") {
        children := treenode.HeadingChildren(def)
        for _, c := range children {
            if "1" == c.IALAttr("heading-fold") {
                // When an embed block contains a folded heading, the blocks under it should not be shown https://github.com/siyuan-note/siyuan/issues/4765
                continue
            }
            nodes = append(nodes, c)
        }
    }

    b := treenode.GetBlockTree(def.ID)
    if nil == b {
        return
    }

    luteEngine := NewLute()
    luteEngine.RenderOptions.ProtyleContenteditable = false // not editable
    dom := renderBlockDOMByNodes(nodes, luteEngine)
    ret = &Block{Box: def.Box, Path: def.Path, HPath: b.HPath, ID: def.ID, Type: def.Type.String(), Content: dom}
    return
}

186
kernel/model/blockial.go
Normal file
@@ -0,0 +1,186 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package model

import (
    "errors"
    "fmt"
    "time"

    "github.com/88250/gulu"
    "github.com/88250/lute/ast"
    "github.com/88250/lute/html"
    "github.com/88250/lute/lex"
    "github.com/88250/lute/parse"
    "github.com/araddon/dateparse"
    "github.com/siyuan-note/siyuan/kernel/cache"
    "github.com/siyuan-note/siyuan/kernel/treenode"
    "github.com/siyuan-note/siyuan/kernel/util"
)

func SetBlockReminder(id string, timed string) (err error) {
    if !IsSubscriber() {
        if "ios" == util.Container {
            return errors.New(Conf.Language(122))
        }
        return errors.New(Conf.Language(29))
    }

    var timedMills int64
    if "0" != timed {
        t, e := dateparse.ParseIn(timed, time.Now().Location())
        if nil != e {
            return e
        }
        timedMills = t.UnixMilli()
    }

    attrs := GetBlockAttrs(id) // getting attributes waits for pending tree writes
    tree, err := loadTreeByBlockID(id)
    if nil != err {
        return
    }

    node := treenode.GetNodeInTree(tree, id)
    if nil == node {
        return errors.New(fmt.Sprintf(Conf.Language(15), id))
    }

    if ast.NodeDocument != node.Type && node.IsContainerBlock() {
        node = treenode.FirstLeafBlock(node)
    }
    content := treenode.NodeStaticContent(node)
    content = gulu.Str.SubStr(content, 128)
    err = SetCloudBlockReminder(id, content, timedMills)
    if nil != err {
        return
    }

    attrName := "custom-reminder-wechat"
    if "0" == timed {
        delete(attrs, attrName)
        old := node.IALAttr(attrName)
        oldTimedMills, e := dateparse.ParseIn(old, time.Now().Location())
        if nil == e {
            util.PushMsg(fmt.Sprintf(Conf.Language(109), oldTimedMills.Format("2006-01-02 15:04")), 3000)
        }
        node.RemoveIALAttr(attrName)
    } else {
        attrs[attrName] = timed
        node.SetIALAttr(attrName, timed)
        util.PushMsg(fmt.Sprintf(Conf.Language(101), time.UnixMilli(timedMills).Format("2006-01-02 15:04")), 5000)
    }
    if err = indexWriteJSONQueue(tree); nil != err {
        return
    }
    IncWorkspaceDataVer()
    cache.PutBlockIAL(id, attrs)
    return
}

func SetBlockAttrs(id string, nameValues map[string]string) (err error) {
    WaitForWritingFiles()

    tree, err := loadTreeByBlockID(id)
    if nil != err {
        return err
    }

    node := treenode.GetNodeInTree(tree, id)
    if nil == node {
        return errors.New(fmt.Sprintf(Conf.Language(15), id))
    }

    for name := range nameValues {
        for i := 0; i < len(name); i++ {
            if !lex.IsASCIILetterNumHyphen(name[i]) {
                return errors.New(fmt.Sprintf(Conf.Language(25), id))
            }
        }
    }

    for name, value := range nameValues {
        if "" == value {
            node.RemoveIALAttr(name)
        } else {
            node.SetIALAttr(name, html.EscapeAttrVal(value))
        }
    }

    if err = indexWriteJSONQueue(tree); nil != err {
        return
    }
    IncWorkspaceDataVer()
    cache.PutBlockIAL(id, parse.IAL2Map(node.KramdownIAL))
    return
}
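
// Editor's note: as the validation loops above show, custom attribute names are restricted to
// ASCII letters, digits and hyphens via lex.IsASCIILetterNumHyphen. A small illustration of
// that rule pulled out as a standalone check (isValidAttrName is a hypothetical helper, not
// in the original commit): isValidAttrName("custom-priority") is true, while a name with
// spaces or non-ASCII characters is rejected.
func isValidAttrName(name string) bool {
    for i := 0; i < len(name); i++ {
        if !lex.IsASCIILetterNumHyphen(name[i]) {
            return false
        }
    }
    return true
}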

func ResetBlockAttrs(id string, nameValues map[string]string) (err error) {
    tree, err := loadTreeByBlockID(id)
    if nil != err {
        return err
    }

    node := treenode.GetNodeInTree(tree, id)
    if nil == node {
        return errors.New(fmt.Sprintf(Conf.Language(15), id))
    }

    for name := range nameValues {
        for i := 0; i < len(name); i++ {
            if !lex.IsASCIILetterNumHyphen(name[i]) {
                return errors.New(fmt.Sprintf(Conf.Language(25), id))
            }
        }
    }

    node.ClearIALAttrs()
    for name, value := range nameValues {
        if "" != value {
            node.SetIALAttr(name, value)
        }
    }

    if err = indexWriteJSONQueue(tree); nil != err {
        return
    }
    IncWorkspaceDataVer()
    cache.RemoveBlockIAL(id)
    return
}

func GetBlockAttrs(id string) (ret map[string]string) {
    ret = map[string]string{}
    if cached := cache.GetBlockIAL(id); nil != cached {
        ret = cached
        return
    }

    WaitForWritingFiles()

    tree, err := loadTreeByBlockID(id)
    if nil != err {
        return
    }

    node := treenode.GetNodeInTree(tree, id)
    for _, kv := range node.KramdownIAL {
        ret[kv[0]] = html.UnescapeAttrVal(kv[1])
    }
    cache.PutBlockIAL(id, ret)
    return
}

249
kernel/model/blockinfo.go
Normal file
@@ -0,0 +1,249 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package model

import (
    "os"
    "path"
    "path/filepath"
    "sort"
    "strings"
    "unicode/utf8"

    "github.com/88250/gulu"
    "github.com/88250/lute/ast"
    "github.com/88250/lute/html"
    "github.com/88250/lute/parse"
    "github.com/siyuan-note/siyuan/kernel/sql"
    "github.com/siyuan-note/siyuan/kernel/treenode"
    "github.com/siyuan-note/siyuan/kernel/util"
)

type BlockInfo struct {
    ID           string            `json:"id"`
    Name         string            `json:"name"`
    RefCount     int               `json:"refCount"`
    SubFileCount int               `json:"subFileCount"`
    RefIDs       []string          `json:"refIDs"`
    IAL          map[string]string `json:"ial"`
    Icon         string            `json:"icon"`
}

func GetDocInfo(rootID string) (ret *BlockInfo) {
    WaitForWritingFiles()

    tree, err := loadTreeByBlockID(rootID)
    if nil != err {
        util.LogErrorf("load tree by block id failed: %s", err)
        return
    }

    title := tree.Root.IALAttr("title")
    ret = &BlockInfo{ID: rootID, Name: title}
    ret.IAL = parse.IAL2Map(tree.Root.KramdownIAL)
    ret.RefIDs, _ = sql.QueryRefIDsByDefID(rootID, false)
    ret.RefCount = len(ret.RefIDs)

    var subFileCount int
    boxLocalPath := filepath.Join(util.DataDir, tree.Box)
    subFiles, err := os.ReadDir(filepath.Join(boxLocalPath, strings.TrimSuffix(tree.Path, ".sy")))
    if nil == err {
        for _, subFile := range subFiles {
            if strings.HasSuffix(subFile.Name(), ".sy") {
                subFileCount++
            }
        }
    }
    ret.SubFileCount = subFileCount
    ret.Icon = tree.Root.IALAttr("icon")
    return
}

func GetBlockRefText(id string) string {
    WaitForWritingFiles()

    bt := treenode.GetBlockTree(id)
    if nil == bt {
        return ErrBlockNotFound.Error()
    }

    tree, err := loadTreeByBlockID(id)
    if nil != err {
        return ErrTreeNotFound.Error()
    }

    node := treenode.GetNodeInTree(tree, id)
    if nil == node {
        return ErrBlockNotFound.Error()
    }

    if name := node.IALAttr("name"); "" != name {
        return name
    }

    switch node.Type {
    case ast.NodeBlockQueryEmbed:
        return "Query Embed Block..."
    case ast.NodeIFrame:
        return "IFrame..."
    case ast.NodeThematicBreak:
        return "Thematic Break..."
    case ast.NodeVideo:
        return "Video..."
    case ast.NodeAudio:
        return "Audio..."
    }

    if ast.NodeDocument != node.Type && node.IsContainerBlock() {
        node = treenode.FirstLeafBlock(node)
    }
    ret := renderBlockText(node)
    if Conf.Editor.BlockRefDynamicAnchorTextMaxLen < utf8.RuneCountInString(ret) {
        ret = gulu.Str.SubStr(ret, Conf.Editor.BlockRefDynamicAnchorTextMaxLen) + "..."
    }
    return ret
}

func GetBlockRefIDs(id string) (refIDs, refTexts, defIDs []string) {
    refIDs = []string{}
    bt := treenode.GetBlockTree(id)
    if nil == bt {
        return
    }

    isDoc := bt.ID == bt.RootID
    refIDs, refTexts = sql.QueryRefIDsByDefID(id, isDoc)
    if isDoc {
        defIDs = sql.QueryChildDefIDsByRootDefID(id)
    } else {
        defIDs = append(defIDs, id)
    }
    return
}

func GetBlockRefIDsByFileAnnotationID(id string) (refIDs, refTexts []string) {
    refIDs, refTexts = sql.QueryRefIDsByAnnotationID(id)
    return
}

func GetBlockDefIDsByRefText(refText string, excludeIDs []string) (ret []string) {
    ret = sql.QueryBlockDefIDsByRefText(refText, excludeIDs)
    sort.Sort(sort.Reverse(sort.StringSlice(ret)))
    return
}

type BlockPath struct {
    ID       string       `json:"id"`
    Name     string       `json:"name"`
    Type     string       `json:"type"`
    SubType  string       `json:"subType"`
    Children []*BlockPath `json:"children"`
}

func BuildBlockBreadcrumb(id string) (ret []*BlockPath, err error) {
    ret = []*BlockPath{}
    tree, err := loadTreeByBlockID(id)
    if nil == tree {
        err = nil
        return
    }
    node := treenode.GetNodeInTree(tree, id)
    if nil == node {
        return
    }

    ret = buildBlockBreadcrumb(node)
    return
}

func buildBlockBreadcrumb(node *ast.Node) (ret []*BlockPath) {
    ret = []*BlockPath{}
    if nil == node {
        return
    }
    box := Conf.Box(node.Box)
    if nil == box {
        return
    }

    headingLevel := 16
    maxNameLen := 1024
    boxName := box.Name
    var hPath string
    baseBlock := treenode.GetBlockTreeRootByPath(node.Box, node.Path)
    if nil != baseBlock {
        hPath = baseBlock.HPath
    }
    for parent := node; nil != parent; parent = parent.Parent {
        if "" == parent.ID {
            continue
        }
        id := parent.ID

        name := html.EscapeHTMLStr(parent.IALAttr("name"))
        if ast.NodeDocument == parent.Type {
            name = html.EscapeHTMLStr(path.Join(boxName, hPath))
        } else {
            if "" == name {
                if ast.NodeListItem == parent.Type {
                    name = gulu.Str.SubStr(renderBlockText(parent.FirstChild), maxNameLen)
                } else {
                    name = gulu.Str.SubStr(renderBlockText(parent), maxNameLen)
                }
            }
            if ast.NodeHeading == parent.Type {
                headingLevel = parent.HeadingLevel
            }
        }

        add := true
        if ast.NodeList == parent.Type || ast.NodeSuperBlock == parent.Type || ast.NodeBlockquote == parent.Type {
            add = false
        }
        if ast.NodeParagraph == parent.Type && nil != parent.Parent && ast.NodeListItem == parent.Parent.Type && nil == parent.Next && nil == parent.Previous {
            add = false
        }
        if ast.NodeListItem == parent.Type {
            if "" == name {
                name = gulu.Str.SubStr(renderBlockText(parent.FirstChild), maxNameLen)
            }
        }

        if add {
            ret = append([]*BlockPath{{
                ID:      id,
                Name:    name,
                Type:    parent.Type.String(),
                SubType: treenode.SubTypeAbbr(parent),
            }}, ret...)
        }

        for prev := parent.Previous; nil != prev; prev = prev.Previous {
            if ast.NodeHeading == prev.Type && headingLevel > prev.HeadingLevel {
                name = gulu.Str.SubStr(renderBlockText(prev), maxNameLen)
                ret = append([]*BlockPath{{
                    ID:      prev.ID,
                    Name:    name,
                    Type:    prev.Type.String(),
                    SubType: treenode.SubTypeAbbr(prev),
                }}, ret...)
                headingLevel = prev.HeadingLevel
            }
        }
    }
    return
}
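
// Editor's note: a small usage sketch, not part of the original commit, showing how the slice
// returned by BuildBlockBreadcrumb above can be rendered as a flat path string for display or
// logging (breadcrumbString is a hypothetical helper).
func breadcrumbString(paths []*BlockPath) string {
    names := make([]string, 0, len(paths))
    for _, p := range paths {
        names = append(names, p.Name)
    }
    return strings.Join(names, " > ")
}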

138
kernel/model/bookmark.go
Normal file
@@ -0,0 +1,138 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package model

import (
    "errors"
    "fmt"
    "sort"
    "strings"

    "github.com/siyuan-note/siyuan/kernel/sql"
    "github.com/siyuan-note/siyuan/kernel/treenode"
    "github.com/siyuan-note/siyuan/kernel/util"
)

func RenameBookmark(oldBookmark, newBookmark string) (err error) {
    if treenode.ContainsMarker(newBookmark) {
        return errors.New(Conf.Language(112))
    }

    newBookmark = strings.TrimSpace(newBookmark)
    if "" == newBookmark {
        return errors.New(Conf.Language(126))
    }

    if oldBookmark == newBookmark {
        return
    }

    util.PushEndlessProgress(Conf.Language(110))

    bookmarks := sql.QueryBookmarkBlocksByKeyword(oldBookmark)
    treeBlocks := map[string][]string{}
    for _, tag := range bookmarks {
        if blocks, ok := treeBlocks[tag.RootID]; !ok {
            treeBlocks[tag.RootID] = []string{tag.ID}
        } else {
            treeBlocks[tag.RootID] = append(blocks, tag.ID)
        }
    }

    for treeID, blocks := range treeBlocks {
        util.PushEndlessProgress("[" + treeID + "]")
        tree, e := loadTreeByBlockID(treeID)
        if nil != e {
            util.ClearPushProgress(100)
            return e
        }

        for _, blockID := range blocks {
            node := treenode.GetNodeInTree(tree, blockID)
            if nil == node {
                continue
            }

            if bookmarkAttrVal := node.IALAttr("bookmark"); bookmarkAttrVal == oldBookmark {
                node.SetIALAttr("bookmark", newBookmark)
            }
        }

        util.PushEndlessProgress(fmt.Sprintf(Conf.Language(111), tree.Root.IALAttr("title")))
        if err = writeJSONQueue(tree); nil != err {
            util.ClearPushProgress(100)
            return
        }
        util.RandomSleep(50, 150)
    }

    util.PushEndlessProgress(Conf.Language(113))
    sql.WaitForWritingDatabase()
    util.ReloadUI()
    return
}

type BookmarkLabel string
type BookmarkBlocks []*Block

type Bookmark struct {
    Name   BookmarkLabel `json:"name"`
    Blocks []*Block      `json:"blocks"`
    Type   string        `json:"type"` // "bookmark"
    Depth  int           `json:"depth"`
    Count  int           `json:"count"`
}

type Bookmarks []*Bookmark

func (s Bookmarks) Len() int           { return len(s) }
func (s Bookmarks) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s Bookmarks) Less(i, j int) bool { return s[i].Name < s[j].Name }

func BookmarkLabels() (ret []string) {
    ret = sql.QueryBookmarkLabels()
    return
}

func BuildBookmark() (ret *Bookmarks) {
    WaitForWritingFiles()
    sql.WaitForWritingDatabase()

    ret = &Bookmarks{}
    sqlBlocks := sql.QueryBookmarkBlocks()
    labelBlocks := map[BookmarkLabel]BookmarkBlocks{}
    blocks := fromSQLBlocks(&sqlBlocks, "", 0)
    for _, block := range blocks {
        label := BookmarkLabel(block.IAL["bookmark"])
        if bs, ok := labelBlocks[label]; ok {
            bs = append(bs, block)
            labelBlocks[label] = bs
        } else {
            labelBlocks[label] = []*Block{block}
        }
    }

    for label, bs := range labelBlocks {
        for _, b := range bs {
            b.Depth = 1
        }
        *ret = append(*ret, &Bookmark{Name: label, Blocks: bs, Type: "bookmark", Count: len(bs)})
    }

    sort.Sort(ret)
    return
}

537
kernel/model/box.go
Normal file
@@ -0,0 +1,537 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package model

import (
    "bytes"
    "errors"
    "fmt"
    "io/ioutil"
    "os"
    "path"
    "path/filepath"
    "sort"
    "strings"
    "time"

    "github.com/88250/gulu"
    "github.com/88250/lute/ast"
    "github.com/88250/lute/parse"
    "github.com/facette/natsort"
    "github.com/siyuan-note/siyuan/kernel/conf"
    "github.com/siyuan-note/siyuan/kernel/filesys"
    "github.com/siyuan-note/siyuan/kernel/sql"
    "github.com/siyuan-note/siyuan/kernel/treenode"
    "github.com/siyuan-note/siyuan/kernel/util"
)

// Box is a notebook.
type Box struct {
    ID     string `json:"id"`
    Name   string `json:"name"`
    Icon   string `json:"icon"`
    Sort   int    `json:"sort"`
    Closed bool   `json:"closed"`

    historyGenerated int64 // timestamp of the most recent history generation
}

func AutoStat() {
    for range time.Tick(10 * time.Minute) {
        autoStat()
    }
}

func autoStat() {
    Conf.Stat.DocCount = sql.CountAllDoc()
    Conf.Save()
}

func ListNotebooks() (ret []*Box, err error) {
    ret = []*Box{}
    dirs, err := os.ReadDir(util.DataDir)
    if nil != err {
        util.LogErrorf("read dir [%s] failed: %s", util.DataDir, err)
        return ret, err
    }
    for _, dir := range dirs {
        if util.IsReservedFilename(dir.Name()) {
            continue
        }

        if !dir.IsDir() {
            continue
        }

        if !util.IsIDPattern(dir.Name()) {
            continue
        }

        boxConf := conf.NewBoxConf()
        boxConfPath := filepath.Join(util.DataDir, dir.Name(), ".siyuan", "conf.json")
        if !gulu.File.IsExist(boxConfPath) {
            if isUserGuide(dir.Name()) {
                filesys.ReleaseAllFileLocks()
                os.RemoveAll(filepath.Join(util.DataDir, dir.Name()))
                util.LogWarnf("not found user guide box conf [%s], removed it", boxConfPath)
                continue
            }
            util.LogWarnf("not found box conf [%s], recreate it", boxConfPath)
        } else {
            data, readErr := filesys.NoLockFileRead(boxConfPath)
            if nil != readErr {
                util.LogErrorf("read box conf [%s] failed: %s", boxConfPath, readErr)
                continue
            }
            if readErr = gulu.JSON.UnmarshalJSON(data, boxConf); nil != readErr {
                util.LogErrorf("parse box conf [%s] failed: %s", boxConfPath, readErr)
                continue
            }
        }

        id := dir.Name()
        ret = append(ret, &Box{
            ID:     id,
            Name:   boxConf.Name,
            Icon:   boxConf.Icon,
            Sort:   boxConf.Sort,
            Closed: boxConf.Closed,
        })
    }

    switch Conf.FileTree.Sort {
    case util.SortModeNameASC:
        sort.Slice(ret, func(i, j int) bool {
            return util.PinYinCompare(util.RemoveEmoji(ret[i].Name), util.RemoveEmoji(ret[j].Name))
        })
    case util.SortModeNameDESC:
        sort.Slice(ret, func(i, j int) bool {
            return util.PinYinCompare(util.RemoveEmoji(ret[j].Name), util.RemoveEmoji(ret[i].Name))
        })
    case util.SortModeUpdatedASC:
    case util.SortModeUpdatedDESC:
    case util.SortModeAlphanumASC:
        sort.Slice(ret, func(i, j int) bool {
            return natsort.Compare(util.RemoveEmoji(ret[i].Name), util.RemoveEmoji(ret[j].Name))
        })
    case util.SortModeAlphanumDESC:
        sort.Slice(ret, func(i, j int) bool {
            return natsort.Compare(util.RemoveEmoji(ret[j].Name), util.RemoveEmoji(ret[i].Name))
        })
    case util.SortModeCustom:
        sort.Slice(ret, func(i, j int) bool { return ret[i].Sort < ret[j].Sort })
    case util.SortModeRefCountASC:
    case util.SortModeRefCountDESC:
    case util.SortModeCreatedASC:
        sort.Slice(ret, func(i, j int) bool { return natsort.Compare(ret[i].ID, ret[j].ID) })
    case util.SortModeCreatedDESC:
        sort.Slice(ret, func(i, j int) bool { return natsort.Compare(ret[j].ID, ret[i].ID) })
    }
    return
}
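
// Editor's note: an illustrative aside, not part of the original commit, on the natural-order
// comparison used above for SortModeAlphanumASC/DESC (and, since notebook IDs start with a
// timestamp, for the created-time modes). natsort.Compare reports whether its first argument
// sorts before the second in natural order, so numeric runs compare by value rather than by
// character.
func exampleNaturalOrder() {
    fmt.Println(natsort.Compare("Notebook 2", "Notebook 10")) // true: 2 sorts before 10
    fmt.Println("Notebook 10" < "Notebook 2")                 // true: plain string order differs
}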

func (box *Box) GetConf() (ret *conf.BoxConf) {
    ret = conf.NewBoxConf()

    confPath := filepath.Join(util.DataDir, box.ID, ".siyuan/conf.json")
    if !gulu.File.IsExist(confPath) {
        return
    }

    data, err := filesys.LockFileRead(confPath)
    if nil != err {
        util.LogErrorf("read box conf [%s] failed: %s", confPath, err)
        return
    }

    if err = gulu.JSON.UnmarshalJSON(data, ret); nil != err {
        util.LogErrorf("parse box conf [%s] failed: %s", confPath, err)
        return
    }
    return
}

func (box *Box) SaveConf(conf *conf.BoxConf) {
    confPath := filepath.Join(util.DataDir, box.ID, ".siyuan/conf.json")
    newData, err := gulu.JSON.MarshalIndentJSON(conf, "", " ")
    if nil != err {
        util.LogErrorf("marshal box conf [%s] failed: %s", confPath, err)
        return
    }

    oldData, err := filesys.NoLockFileRead(confPath)
    if nil != err {
        box.saveConf0(newData)
        return
    }

    if bytes.Equal(newData, oldData) {
        return
    }

    box.saveConf0(newData)
}

func (box *Box) saveConf0(data []byte) {
    confPath := filepath.Join(util.DataDir, box.ID, ".siyuan/conf.json")
    if err := os.MkdirAll(filepath.Join(util.DataDir, box.ID, ".siyuan"), 0755); nil != err {
        util.LogErrorf("save box conf [%s] failed: %s", confPath, err)
    }
    if err := filesys.LockFileWrite(confPath, data); nil != err {
        util.LogErrorf("save box conf [%s] failed: %s", confPath, err)
    }
}

func (box *Box) Ls(p string) (ret []*FileInfo, totals int, err error) {
    boxLocalPath := filepath.Join(util.DataDir, box.ID)
    if strings.HasSuffix(p, ".sy") {
        dir := strings.TrimSuffix(p, ".sy")
        absDir := filepath.Join(boxLocalPath, dir)
        if gulu.File.IsDir(absDir) {
            p = dir
        } else {
            return
        }
    }

    files, err := ioutil.ReadDir(filepath.Join(util.DataDir, box.ID, p))
    if nil != err {
        return
    }

    for _, f := range files {
        if util.IsReservedFilename(f.Name()) {
            continue
        }

        totals += 1
        fi := &FileInfo{}
        fi.name = f.Name()
        fi.isdir = f.IsDir()
        fi.size = f.Size()
        fPath := path.Join(p, f.Name())
        if f.IsDir() {
            fPath += "/"
        }
        fi.path = fPath
        ret = append(ret, fi)
    }
    return
}

func (box *Box) Stat(p string) (ret *FileInfo) {
    absPath := filepath.Join(util.DataDir, box.ID, p)
    info, err := os.Stat(absPath)
    if nil != err {
        if !os.IsNotExist(err) {
            util.LogErrorf("stat [%s] failed: %s", absPath, err)
        }
        return
    }
    ret = &FileInfo{
        path:  p,
        name:  info.Name(),
        size:  info.Size(),
        isdir: info.IsDir(),
    }
    return
}

func (box *Box) Exist(p string) bool {
    return gulu.File.IsExist(filepath.Join(util.DataDir, box.ID, p))
}

func (box *Box) Mkdir(path string) error {
    if err := os.Mkdir(filepath.Join(util.DataDir, box.ID, path), 0755); nil != err {
        msg := fmt.Sprintf(Conf.Language(6), box.Name, path, err)
        util.LogErrorf("mkdir [path=%s] in box [%s] failed: %s", path, box.ID, err)
        return errors.New(msg)
    }
    IncWorkspaceDataVer()
    return nil
}

func (box *Box) MkdirAll(path string) error {
    if err := os.MkdirAll(filepath.Join(util.DataDir, box.ID, path), 0755); nil != err {
        msg := fmt.Sprintf(Conf.Language(6), box.Name, path, err)
        util.LogErrorf("mkdir all [path=%s] in box [%s] failed: %s", path, box.ID, err)
        return errors.New(msg)
    }
    IncWorkspaceDataVer()
    return nil
}

func (box *Box) Move(oldPath, newPath string) error {
    boxLocalPath := filepath.Join(util.DataDir, box.ID)
    fromPath := filepath.Join(boxLocalPath, oldPath)
    toPath := filepath.Join(boxLocalPath, newPath)
    filesys.ReleaseFileLocks(fromPath)
    if err := os.Rename(fromPath, toPath); nil != err {
        msg := fmt.Sprintf(Conf.Language(5), box.Name, fromPath, err)
        util.LogErrorf("move [path=%s] in box [%s] failed: %s", fromPath, box.Name, err)
        return errors.New(msg)
    }

    if oldDir := path.Dir(oldPath); util.IsIDPattern(path.Base(oldDir)) {
        fromDir := filepath.Join(boxLocalPath, oldDir)
        if util.IsEmptyDir(fromDir) {
            os.Remove(fromDir)
        }
    }
    IncWorkspaceDataVer()
    return nil
}

func (box *Box) Remove(path string) error {
    boxLocalPath := filepath.Join(util.DataDir, box.ID)
    filePath := filepath.Join(boxLocalPath, path)
    filesys.ReleaseFileLocks(filePath)
    if err := os.RemoveAll(filePath); nil != err {
        msg := fmt.Sprintf(Conf.Language(7), box.Name, path, err)
        util.LogErrorf("remove [path=%s] in box [%s] failed: %s", path, box.ID, err)
        return errors.New(msg)
    }
    IncWorkspaceDataVer()
    return nil
}

func (box *Box) Unindex() {
    tx, err := sql.BeginTx()
    if nil != err {
        return
    }
    sql.RemoveBoxHash(tx, box.ID)
    sql.DeleteByBoxTx(tx, box.ID)
    sql.CommitTx(tx)
    filesys.ReleaseFileLocks(filepath.Join(util.DataDir, box.ID))
    treenode.RemoveBlockTreesByBoxID(box.ID)
}

func (box *Box) ListFiles(path string) (ret []*FileInfo) {
    fis, _, err := box.Ls(path)
    if nil != err {
        return
    }
    box.listFiles(&fis, &ret)
    return
}

func (box *Box) listFiles(files, ret *[]*FileInfo) {
    for _, file := range *files {
        if file.isdir {
            fis, _, err := box.Ls(file.path)
            if nil == err {
                box.listFiles(&fis, ret)
            }
            *ret = append(*ret, file)
        } else {
            *ret = append(*ret, file)
        }
    }
    return
}

func isSkipFile(filename string) bool {
    return strings.HasPrefix(filename, ".") || "node_modules" == filename || "dist" == filename || "target" == filename
|
||||
}
|
||||
|
||||
func checkUploadBackup() (err error) {
|
||||
if !IsSubscriber() {
|
||||
if "ios" == util.Container {
|
||||
return errors.New(Conf.Language(122))
|
||||
}
|
||||
return errors.New(Conf.Language(29))
|
||||
}
|
||||
|
||||
backupDir := Conf.Backup.GetSaveDir()
|
||||
backupSize, err := util.SizeOfDirectory(backupDir, false)
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
|
||||
cloudAvailableBackupSize, err := getCloudAvailableBackupSize()
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
if cloudAvailableBackupSize < backupSize {
|
||||
return errors.New(fmt.Sprintf(Conf.Language(43), byteCountSI(int64(Conf.User.UserSiYuanRepoSize))))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
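checkUploadBackup formats the quota in its error message through byteCountSI, which is defined elsewhere in the kernel and not part of this hunk. A plausible sketch of such an SI (base-1000) formatter, offered only as an illustration of what the call above relies on:

func byteCountSISketch(b int64) string {
	const unit = 1000
	if b < unit {
		return fmt.Sprintf("%d B", b)
	}
	div, exp := int64(unit), 0
	for n := b / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	// e.g. 1500000 -> "1.5 MB"
	return fmt.Sprintf("%.1f %cB", float64(b)/float64(div), "kMGTPE"[exp])
}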
|
||||
func (box *Box) renameSubTrees(tree *parse.Tree) {
|
||||
subFiles := box.ListFiles(tree.Path)
|
||||
totals := len(subFiles) + 3
|
||||
showProgress := 64 < totals
|
||||
for i, subFile := range subFiles {
|
||||
if !strings.HasSuffix(subFile.path, ".sy") {
|
||||
continue
|
||||
}
|
||||
|
||||
subTree, err := LoadTree(box.ID, subFile.path) // LoadTree rebuilds the HPath
|
||||
if nil != err {
|
||||
continue
|
||||
}
|
||||
|
||||
sql.UpsertTreeQueue(subTree)
|
||||
if showProgress {
|
||||
msg := fmt.Sprintf(Conf.Language(107), subTree.HPath)
|
||||
util.PushProgress(util.PushProgressCodeProgressed, i, totals, msg)
|
||||
}
|
||||
}
|
||||
|
||||
if showProgress {
|
||||
util.ClearPushProgress(totals)
|
||||
}
|
||||
}
|
||||
|
||||
func moveTree(tree *parse.Tree) {
|
||||
treenode.SetBlockTreePath(tree)
|
||||
sql.UpsertTreeQueue(tree)
|
||||
|
||||
box := Conf.Box(tree.Box)
|
||||
subFiles := box.ListFiles(tree.Path)
|
||||
totals := len(subFiles) + 5
|
||||
showProgress := 64 < totals
|
||||
|
||||
for i, subFile := range subFiles {
|
||||
if !strings.HasSuffix(subFile.path, ".sy") {
|
||||
continue
|
||||
}
|
||||
|
||||
subTree, err := LoadTree(box.ID, subFile.path)
|
||||
if nil != err {
|
||||
continue
|
||||
}
|
||||
|
||||
treenode.SetBlockTreePath(subTree)
|
||||
sql.UpsertTreeQueue(subTree)
|
||||
|
||||
if showProgress {
|
||||
msg := fmt.Sprintf(Conf.Language(107), subTree.HPath)
|
||||
util.PushProgress(util.PushProgressCodeProgressed, i, totals, msg)
|
||||
}
|
||||
}
|
||||
|
||||
if showProgress {
|
||||
util.ClearPushProgress(totals)
|
||||
}
|
||||
}
|
||||
|
||||
func parseKTree(kramdown []byte) (ret *parse.Tree) {
|
||||
luteEngine := NewLute()
|
||||
ret = parse.Parse("", kramdown, luteEngine.ParseOptions)
|
||||
ast.Walk(ret.Root, func(n *ast.Node, entering bool) ast.WalkStatus {
|
||||
if !entering {
|
||||
return ast.WalkContinue
|
||||
}
|
||||
|
||||
if treenode.IsEmptyBlockIAL(n) {
|
||||
// Keep empty paragraphs
|
||||
p := &ast.Node{Type: ast.NodeParagraph}
|
||||
p.KramdownIAL = parse.Tokens2IAL(n.Tokens)
|
||||
p.ID = p.IALAttr("id")
|
||||
n.InsertBefore(p)
|
||||
return ast.WalkContinue
|
||||
}
|
||||
|
||||
id := n.IALAttr("id")
|
||||
if "" == id {
|
||||
n.SetIALAttr("id", n.ID)
|
||||
}
|
||||
|
||||
if "" == n.IALAttr("id") && (ast.NodeParagraph == n.Type || ast.NodeList == n.Type || ast.NodeListItem == n.Type || ast.NodeBlockquote == n.Type ||
|
||||
ast.NodeMathBlock == n.Type || ast.NodeCodeBlock == n.Type || ast.NodeHeading == n.Type || ast.NodeTable == n.Type || ast.NodeThematicBreak == n.Type ||
|
||||
ast.NodeYamlFrontMatter == n.Type || ast.NodeBlockQueryEmbed == n.Type || ast.NodeSuperBlock == n.Type ||
|
||||
ast.NodeHTMLBlock == n.Type || ast.NodeIFrame == n.Type || ast.NodeWidget == n.Type || ast.NodeAudio == n.Type || ast.NodeVideo == n.Type) {
|
||||
n.ID = ast.NewNodeID()
|
||||
n.KramdownIAL = [][]string{{"id", n.ID}}
|
||||
n.InsertAfter(&ast.Node{Type: ast.NodeKramdownBlockIAL, Tokens: []byte("{: id=\"" + n.ID + "\"}")})
|
||||
n.SetIALAttr("updated", util.TimeFromID(n.ID))
|
||||
}
|
||||
if "" == n.ID && 0 < len(n.KramdownIAL) && ast.NodeDocument != n.Type {
|
||||
n.ID = n.IALAttr("id")
|
||||
}
|
||||
return ast.WalkContinue
|
||||
})
|
||||
ret.Root.KramdownIAL = parse.Tokens2IAL(ret.Root.LastChild.Tokens)
|
||||
return
|
||||
}
|
||||
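parseKTree assigns a freshly generated node ID to every block that lacks one and appends a kramdown block IAL node carrying it. A tiny sketch of the token text involved; the ID in the comment only illustrates the format and is not taken from real data:

func blockIALExample() (ialToken, updated string) {
	id := ast.NewNodeID()              // e.g. "20220501120000-abcdefg" (format illustrative)
	ialToken = "{: id=\"" + id + "\"}" // the token parseKTree inserts after the block
	updated = util.TimeFromID(id)      // the "updated" attribute derived from the ID prefix
	return
}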
|
||||
func RefreshFileTree() {
|
||||
WaitForWritingFiles()
|
||||
syncLock.Lock()
|
||||
defer syncLock.Unlock()
|
||||
refreshFileTree()
|
||||
}
|
||||
|
||||
func refreshFileTree() {
|
||||
if err := sql.InitDatabase(true); nil != err {
|
||||
util.PushErrMsg(Conf.Language(85), 5000)
|
||||
return
|
||||
}
|
||||
|
||||
util.PushEndlessProgress(Conf.Language(35))
|
||||
openedBoxes := Conf.GetOpenedBoxes()
|
||||
for _, openedBox := range openedBoxes {
|
||||
openedBox.Index(true)
|
||||
}
|
||||
IndexRefs()
|
||||
// Cache the expanded root-level document tree
|
||||
for _, openedBox := range openedBoxes {
|
||||
ListDocTree(openedBox.ID, "/", Conf.FileTree.Sort)
|
||||
}
|
||||
treenode.SaveBlockTree()
|
||||
util.PushEndlessProgress(Conf.Language(58))
|
||||
go func() {
|
||||
time.Sleep(1 * time.Second)
|
||||
util.ReloadUI()
|
||||
}()
|
||||
}
|
||||
|
||||
func ChangeBoxSort(boxIDs []string) {
|
||||
for i, boxID := range boxIDs {
|
||||
box := &Box{ID: boxID}
|
||||
boxConf := box.GetConf()
|
||||
boxConf.Sort = i + 1
|
||||
box.SaveConf(boxConf)
|
||||
}
|
||||
}
|
||||
|
||||
func SetBoxIcon(boxID, icon string) {
|
||||
box := &Box{ID: boxID}
|
||||
boxConf := box.GetConf()
|
||||
boxConf.Icon = icon
|
||||
box.SaveConf(boxConf)
|
||||
}
|
||||
|
||||
func (box *Box) UpdateHistoryGenerated() {
|
||||
boxLatestHistoryTime[box.ID] = time.Now()
|
||||
}
|
||||
|
||||
func LockFileByBlockID(id string) (locked bool, filePath string) {
|
||||
bt := treenode.GetBlockTree(id)
|
||||
if nil == bt {
|
||||
return
|
||||
}
|
||||
p := filepath.Join(util.DataDir, bt.BoxID, bt.Path)
|
||||
|
||||
if !gulu.File.IsExist(p) {
|
||||
return true, ""
|
||||
}
|
||||
return nil == filesys.LockFile(p), p
|
||||
}
|
||||
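A minimal sketch of how LockFileByBlockID above could be used before mutating the tree file that contains a block; the block ID is a placeholder and the actual write is elided.

func lockBlockFileExample(id string) {
	locked, p := LockFileByBlockID(id)
	if !locked {
		util.LogWarnf("tree file [%s] is currently locked, skip writing", p)
		return
	}
	// safe to rewrite the .sy file at p here; the lock itself is managed by the filesys package
}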
561
kernel/model/conf.go
Normal file
|
|
@ -0,0 +1,561 @@
|
|||
// SiYuan - Build Your Eternal Digital Garden
|
||||
// Copyright (c) 2020-present, b3log.org
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/88250/gulu"
|
||||
"github.com/88250/lute"
|
||||
humanize "github.com/dustin/go-humanize"
|
||||
"github.com/getsentry/sentry-go"
|
||||
"github.com/siyuan-note/siyuan/kernel/conf"
|
||||
"github.com/siyuan-note/siyuan/kernel/filesys"
|
||||
"github.com/siyuan-note/siyuan/kernel/sql"
|
||||
"github.com/siyuan-note/siyuan/kernel/treenode"
|
||||
"github.com/siyuan-note/siyuan/kernel/util"
|
||||
)
|
||||
|
||||
var Conf *AppConf
|
||||
|
||||
// AppConf maintains application metadata, persisted in ~/.siyuan/conf.json.
|
||||
type AppConf struct {
|
||||
LogLevel string `json:"logLevel"` // Log level: Off, Trace, Debug, Info, Warn, Error, Fatal
|
||||
Appearance *conf.Appearance `json:"appearance"` // Appearance
|
||||
Langs []*conf.Lang `json:"langs"` // List of available UI languages
|
||||
Lang string `json:"lang"` // Selected UI language, same as Appearance.Lang
|
||||
FileTree *conf.FileTree `json:"fileTree"` // Document tree panel
|
||||
Tag *conf.Tag `json:"tag"` // Tag panel
|
||||
Editor *conf.Editor `json:"editor"` // Editor settings
|
||||
Export *conf.Export `json:"export"` // Export settings
|
||||
Graph *conf.Graph `json:"graph"` // Graph settings
|
||||
UILayout *conf.UILayout `json:"uiLayout"` // UI layout
|
||||
UserData string `json:"userData"` // Community user info, stored as the encrypted form of User
|
||||
User *conf.User `json:"-"` // In-memory community user struct, not persisted
|
||||
Account *conf.Account `json:"account"` // Account settings
|
||||
ReadOnly bool `json:"readonly"` // Whether the workspace is read-only
|
||||
LocalIPs []string `json:"localIPs"` // List of local IPs
|
||||
AccessAuthCode string `json:"accessAuthCode"` // Access authorization code
|
||||
E2EEPasswd string `json:"e2eePasswd"` // End-to-end encryption password, used for backup and sync
|
||||
E2EEPasswdMode int `json:"e2eePasswdMode"` // How the E2EE password was generated: 0 automatic, 1 custom
|
||||
System *conf.System `json:"system"` // System
|
||||
Keymap *conf.Keymap `json:"keymap"` // Keymap
|
||||
Backup *conf.Backup `json:"backup"` // Backup settings
|
||||
Sync *conf.Sync `json:"sync"` // Sync settings
|
||||
Search *conf.Search `json:"search"` // Search settings
|
||||
Stat *conf.Stat `json:"stat"` // Statistics
|
||||
Api *conf.API `json:"api"` // API
|
||||
Newbie bool `json:"newbie"` // Whether this is the first launch after installation
|
||||
}
|
||||
|
||||
func InitConf() {
|
||||
initLang()
|
||||
|
||||
windowStateConf := filepath.Join(util.ConfDir, "windowState.json")
|
||||
if !gulu.File.IsExist(windowStateConf) {
|
||||
if err := gulu.File.WriteFileSafer(windowStateConf, []byte("{}"), 0644); nil != err {
|
||||
util.LogErrorf("create [windowState.json] failed: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
Conf = &AppConf{LogLevel: "debug", Lang: util.Lang}
|
||||
confPath := filepath.Join(util.ConfDir, "conf.json")
|
||||
if gulu.File.IsExist(confPath) {
|
||||
data, err := os.ReadFile(confPath)
|
||||
if nil != err {
|
||||
util.LogErrorf("load conf [%s] failed: %s", confPath, err)
|
||||
}
|
||||
err = gulu.JSON.UnmarshalJSON(data, Conf)
|
||||
if err != nil {
|
||||
util.LogErrorf("parse conf [%s] failed: %s", confPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
Conf.Langs = loadLangs()
|
||||
if nil == Conf.Appearance {
|
||||
Conf.Appearance = conf.NewAppearance()
|
||||
}
|
||||
var langOK bool
|
||||
for _, l := range Conf.Langs {
|
||||
if Conf.Lang == l.Name {
|
||||
langOK = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !langOK {
|
||||
Conf.Lang = "en_US"
|
||||
}
|
||||
Conf.Appearance.Lang = Conf.Lang
|
||||
if nil == Conf.UILayout {
|
||||
Conf.UILayout = &conf.UILayout{}
|
||||
}
|
||||
if nil == Conf.Keymap {
|
||||
Conf.Keymap = &conf.Keymap{}
|
||||
}
|
||||
if "" == Conf.Appearance.CodeBlockThemeDark {
|
||||
Conf.Appearance.CodeBlockThemeDark = "dracula"
|
||||
}
|
||||
if "" == Conf.Appearance.CodeBlockThemeLight {
|
||||
Conf.Appearance.CodeBlockThemeLight = "github"
|
||||
}
|
||||
if nil == Conf.FileTree {
|
||||
Conf.FileTree = conf.NewFileTree()
|
||||
}
|
||||
if 1 > Conf.FileTree.MaxListCount {
|
||||
Conf.FileTree.MaxListCount = 512
|
||||
}
|
||||
if nil == Conf.Tag {
|
||||
Conf.Tag = conf.NewTag()
|
||||
}
|
||||
if nil == Conf.Editor {
|
||||
Conf.Editor = conf.NewEditor()
|
||||
}
|
||||
if 1 > len(Conf.Editor.Emoji) {
|
||||
Conf.Editor.Emoji = []string{}
|
||||
}
|
||||
if 1 > Conf.Editor.BlockRefDynamicAnchorTextMaxLen {
|
||||
Conf.Editor.BlockRefDynamicAnchorTextMaxLen = 64
|
||||
}
|
||||
if 5120 < Conf.Editor.BlockRefDynamicAnchorTextMaxLen {
|
||||
Conf.Editor.BlockRefDynamicAnchorTextMaxLen = 5120
|
||||
}
|
||||
if nil == Conf.Export {
|
||||
Conf.Export = conf.NewExport()
|
||||
}
|
||||
if 0 == Conf.Export.BlockRefMode || 1 == Conf.Export.BlockRefMode {
|
||||
// Deprecated the export options that convert block refs to original blocks or blockquotes https://github.com/siyuan-note/siyuan/issues/3155
|
||||
Conf.Export.BlockRefMode = 4 // Switch to footnotes instead
|
||||
}
|
||||
if 9 > Conf.Editor.FontSize || 72 < Conf.Editor.FontSize {
|
||||
Conf.Editor.FontSize = 16
|
||||
}
|
||||
if "" == Conf.Editor.PlantUMLServePath {
|
||||
Conf.Editor.PlantUMLServePath = "https://www.plantuml.com/plantuml/svg/~1"
|
||||
}
|
||||
|
||||
if nil == Conf.Graph || nil == Conf.Graph.Local || nil == Conf.Graph.Global {
|
||||
Conf.Graph = conf.NewGraph()
|
||||
}
|
||||
if nil == Conf.System {
|
||||
Conf.System = conf.NewSystem()
|
||||
} else {
|
||||
Conf.System.KernelVersion = util.Ver
|
||||
Conf.System.IsInsider = util.IsInsider
|
||||
}
|
||||
if nil == Conf.System.NetworkProxy {
|
||||
Conf.System.NetworkProxy = &conf.NetworkProxy{}
|
||||
}
|
||||
if "" != Conf.System.NetworkProxy.Scheme {
|
||||
util.LogInfof("using network proxy [%s]", Conf.System.NetworkProxy.String())
|
||||
}
|
||||
if "" == Conf.System.ID {
|
||||
Conf.System.ID = util.GetDeviceID()
|
||||
}
|
||||
if "std" == util.Container {
|
||||
Conf.System.ID = util.GetDeviceID()
|
||||
}
|
||||
|
||||
Conf.System.AppDir = util.WorkingDir
|
||||
Conf.System.ConfDir = util.ConfDir
|
||||
Conf.System.HomeDir = util.HomeDir
|
||||
Conf.System.WorkspaceDir = util.WorkspaceDir
|
||||
Conf.System.DataDir = util.DataDir
|
||||
Conf.System.Container = util.Container
|
||||
util.UserAgent = util.UserAgent + " " + util.Container
|
||||
Conf.System.OS = runtime.GOOS
|
||||
Conf.Newbie = util.IsNewbie
|
||||
|
||||
if "" != Conf.UserData {
|
||||
Conf.User = loadUserFromConf()
|
||||
}
|
||||
if nil == Conf.Account {
|
||||
Conf.Account = conf.NewAccount()
|
||||
}
|
||||
|
||||
if nil == Conf.Backup {
|
||||
Conf.Backup = conf.NewBackup()
|
||||
}
|
||||
if !gulu.File.IsExist(Conf.Backup.GetSaveDir()) {
|
||||
if err := os.MkdirAll(Conf.Backup.GetSaveDir(), 0755); nil != err {
|
||||
util.LogErrorf("create backup dir [%s] failed: %s", Conf.Backup.GetSaveDir(), err)
|
||||
}
|
||||
}
|
||||
|
||||
if nil == Conf.Sync {
|
||||
Conf.Sync = conf.NewSync()
|
||||
}
|
||||
if !gulu.File.IsExist(Conf.Sync.GetSaveDir()) {
|
||||
if err := os.MkdirAll(Conf.Sync.GetSaveDir(), 0755); nil != err {
|
||||
util.LogErrorf("create sync dir [%s] failed: %s", Conf.Sync.GetSaveDir(), err)
|
||||
}
|
||||
}
|
||||
|
||||
if nil == Conf.Api {
|
||||
Conf.Api = conf.NewAPI()
|
||||
}
|
||||
|
||||
if 1440 < Conf.Editor.GenerateHistoryInterval {
|
||||
Conf.Editor.GenerateHistoryInterval = 1440
|
||||
}
|
||||
if 1 > Conf.Editor.HistoryRetentionDays {
|
||||
Conf.Editor.HistoryRetentionDays = 7
|
||||
}
|
||||
|
||||
if nil == Conf.Search {
|
||||
Conf.Search = conf.NewSearch()
|
||||
}
|
||||
|
||||
if nil == Conf.Stat {
|
||||
Conf.Stat = conf.NewStat()
|
||||
}
|
||||
|
||||
Conf.ReadOnly = util.ReadOnly
|
||||
if "" != util.AccessAuthCode {
|
||||
Conf.AccessAuthCode = util.AccessAuthCode
|
||||
}
|
||||
|
||||
Conf.E2EEPasswdMode = 0
|
||||
if !isBuiltInE2EEPasswd() {
|
||||
Conf.E2EEPasswdMode = 1
|
||||
}
|
||||
|
||||
Conf.LocalIPs = util.GetLocalIPs()
|
||||
|
||||
Conf.Save()
|
||||
util.SetLogLevel(Conf.LogLevel)
|
||||
|
||||
if Conf.System.UploadErrLog {
|
||||
util.LogInfof("user has enabled [Automatically upload error messages and diagnostic data]")
|
||||
sentry.Init(sentry.ClientOptions{
|
||||
Dsn: "https://bdff135f14654ae58a054adeceb2c308@o1173696.ingest.sentry.io/6269178",
|
||||
Release: util.Ver,
|
||||
Environment: util.Mode,
|
||||
})
|
||||
}
|
||||
}
|
||||
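InitConf normalizes several numeric settings into a sane range (font size 9..72, dynamic anchor text length 1..5120, history retention, and so on). The same checks could be expressed with a small helper; a sketch, not code from this commit:

func clampInt(v, min, max, fallback int) int {
	if v < min || v > max {
		return fallback
	}
	return v
}

With it, the font size check above is equivalent to Conf.Editor.FontSize = clampInt(Conf.Editor.FontSize, 9, 72, 16).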
|
||||
var langs = map[string]map[int]string{}
|
||||
var timeLangs = map[string]map[string]interface{}{}
|
||||
|
||||
func initLang() {
|
||||
p := filepath.Join(util.WorkingDir, "appearance", "langs")
|
||||
dir, err := os.Open(p)
|
||||
if nil != err {
|
||||
util.LogFatalf("open language configuration folder [%s] failed: %s", p, err)
|
||||
}
|
||||
defer dir.Close()
|
||||
|
||||
langNames, err := dir.Readdirnames(-1)
|
||||
if nil != err {
|
||||
util.LogFatalf("list language configuration folder [%s] failed: %s", p, err)
|
||||
}
|
||||
|
||||
for _, langName := range langNames {
|
||||
jsonPath := filepath.Join(p, langName)
|
||||
data, err := os.ReadFile(jsonPath)
|
||||
if nil != err {
|
||||
util.LogErrorf("read language configuration [%s] failed: %s", jsonPath, err)
|
||||
continue
|
||||
}
|
||||
langMap := map[string]interface{}{}
|
||||
if err := gulu.JSON.UnmarshalJSON(data, &langMap); nil != err {
|
||||
util.LogErrorf("parse language configuration failed [%s] failed: %s", jsonPath, err)
|
||||
continue
|
||||
}
|
||||
|
||||
kernelMap := map[int]string{}
|
||||
label := langMap["_label"].(string)
|
||||
kernelLangs := langMap["_kernel"].(map[string]interface{})
|
||||
for k, v := range kernelLangs {
|
||||
num, err := strconv.Atoi(k)
|
||||
if nil != err {
|
||||
util.LogErrorf("parse language configuration [%s] item [%d] failed [%s] failed: %s", p, num, err)
|
||||
continue
|
||||
}
|
||||
kernelMap[num] = v.(string)
|
||||
}
|
||||
kernelMap[-1] = label
|
||||
name := langName[:strings.LastIndex(langName, ".")]
|
||||
langs[name] = kernelMap
|
||||
|
||||
timeLangs[name] = langMap["_time"].(map[string]interface{})
|
||||
}
|
||||
}
|
||||
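initLang expects each appearance/langs/*.json file to carry a _label string, a _kernel object keyed by numeric strings, and a _time object; Conf.Language(num) later resolves kernel messages from the resulting map. A sketch with made-up content (the _label/_kernel/_time keys come from the code above, the values are placeholders):

func langFileExample() {
	sample := []byte(`{"_label": "English", "_kernel": {"6": "example kernel message: %s"}, "_time": {"someKey": "some value"}}`)
	langMap := map[string]interface{}{}
	if err := gulu.JSON.UnmarshalJSON(sample, &langMap); nil != err {
		return
	}
	kernelLangs := langMap["_kernel"].(map[string]interface{})
	util.LogInfof("label=%s, message 6=%s", langMap["_label"].(string), kernelLangs["6"].(string))
}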
|
||||
func loadLangs() (ret []*conf.Lang) {
|
||||
for name, langMap := range langs {
|
||||
lang := &conf.Lang{Label: langMap[-1], Name: name}
|
||||
ret = append(ret, lang)
|
||||
}
|
||||
sort.Slice(ret, func(i, j int) bool {
|
||||
return ret[i].Name < ret[j].Name
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
var exitLock = sync.Mutex{}
|
||||
|
||||
func Close(force bool) (err error) {
|
||||
exitLock.Lock()
|
||||
defer exitLock.Unlock()
|
||||
|
||||
treenode.CloseBlockTree()
|
||||
util.PushMsg(Conf.Language(95), 10000*60)
|
||||
WaitForWritingFiles()
|
||||
if !force {
|
||||
SyncData(false, true, false)
|
||||
if 0 != ExitSyncSucc {
|
||||
err = errors.New(Conf.Language(96))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
//util.UIProcessIDs.Range(func(key, _ interface{}) bool {
|
||||
// pid := key.(string)
|
||||
// util.Kill(pid)
|
||||
// return true
|
||||
//})
|
||||
|
||||
Conf.Close()
|
||||
sql.CloseDatabase()
|
||||
util.WebSocketServer.Close()
|
||||
clearWorkspaceTemp()
|
||||
util.LogInfof("exited kernel")
|
||||
go func() {
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
os.Exit(util.ExitCodeOk)
|
||||
}()
|
||||
return
|
||||
}
|
||||
|
||||
var CustomEmojis = sync.Map{}
|
||||
|
||||
func NewLute() (ret *lute.Lute) {
|
||||
ret = util.NewLute()
|
||||
ret.SetCodeSyntaxHighlightLineNum(Conf.Editor.CodeSyntaxHighlightLineNum)
|
||||
ret.SetChineseParagraphBeginningSpace(Conf.Export.ParagraphBeginningSpace)
|
||||
ret.SetProtyleMarkNetImg(Conf.Editor.DisplayNetImgMark)
|
||||
|
||||
customEmojiMap := map[string]string{}
|
||||
CustomEmojis.Range(func(key, value interface{}) bool {
|
||||
customEmojiMap[key.(string)] = value.(string)
|
||||
return true
|
||||
})
|
||||
ret.PutEmojis(customEmojiMap)
|
||||
return
|
||||
}
|
||||
|
||||
var confSaveLock = sync.Mutex{}
|
||||
|
||||
func (conf *AppConf) Save() {
|
||||
confSaveLock.Lock()
|
||||
defer confSaveLock.Unlock()
|
||||
|
||||
newData, _ := gulu.JSON.MarshalIndentJSON(Conf, "", " ")
|
||||
confPath := filepath.Join(util.ConfDir, "conf.json")
|
||||
oldData, err := filesys.NoLockFileRead(confPath)
|
||||
if nil != err {
|
||||
conf.save0(newData)
|
||||
return
|
||||
}
|
||||
|
||||
if bytes.Equal(newData, oldData) {
|
||||
return
|
||||
}
|
||||
|
||||
conf.save0(newData)
|
||||
}
|
||||
|
||||
func (conf *AppConf) save0(data []byte) {
|
||||
confPath := filepath.Join(util.ConfDir, "conf.json")
|
||||
if err := filesys.LockFileWrite(confPath, data); nil != err {
|
||||
util.LogFatalf("write conf [%s] failed: %s", confPath, err)
|
||||
}
|
||||
}
|
||||
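Both AppConf.Save and Box.SaveConf compare the freshly marshalled JSON against what is already on disk and skip the write when nothing changed, avoiding pointless mtime churn and file-watcher events. A standalone sketch of the pattern; the real code goes through filesys.NoLockFileRead/LockFileWrite, plain os calls are used here only to keep the sketch self-contained:

func writeIfChanged(path string, newData []byte) error {
	oldData, err := os.ReadFile(path)
	if nil == err && bytes.Equal(oldData, newData) {
		return nil // unchanged, nothing to do
	}
	return os.WriteFile(path, newData, 0644)
}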
|
||||
func (conf *AppConf) Close() {
|
||||
conf.Save()
|
||||
}
|
||||
|
||||
func (conf *AppConf) Box(boxID string) *Box {
|
||||
for _, box := range conf.GetOpenedBoxes() {
|
||||
if box.ID == boxID {
|
||||
return box
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (conf *AppConf) GetBoxes() (ret []*Box) {
|
||||
ret = []*Box{}
|
||||
notebooks, err := ListNotebooks()
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
|
||||
for _, notebook := range notebooks {
|
||||
id := notebook.ID
|
||||
name := notebook.Name
|
||||
closed := notebook.Closed
|
||||
box := &Box{ID: id, Name: name, Closed: closed}
|
||||
ret = append(ret, box)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (conf *AppConf) GetOpenedBoxes() (ret []*Box) {
|
||||
ret = []*Box{}
|
||||
notebooks, err := ListNotebooks()
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
|
||||
for _, notebook := range notebooks {
|
||||
if !notebook.Closed {
|
||||
ret = append(ret, notebook)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (conf *AppConf) GetClosedBoxes() (ret []*Box) {
|
||||
ret = []*Box{}
|
||||
notebooks, err := ListNotebooks()
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
|
||||
for _, notebook := range notebooks {
|
||||
if notebook.Closed {
|
||||
ret = append(ret, notebook)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (conf *AppConf) Language(num int) string {
|
||||
return langs[conf.Lang][num]
|
||||
}
|
||||
|
||||
func InitBoxes() {
|
||||
initialized := false
|
||||
blockCount := 0
|
||||
if 1 > len(treenode.GetBlockTrees()) {
|
||||
if gulu.File.IsExist(util.BlockTreePath) {
|
||||
util.IncBootProgress(30, "Reading block trees...")
|
||||
go func() {
|
||||
for i := 0; i < 40; i++ {
|
||||
util.RandomSleep(100, 200)
|
||||
util.IncBootProgress(1, "Reading block trees...")
|
||||
}
|
||||
}()
|
||||
if err := treenode.ReadBlockTree(); nil == err {
|
||||
initialized = true
|
||||
} else {
|
||||
if err = os.RemoveAll(util.BlockTreePath); nil != err {
|
||||
util.LogErrorf("remove block tree [%s] failed: %s", util.BlockTreePath, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else { // More than 1 means the block trees were already loaded during the sync stage
|
||||
initialized = true
|
||||
}
|
||||
|
||||
for _, box := range Conf.GetOpenedBoxes() {
|
||||
box.UpdateHistoryGenerated() // Initialize the history generation time to the current time
|
||||
if !initialized {
|
||||
box.BootIndex()
|
||||
}
|
||||
|
||||
ListDocTree(box.ID, "/", Conf.FileTree.Sort) // Cache the expanded root-level document tree
|
||||
}
|
||||
|
||||
if !initialized {
|
||||
treenode.SaveBlockTree()
|
||||
}
|
||||
|
||||
blocktrees := treenode.GetBlockTrees()
|
||||
blockCount = len(blocktrees)
|
||||
|
||||
var dbSize string
|
||||
if dbFile, err := os.Stat(util.DBPath); nil == err {
|
||||
dbSize = humanize.Bytes(uint64(dbFile.Size()))
|
||||
}
|
||||
util.LogInfof("database size [%s], block count [%d]", dbSize, blockCount)
|
||||
}
|
||||
|
||||
func IsSubscriber() bool {
|
||||
return nil != Conf.User && (-1 == Conf.User.UserSiYuanProExpireTime || 0 < Conf.User.UserSiYuanProExpireTime) && 0 == Conf.User.UserSiYuanSubscriptionStatus
|
||||
}
|
||||
|
||||
func isBuiltInE2EEPasswd() bool {
|
||||
if nil == Conf || nil == Conf.User || "" == Conf.E2EEPasswd {
|
||||
return true
|
||||
}
|
||||
|
||||
pwd := GetBuiltInE2EEPasswd()
|
||||
return Conf.E2EEPasswd == util.AESEncrypt(pwd)
|
||||
}
|
||||
|
||||
func GetBuiltInE2EEPasswd() (ret string) {
|
||||
part1 := Conf.User.UserId[:7]
|
||||
part2 := Conf.User.UserId[7:]
|
||||
ret = part2 + part1
|
||||
ret = fmt.Sprintf("%x", sha256.Sum256([]byte(ret)))[:7]
|
||||
return
|
||||
}
|
||||
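A sketch tracing GetBuiltInE2EEPasswd above with a made-up user ID: the ID is split at index 7, the halves are swapped, the result is hashed with SHA-256, and the first 7 hex characters become the built-in password. The user ID below is purely illustrative.

func builtInE2EEPasswdExample() string {
	userID := "1234567abcdefg"             // hypothetical Conf.User.UserId
	part1, part2 := userID[:7], userID[7:] // "1234567", "abcdefg"
	swapped := part2 + part1               // "abcdefg1234567"
	sum := fmt.Sprintf("%x", sha256.Sum256([]byte(swapped)))
	return sum[:7]
}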
|
||||
func clearWorkspaceTemp() {
|
||||
os.RemoveAll(filepath.Join(util.TempDir, "bazaar"))
|
||||
os.RemoveAll(filepath.Join(util.TempDir, "export"))
|
||||
os.RemoveAll(filepath.Join(util.TempDir, "import"))
|
||||
|
||||
tmps, err := filepath.Glob(filepath.Join(util.TempDir, "*.tmp"))
|
||||
if nil != err {
|
||||
util.LogErrorf("glob temp files failed: %s", err)
|
||||
}
|
||||
for _, tmp := range tmps {
|
||||
if err = os.RemoveAll(tmp); nil != err {
|
||||
util.LogErrorf("remove temp file [%s] failed: %s", tmp, err)
|
||||
} else {
|
||||
util.LogInfof("removed temp file [%s]", tmp)
|
||||
}
|
||||
}
|
||||
|
||||
tmps, err = filepath.Glob(filepath.Join(util.DataDir, ".siyuan", "*.tmp"))
|
||||
if nil != err {
|
||||
util.LogErrorf("glob temp files failed: %s", err)
|
||||
}
|
||||
for _, tmp := range tmps {
|
||||
if err = os.RemoveAll(tmp); nil != err {
|
||||
util.LogErrorf("remove temp file [%s] failed: %s", tmp, err)
|
||||
} else {
|
||||
util.LogInfof("removed temp file [%s]", tmp)
|
||||
}
|
||||
}
|
||||
}
|
||||
303
kernel/model/css.go
Normal file
|
|
@ -0,0 +1,303 @@
|
|||
// SiYuan - Build Your Eternal Digital Garden
|
||||
// Copyright (c) 2020-present, b3log.org
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/88250/css"
|
||||
"github.com/88250/gulu"
|
||||
"github.com/siyuan-note/siyuan/kernel/util"
|
||||
)
|
||||
|
||||
var colorKeys = map[string][]string{
|
||||
"colorPrimary": colorPrimary,
|
||||
"colorFont": colorFont,
|
||||
"colorBorder": colorBorder,
|
||||
"colorScroll": colorScroll,
|
||||
"colorTab": colorTab,
|
||||
"colorTip": colorTip,
|
||||
"colorGraph": colorGraph,
|
||||
"colorInline": colorInline,
|
||||
}
|
||||
|
||||
var colorPrimary = []string{
|
||||
"--b3-theme-primary",
|
||||
"--b3-theme-primary-light",
|
||||
"--b3-theme-primary-lighter",
|
||||
"--b3-theme-primary-lightest",
|
||||
"--b3-theme-secondary",
|
||||
"--b3-theme-background",
|
||||
"--b3-theme-surface",
|
||||
"--b3-theme-error",
|
||||
}
|
||||
|
||||
var colorFont = []string{
|
||||
"--b3-theme-on-primary",
|
||||
"--b3-theme-on-secondary",
|
||||
"--b3-theme-on-background",
|
||||
"--b3-theme-on-surface",
|
||||
"--b3-theme-on-error",
|
||||
}
|
||||
|
||||
var colorBorder = []string{
|
||||
"--b3-border-color",
|
||||
}
|
||||
|
||||
var colorScroll = []string{
|
||||
"--b3-scroll-color",
|
||||
}
|
||||
|
||||
var colorTab = []string{
|
||||
"--b3-tab-background",
|
||||
}
|
||||
|
||||
var colorTip = []string{
|
||||
"--b3-tooltips-color",
|
||||
}
|
||||
|
||||
var colorGraph = []string{
|
||||
"--b3-graph-line",
|
||||
"--b3-graph-hl-point",
|
||||
"--b3-graph-hl-line",
|
||||
"--b3-graph-p-point",
|
||||
"--b3-graph-heading-point",
|
||||
"--b3-graph-math-point",
|
||||
"--b3-graph-code-point",
|
||||
"--b3-graph-table-point",
|
||||
"--b3-graph-list-point",
|
||||
"--b3-graph-todo-point",
|
||||
"--b3-graph-olist-point",
|
||||
"--b3-graph-listitem-point",
|
||||
"--b3-graph-bq-point",
|
||||
"--b3-graph-super-point",
|
||||
"--b3-graph-doc-point",
|
||||
"--b3-graph-tag-point",
|
||||
"--b3-graph-asset-point",
|
||||
"--b3-graph-line",
|
||||
"--b3-graph-tag-line",
|
||||
"--b3-graph-ref-line",
|
||||
"--b3-graph-tag-tag-line",
|
||||
"--b3-graph-asset-line",
|
||||
"--b3-graph-hl-point",
|
||||
"--b3-graph-hl-line",
|
||||
}
|
||||
|
||||
var colorInline = []string{
|
||||
"--b3-protyle-inline-strong-color",
|
||||
"--b3-protyle-inline-em-color",
|
||||
"--b3-protyle-inline-s-color",
|
||||
"--b3-protyle-inline-link-color",
|
||||
"--b3-protyle-inline-tag-color",
|
||||
"--b3-protyle-inline-blockref-color",
|
||||
"--b3-protyle-inline-mark-background",
|
||||
"--b3-protyle-inline-mark-color",
|
||||
}
|
||||
|
||||
func currentCSSValue(key string) string {
|
||||
var themeName string
|
||||
if 0 == Conf.Appearance.Mode {
|
||||
themeName = Conf.Appearance.ThemeLight
|
||||
} else {
|
||||
themeName = Conf.Appearance.ThemeDark
|
||||
}
|
||||
|
||||
themePath := filepath.Join(util.ThemesPath, themeName)
|
||||
theme := filepath.Join(themePath, "theme.css")
|
||||
custom := filepath.Join(themePath, "custom.css")
|
||||
|
||||
var data []byte
|
||||
var err error
|
||||
if Conf.Appearance.CustomCSS {
|
||||
data, _ = os.ReadFile(custom)
|
||||
}
|
||||
if 1 > len(data) {
|
||||
data, err = os.ReadFile(theme)
|
||||
if nil != err {
|
||||
util.LogErrorf("read theme css [%s] failed: %s", theme, err)
|
||||
return "#ffffff"
|
||||
}
|
||||
}
|
||||
|
||||
ss := css.Parse(string(data))
|
||||
rules := ss.GetCSSRuleList()
|
||||
for _, rule := range rules {
|
||||
for _, style := range rule.Style.Styles {
|
||||
fixStyle(style)
|
||||
|
||||
if key == style.Property {
|
||||
return style.Value.Text()
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
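A minimal sketch of the css parsing calls used by currentCSSValue, run against an inline stylesheet instead of a theme file; the selector and color value are made up.

func cssVarExample() string {
	ss := css.Parse(":root { --b3-theme-primary: #3575f0; }")
	for _, rule := range ss.GetCSSRuleList() {
		for _, style := range rule.Style.Styles {
			fixStyle(style)
			if "--b3-theme-primary" == style.Property {
				return style.Value.Text()
			}
		}
	}
	return ""
}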
|
||||
func ReadCustomCSS(themeName string) (ret map[string]map[string]string, err error) {
|
||||
ret = map[string]map[string]string{}
|
||||
|
||||
themePath := filepath.Join(util.ThemesPath, themeName)
|
||||
theme := filepath.Join(themePath, "theme.css")
|
||||
custom := filepath.Join(themePath, "custom.css")
|
||||
|
||||
if !gulu.File.IsExist(custom) {
|
||||
if err = gulu.File.CopyFile(theme, custom); nil != err {
|
||||
util.LogErrorf("copy theme [%s] to [%s] failed: %s", theme, custom, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(custom)
|
||||
if nil != err {
|
||||
util.LogErrorf("read custom css [%s] failed: %s", custom, err)
|
||||
return
|
||||
}
|
||||
|
||||
fullColorMap := map[string]string{}
|
||||
ss := css.Parse(string(data))
|
||||
rules := ss.GetCSSRuleList()
|
||||
for _, rule := range rules {
|
||||
for _, style := range rule.Style.Styles {
|
||||
fixStyle(style)
|
||||
|
||||
fullColorMap[style.Property] = style.Value.Text()
|
||||
}
|
||||
}
|
||||
|
||||
// Supplement with the styles from the current theme
|
||||
data, err = os.ReadFile(theme)
|
||||
if nil != err {
|
||||
util.LogErrorf("read theme css [%s] failed: %s", theme, err)
|
||||
return
|
||||
}
|
||||
ss = css.Parse(string(data))
|
||||
rules = ss.GetCSSRuleList()
|
||||
for _, rule := range rules {
|
||||
for _, style := range rule.Style.Styles {
|
||||
fixStyle(style)
|
||||
if _, ok := fullColorMap[style.Property]; !ok {
|
||||
fullColorMap[style.Property] = style.Value.ParsedText()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
buildColor(&ret, fullColorMap, "colorPrimary")
|
||||
buildColor(&ret, fullColorMap, "colorFont")
|
||||
buildColor(&ret, fullColorMap, "colorBorder")
|
||||
buildColor(&ret, fullColorMap, "colorScroll")
|
||||
buildColor(&ret, fullColorMap, "colorTab")
|
||||
buildColor(&ret, fullColorMap, "colorTip")
|
||||
buildColor(&ret, fullColorMap, "colorGraph")
|
||||
buildColor(&ret, fullColorMap, "colorInline")
|
||||
return
|
||||
}
|
||||
|
||||
func buildColor(ret *map[string]map[string]string, fullColorMap map[string]string, colorMapKey string) {
|
||||
colorMap := map[string]string{}
|
||||
for _, colorKey := range colorKeys[colorMapKey] {
|
||||
colorMap[colorKey] = fullColorMap[colorKey]
|
||||
}
|
||||
(*ret)[colorMapKey] = colorMap
|
||||
}
|
||||
|
||||
func WriteCustomCSS(themeName string, cssMap map[string]interface{}) (err error) {
|
||||
customCSS := map[string]string{}
|
||||
for _, vMap := range cssMap {
|
||||
cssKV := vMap.(map[string]interface{})
|
||||
for k, v := range cssKV {
|
||||
customCSS[k] = v.(string)
|
||||
}
|
||||
}
|
||||
|
||||
themePath := filepath.Join(util.ThemesPath, themeName)
|
||||
custom := filepath.Join(themePath, "custom.css")
|
||||
data, err := os.ReadFile(custom)
|
||||
if nil != err {
|
||||
util.LogErrorf("read custom css [%s] failed: %s", custom, err)
|
||||
return
|
||||
}
|
||||
|
||||
cssData := util.RemoveInvisible(string(data))
|
||||
customStyleSheet := css.Parse(cssData)
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
customRules := customStyleSheet.CssRuleList
|
||||
for _, customRule := range customRules {
|
||||
if css.KEYFRAMES_RULE == customRule.Type {
|
||||
keyframes(customRule, buf)
|
||||
continue
|
||||
} else if css.STYLE_RULE != customRule.Type {
|
||||
buf.WriteString(customRule.Type.Text())
|
||||
buf.WriteString(customRule.Style.Text())
|
||||
buf.WriteString("\n\n")
|
||||
continue
|
||||
}
|
||||
|
||||
for _, style := range customRule.Style.Styles {
|
||||
fixStyle(style)
|
||||
|
||||
if val, ok := customCSS[style.Property]; ok {
|
||||
style.Value = css.NewCSSValue(val)
|
||||
delete(customCSS, style.Property)
|
||||
}
|
||||
}
|
||||
for k, v := range customCSS {
|
||||
customRule.Style.Styles = append(customRule.Style.Styles, &css.CSSStyleDeclaration{Property: k, Value: css.NewCSSValue(v)})
|
||||
}
|
||||
buf.WriteString(customRule.Style.Text())
|
||||
buf.WriteString("\n\n")
|
||||
}
|
||||
|
||||
if err := gulu.File.WriteFileSafer(custom, buf.Bytes(), 0644); nil != err {
|
||||
util.LogErrorf("write custom css [%s] failed: %s", custom, err)
|
||||
}
|
||||
|
||||
util.BroadcastByType("main", "refreshtheme", 0, "", map[string]interface{}{
|
||||
"theme": "/appearance/themes/" + themeName + "/custom.css?" + fmt.Sprintf("%d", time.Now().Unix()),
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func keyframes(rule *css.CSSRule, buf *bytes.Buffer) {
|
||||
buf.WriteString(rule.Type.Text())
|
||||
buf.WriteString(" ")
|
||||
buf.WriteString(rule.Style.Selector.Text())
|
||||
buf.WriteString(" {\n")
|
||||
for _, r := range rule.Rules {
|
||||
buf.WriteString(r.Style.Text())
|
||||
buf.WriteString("\n")
|
||||
}
|
||||
buf.WriteString("\n\n")
|
||||
}
|
||||
|
||||
func fixStyle(style *css.CSSStyleDeclaration) {
|
||||
// The css parsing library seems to have a bug; patch the result up here
|
||||
|
||||
if strings.HasPrefix(style.Property, "-") && !strings.HasPrefix(style.Property, "--") {
|
||||
style.Property = "-" + style.Property
|
||||
}
|
||||
|
||||
if strings.HasPrefix(style.Value.Text(), "- ") {
|
||||
value := style.Value.Text()[2:]
|
||||
style.Value = css.NewCSSValue(value)
|
||||
}
|
||||
}
|
||||
1387
kernel/model/export.go
Normal file
File diff suppressed because it is too large
1626
kernel/model/file.go
Normal file
File diff suppressed because it is too large
115
kernel/model/format.go
Normal file
|
|
@ -0,0 +1,115 @@
|
|||
// SiYuan - Build Your Eternal Digital Garden
|
||||
// Copyright (c) 2020-present, b3log.org
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/88250/gulu"
|
||||
"github.com/88250/lute/ast"
|
||||
"github.com/88250/lute/parse"
|
||||
"github.com/88250/lute/render"
|
||||
"github.com/siyuan-note/siyuan/kernel/filesys"
|
||||
"github.com/siyuan-note/siyuan/kernel/sql"
|
||||
"github.com/siyuan-note/siyuan/kernel/util"
|
||||
)
|
||||
|
||||
func AutoSpace(rootID string) (err error) {
|
||||
tree, err := loadTreeByBlockID(rootID)
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
|
||||
util.PushEndlessProgress(Conf.Language(116))
|
||||
defer util.ClearPushProgress(100)
|
||||
|
||||
generateFormatHistory(tree)
|
||||
|
||||
var blocks []*ast.Node
|
||||
var rootIAL [][]string
|
||||
// Attach block IALs, needed later by the format rendering
|
||||
ast.Walk(tree.Root, func(n *ast.Node, entering bool) ast.WalkStatus {
|
||||
if !entering || !n.IsBlock() {
|
||||
return ast.WalkContinue
|
||||
}
|
||||
|
||||
if ast.NodeDocument == n.Type {
|
||||
rootIAL = n.KramdownIAL
|
||||
return ast.WalkContinue
|
||||
}
|
||||
|
||||
if ast.NodeBlockQueryEmbed == n.Type {
|
||||
if script := n.ChildByType(ast.NodeBlockQueryEmbedScript); nil != script {
|
||||
script.Tokens = bytes.ReplaceAll(script.Tokens, []byte("\n"), []byte(" "))
|
||||
}
|
||||
}
|
||||
|
||||
if 0 < len(n.KramdownIAL) {
|
||||
blocks = append(blocks, n)
|
||||
}
|
||||
return ast.WalkContinue
|
||||
})
|
||||
for _, block := range blocks {
|
||||
block.InsertAfter(&ast.Node{Type: ast.NodeKramdownBlockIAL, Tokens: parse.IAL2Tokens(block.KramdownIAL)})
|
||||
}
|
||||
|
||||
luteEngine := NewLute()
|
||||
luteEngine.SetAutoSpace(true)
|
||||
formatRenderer := render.NewFormatRenderer(tree, luteEngine.RenderOptions)
|
||||
md := formatRenderer.Render()
|
||||
newTree := parseKTree(md)
|
||||
newTree.Root.ID = tree.ID
|
||||
newTree.Root.KramdownIAL = rootIAL
|
||||
newTree.ID = tree.ID
|
||||
newTree.Path = tree.Path
|
||||
newTree.HPath = tree.HPath
|
||||
newTree.Box = tree.Box
|
||||
err = writeJSONQueue(newTree)
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
sql.WaitForWritingDatabase()
|
||||
return
|
||||
}
|
||||
|
||||
func generateFormatHistory(tree *parse.Tree) {
|
||||
historyDir, err := util.GetHistoryDir("format")
|
||||
if nil != err {
|
||||
util.LogErrorf("get history dir failed: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
historyPath := filepath.Join(historyDir, tree.Box, tree.Path)
|
||||
if err = os.MkdirAll(filepath.Dir(historyPath), 0755); nil != err {
|
||||
util.LogErrorf("generate history failed: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
var data []byte
|
||||
if data, err = filesys.NoLockFileRead(filepath.Join(util.DataDir, tree.Box, tree.Path)); err != nil {
|
||||
util.LogErrorf("generate history failed: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = gulu.File.WriteFileSafer(historyPath, data, 0644); err != nil {
|
||||
util.LogErrorf("generate history failed: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
}
|
||||
677
kernel/model/graph.go
Normal file
|
|
@ -0,0 +1,677 @@
|
|||
// SiYuan - Build Your Eternal Digital Garden
|
||||
// Copyright (c) 2020-present, b3log.org
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"math"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/88250/gulu"
|
||||
"github.com/88250/lute/ast"
|
||||
"github.com/88250/lute/html"
|
||||
"github.com/siyuan-note/siyuan/kernel/sql"
|
||||
"github.com/siyuan-note/siyuan/kernel/treenode"
|
||||
"github.com/siyuan-note/siyuan/kernel/util"
|
||||
)
|
||||
|
||||
type GraphNode struct {
|
||||
ID string `json:"id"`
|
||||
Box string `json:"box"`
|
||||
Path string `json:"path"`
|
||||
Size float64 `json:"size"`
|
||||
Title string `json:"title,omitempty"`
|
||||
Label string `json:"label"`
|
||||
Type string `json:"type"`
|
||||
Refs int `json:"refs"`
|
||||
Defs int `json:"defs"`
|
||||
Color *GraphNodeColor `json:"color"`
|
||||
}
|
||||
|
||||
type GraphNodeColor struct {
|
||||
Background string `json:"background"`
|
||||
}
|
||||
|
||||
type GraphLink struct {
|
||||
From string `json:"from"`
|
||||
To string `json:"to"`
|
||||
Ref bool `json:"-"`
|
||||
Color *GraphLinkColor `json:"color"`
|
||||
Arrows *GraphArrows `json:"arrows"`
|
||||
}
|
||||
|
||||
type GraphLinkColor struct {
|
||||
Color string `json:"color"`
|
||||
}
|
||||
|
||||
type GraphArrows struct {
|
||||
To *GraphArrowsTo `json:"to"`
|
||||
}
|
||||
|
||||
type GraphArrowsTo struct {
|
||||
Enabled bool `json:"enabled"`
|
||||
}
|
||||
|
||||
func BuildTreeGraph(id, query string) (boxID string, nodes []*GraphNode, links []*GraphLink) {
|
||||
nodes = []*GraphNode{}
|
||||
links = []*GraphLink{}
|
||||
|
||||
tree, err := loadTreeByBlockID(id)
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
node := treenode.GetNodeInTree(tree, id)
|
||||
if nil == node {
|
||||
return
|
||||
}
|
||||
sqlBlock := sql.BuildBlockFromNode(node, tree)
|
||||
boxID = sqlBlock.Box
|
||||
block := fromSQLBlock(sqlBlock, "", 0)
|
||||
|
||||
stmt := query2Stmt(query)
|
||||
stmt += graphTypeFilter(true)
|
||||
stmt += graphDailyNoteFilter(true)
|
||||
stmt = strings.ReplaceAll(stmt, "content", "ref.content")
|
||||
forwardlinks, backlinks := buildFullLinks(stmt)
|
||||
|
||||
var sqlBlocks []*sql.Block
|
||||
var rootID string
|
||||
if "NodeDocument" == block.Type {
|
||||
sqlBlocks = sql.GetAllChildBlocks(block.ID, stmt)
|
||||
rootID = block.ID
|
||||
} else {
|
||||
sqlBlocks = sql.GetChildBlocks(block.ID, stmt)
|
||||
}
|
||||
blocks := fromSQLBlocks(&sqlBlocks, "", 0)
|
||||
if "" != rootID {
|
||||
// Add document link relations to the local graph https://github.com/siyuan-note/siyuan/issues/4996
|
||||
rootBlock := getBlockIn(blocks, rootID)
|
||||
if nil != rootBlock {
|
||||
// Treat as references
|
||||
sqlRootDefs := sql.QueryDefRootBlocksByRefRootID(rootID)
|
||||
for _, sqlRootDef := range sqlRootDefs {
|
||||
rootDef := fromSQLBlock(sqlRootDef, "", 0)
|
||||
blocks = append(blocks, rootDef)
|
||||
|
||||
sqlRootRefs := sql.QueryRefRootBlocksByDefRootID(sqlRootDef.ID)
|
||||
rootRefs := fromSQLBlocks(&sqlRootRefs, "", 0)
|
||||
rootDef.Refs = append(rootDef.Refs, rootRefs...)
|
||||
}
|
||||
|
||||
// Treat as definitions
|
||||
sqlRootRefs := sql.QueryRefRootBlocksByDefRootID(rootID)
|
||||
for _, sqlRootRef := range sqlRootRefs {
|
||||
rootRef := fromSQLBlock(sqlRootRef, "", 0)
|
||||
blocks = append(blocks, rootRef)
|
||||
|
||||
rootBlock.Refs = append(rootBlock.Refs, rootRef)
|
||||
}
|
||||
}
|
||||
}
|
||||
style := graphStyle(true)
|
||||
genTreeNodes(blocks, &nodes, &links, true, style)
|
||||
growTreeGraph(&forwardlinks, &backlinks, &nodes)
|
||||
blocks = append(blocks, forwardlinks...)
|
||||
blocks = append(blocks, backlinks...)
|
||||
buildLinks(&blocks, &links, style, true)
|
||||
if Conf.Graph.Local.Tag {
|
||||
p := sqlBlock.Path
|
||||
linkTagBlocks(&blocks, &nodes, &links, p, style)
|
||||
}
|
||||
markLinkedNodes(&nodes, &links, true)
|
||||
nodes = removeDuplicatedUnescape(nodes)
|
||||
return
|
||||
}
|
||||
|
||||
func BuildGraph(query string) (boxID string, nodes []*GraphNode, links []*GraphLink) {
|
||||
nodes = []*GraphNode{}
|
||||
links = []*GraphLink{}
|
||||
|
||||
stmt := query2Stmt(query)
|
||||
stmt = strings.TrimPrefix(stmt, "select * from blocks where")
|
||||
stmt += graphTypeFilter(false)
|
||||
stmt += graphDailyNoteFilter(false)
|
||||
stmt = strings.ReplaceAll(stmt, "content", "ref.content")
|
||||
forwardlinks, backlinks := buildFullLinks(stmt)
|
||||
|
||||
var blocks []*Block
|
||||
roots := sql.GetAllRootBlocks()
|
||||
style := graphStyle(false)
|
||||
if 0 < len(roots) {
|
||||
boxID = roots[0].Box
|
||||
}
|
||||
for _, root := range roots {
|
||||
sqlBlocks := sql.GetAllChildBlocks(root.ID, stmt)
|
||||
treeBlocks := fromSQLBlocks(&sqlBlocks, "", 0)
|
||||
genTreeNodes(treeBlocks, &nodes, &links, false, style)
|
||||
blocks = append(blocks, treeBlocks...)
|
||||
|
||||
// Document block associations
|
||||
rootBlock := getBlockIn(treeBlocks, root.ID)
|
||||
if nil == rootBlock {
|
||||
//util.LogWarnf("root block is nil [rootID=%s], tree blocks [len=%d], just skip it", root.ID, len(treeBlocks))
|
||||
continue
|
||||
}
|
||||
|
||||
sqlRootRefs := sql.QueryRefRootBlocksByDefRootID(root.ID)
|
||||
rootRefs := fromSQLBlocks(&sqlRootRefs, "", 0)
|
||||
rootBlock.Refs = append(rootBlock.Refs, rootRefs...)
|
||||
}
|
||||
growTreeGraph(&forwardlinks, &backlinks, &nodes)
|
||||
blocks = append(blocks, forwardlinks...)
|
||||
blocks = append(blocks, backlinks...)
|
||||
buildLinks(&blocks, &links, style, false)
|
||||
if Conf.Graph.Global.Tag {
|
||||
linkTagBlocks(&blocks, &nodes, &links, "", style)
|
||||
}
|
||||
markLinkedNodes(&nodes, &links, false)
|
||||
pruneUnref(&nodes, &links)
|
||||
nodes = removeDuplicatedUnescape(nodes)
|
||||
return
|
||||
}
|
||||
|
||||
func linkTagBlocks(blocks *[]*Block, nodes *[]*GraphNode, links *[]*GraphLink, p string, style map[string]string) {
|
||||
tagSpans := sql.QueryTagSpans(p, 1024)
|
||||
if 1 > len(tagSpans) {
|
||||
return
|
||||
}
|
||||
|
||||
nodeSize := Conf.Graph.Local.NodeSize
|
||||
if "" != p {
|
||||
nodeSize = Conf.Graph.Global.NodeSize
|
||||
}
|
||||
|
||||
// Build the tag nodes
|
||||
var tagNodes []*GraphNode
|
||||
for _, tagSpan := range tagSpans {
|
||||
if nil == tagNodeIn(tagNodes, tagSpan.Content) {
|
||||
node := &GraphNode{
|
||||
ID: tagSpan.Content,
|
||||
Label: tagSpan.Content,
|
||||
Size: nodeSize,
|
||||
Type: tagSpan.Type,
|
||||
Color: &GraphNodeColor{Background: style["--b3-graph-tag-point"]},
|
||||
}
|
||||
*nodes = append(*nodes, node)
|
||||
tagNodes = append(tagNodes, node)
|
||||
}
|
||||
}
|
||||
|
||||
// Link tags to blocks
|
||||
for _, block := range *blocks {
|
||||
for _, tagSpan := range tagSpans {
|
||||
if block.ID == tagSpan.BlockID {
|
||||
*links = append(*links, &GraphLink{
|
||||
From: tagSpan.Content,
|
||||
To: block.ID,
|
||||
Color: &GraphLinkColor{Color: style["--b3-graph-tag-line"]},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Link hierarchical tags
|
||||
for _, tagNode := range tagNodes {
|
||||
ids := strings.Split(tagNode.ID, "/")
|
||||
if 2 > len(ids) {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, targetID := range ids[:len(ids)-1] {
|
||||
if targetTag := tagNodeIn(tagNodes, targetID); nil != targetTag {
|
||||
|
||||
*links = append(*links, &GraphLink{
|
||||
From: tagNode.ID,
|
||||
To: targetID,
|
||||
Color: &GraphLinkColor{Color: style["--b3-graph-tag-tag-line"]},
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func tagNodeIn(tagNodes []*GraphNode, content string) *GraphNode {
|
||||
for _, tagNode := range tagNodes {
|
||||
if tagNode.Label == content {
|
||||
return tagNode
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func growTreeGraph(forwardlinks, backlinks *[]*Block, nodes *[]*GraphNode) {
|
||||
forwardDepth, backDepth := 0, 0
|
||||
growLinkedNodes(forwardlinks, backlinks, nodes, nodes, &forwardDepth, &backDepth)
|
||||
}
|
||||
|
||||
func growLinkedNodes(forwardlinks, backlinks *[]*Block, nodes, all *[]*GraphNode, forwardDepth, backDepth *int) {
|
||||
if 1 > len(*nodes) {
|
||||
return
|
||||
}
|
||||
|
||||
forwardGeneration := &[]*GraphNode{}
|
||||
if 16 > *forwardDepth {
|
||||
for _, ref := range *forwardlinks {
|
||||
for _, node := range *nodes {
|
||||
if node.ID == ref.ID {
|
||||
var defs []*Block
|
||||
for _, refDef := range ref.Defs {
|
||||
if existNodes(all, refDef.ID) || existNodes(forwardGeneration, refDef.ID) || existNodes(nodes, refDef.ID) {
|
||||
continue
|
||||
}
|
||||
defs = append(defs, refDef)
|
||||
}
|
||||
|
||||
for _, refDef := range defs {
|
||||
defNode := &GraphNode{
|
||||
ID: refDef.ID,
|
||||
Box: refDef.Box,
|
||||
Path: refDef.Path,
|
||||
Size: Conf.Graph.Local.NodeSize,
|
||||
Type: refDef.Type,
|
||||
}
|
||||
nodeTitleLabel(defNode, nodeContentByBlock(refDef))
|
||||
*forwardGeneration = append(*forwardGeneration, defNode)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
backGeneration := &[]*GraphNode{}
|
||||
if 16 > *backDepth {
|
||||
for _, def := range *backlinks {
|
||||
for _, node := range *nodes {
|
||||
if node.ID == def.ID {
|
||||
for _, ref := range def.Refs {
|
||||
if existNodes(all, ref.ID) || existNodes(backGeneration, ref.ID) || existNodes(nodes, ref.ID) {
|
||||
continue
|
||||
}
|
||||
|
||||
refNode := &GraphNode{
|
||||
ID: ref.ID,
|
||||
Box: ref.Box,
|
||||
Path: ref.Path,
|
||||
Size: Conf.Graph.Local.NodeSize,
|
||||
Type: ref.Type,
|
||||
}
|
||||
nodeTitleLabel(refNode, nodeContentByBlock(ref))
|
||||
*backGeneration = append(*backGeneration, refNode)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
generation := &[]*GraphNode{}
|
||||
*generation = append(*generation, *forwardGeneration...)
|
||||
*generation = append(*generation, *backGeneration...)
|
||||
*forwardDepth++
|
||||
*backDepth++
|
||||
growLinkedNodes(forwardlinks, backlinks, generation, nodes, forwardDepth, backDepth)
|
||||
*nodes = append(*nodes, *generation...)
|
||||
}
|
||||
|
||||
func existNodes(nodes *[]*GraphNode, id string) bool {
|
||||
for _, node := range *nodes {
|
||||
if node.ID == id {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func buildLinks(defs *[]*Block, links *[]*GraphLink, style map[string]string, local bool) {
|
||||
for _, def := range *defs {
|
||||
for _, ref := range def.Refs {
|
||||
link := &GraphLink{
|
||||
From: ref.ID,
|
||||
To: def.ID,
|
||||
Ref: true,
|
||||
Color: linkColor(true, style),
|
||||
}
|
||||
if local {
|
||||
if Conf.Graph.Local.Arrow {
|
||||
link.Arrows = &GraphArrows{To: &GraphArrowsTo{Enabled: true}}
|
||||
}
|
||||
} else {
|
||||
if Conf.Graph.Global.Arrow {
|
||||
link.Arrows = &GraphArrows{To: &GraphArrowsTo{Enabled: true}}
|
||||
}
|
||||
}
|
||||
*links = append(*links, link)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func genTreeNodes(blocks []*Block, nodes *[]*GraphNode, links *[]*GraphLink, local bool, style map[string]string) {
|
||||
nodeSize := Conf.Graph.Local.NodeSize
|
||||
if !local {
|
||||
nodeSize = Conf.Graph.Global.NodeSize
|
||||
}
|
||||
|
||||
for _, block := range blocks {
|
||||
node := &GraphNode{
|
||||
ID: block.ID,
|
||||
Box: block.Box,
|
||||
Path: block.Path,
|
||||
Type: block.Type,
|
||||
Size: nodeSize,
|
||||
Color: &GraphNodeColor{Background: nodeColor(block.Type, style)},
|
||||
}
|
||||
nodeTitleLabel(node, nodeContentByBlock(block))
|
||||
*nodes = append(*nodes, node)
|
||||
|
||||
*links = append(*links, &GraphLink{
|
||||
From: block.ParentID,
|
||||
To: block.ID,
|
||||
Ref: false,
|
||||
Color: linkColor(false, style),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func markLinkedNodes(nodes *[]*GraphNode, links *[]*GraphLink, local bool) {
|
||||
nodeSize := Conf.Graph.Local.NodeSize
|
||||
if !local {
|
||||
nodeSize = Conf.Graph.Global.NodeSize
|
||||
}
|
||||
|
||||
tmpLinks := (*links)[:0]
|
||||
for _, link := range *links {
|
||||
var sourceFound, targetFound bool
|
||||
for _, node := range *nodes {
|
||||
if link.To == node.ID {
|
||||
if link.Ref {
|
||||
size := nodeSize
|
||||
node.Defs++
|
||||
size = math.Log2(float64(node.Defs))*nodeSize + nodeSize
|
||||
node.Size = size
|
||||
}
|
||||
targetFound = true
|
||||
} else if link.From == node.ID {
|
||||
node.Refs++
|
||||
sourceFound = true
|
||||
}
|
||||
if targetFound && sourceFound {
|
||||
break
|
||||
}
|
||||
}
|
||||
if sourceFound && targetFound {
|
||||
tmpLinks = append(tmpLinks, link)
|
||||
}
|
||||
}
|
||||
*links = tmpLinks
|
||||
}
|
||||
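markLinkedNodes and pruneUnref both use the s[:0] idiom to filter a slice in place without allocating a second backing array. A standalone sketch of the idiom on plain integers:

func keepEvenInPlace(nums []int) []int {
	kept := nums[:0] // shares the backing array of nums
	for _, n := range nums {
		if 0 == n%2 {
			kept = append(kept, n)
		}
	}
	return kept
}

As in the functions above, the original slice contents are overwritten while filtering.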
|
||||
func removeDuplicatedUnescape(nodes []*GraphNode) (ret []*GraphNode) {
|
||||
m := map[string]*GraphNode{}
|
||||
for _, n := range nodes {
|
||||
if nil == m[n.ID] {
|
||||
n.Title = html.UnescapeString(n.Title)
|
||||
n.Label = html.UnescapeString(n.Label)
|
||||
ret = append(ret, n)
|
||||
m[n.ID] = n
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func pruneUnref(nodes *[]*GraphNode, links *[]*GraphLink) {
|
||||
maxBlocks := Conf.Graph.MaxBlocks
|
||||
tmpNodes := (*nodes)[:0]
|
||||
for _, node := range *nodes {
|
||||
if 0 == Conf.Graph.Global.MinRefs {
|
||||
tmpNodes = append(tmpNodes, node)
|
||||
} else {
|
||||
if Conf.Graph.Global.MinRefs <= node.Refs {
|
||||
tmpNodes = append(tmpNodes, node)
|
||||
continue
|
||||
}
|
||||
|
||||
if Conf.Graph.Global.MinRefs <= node.Defs {
|
||||
tmpNodes = append(tmpNodes, node)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if maxBlocks < len(tmpNodes) {
|
||||
util.LogWarnf("exceeded the maximum number of render nodes [%d]", maxBlocks)
|
||||
break
|
||||
}
|
||||
}
|
||||
*nodes = tmpNodes
|
||||
|
||||
tmpLinks := (*links)[:0]
|
||||
for _, link := range *links {
|
||||
var sourceFound, targetFound bool
|
||||
for _, node := range *nodes {
|
||||
if link.To == node.ID {
|
||||
targetFound = true
|
||||
} else if link.From == node.ID {
|
||||
sourceFound = true
|
||||
}
|
||||
}
|
||||
if sourceFound && targetFound {
|
||||
tmpLinks = append(tmpLinks, link)
|
||||
}
|
||||
}
|
||||
*links = tmpLinks
|
||||
}
|
||||
|
||||
func nodeContentByBlock(block *Block) (ret string) {
|
||||
if ret = block.Name; "" != ret {
|
||||
return
|
||||
}
|
||||
if ret = block.Memo; "" != ret {
|
||||
return
|
||||
}
|
||||
ret = block.Content
|
||||
if maxLen := 48; maxLen < utf8.RuneCountInString(ret) {
|
||||
ret = gulu.Str.SubStr(ret, maxLen) + "..."
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func nodeContentByNode(node *ast.Node, text string) (ret string) {
|
||||
if ret = node.IALAttr("name"); "" != ret {
|
||||
return
|
||||
}
|
||||
if ret = node.IALAttr("memo"); "" != ret {
|
||||
return
|
||||
}
|
||||
if maxLen := 48; maxLen < utf8.RuneCountInString(text) {
|
||||
text = gulu.Str.SubStr(text, maxLen) + "..."
|
||||
}
|
||||
ret = html.EscapeString(text)
|
||||
return
|
||||
}
|
||||
|
||||
func linkColor(ref bool, style map[string]string) (ret *GraphLinkColor) {
|
||||
ret = &GraphLinkColor{}
|
||||
if ref {
|
||||
ret.Color = style["--b3-graph-ref-line"]
|
||||
return
|
||||
}
|
||||
ret.Color = style["--b3-graph-line"]
|
||||
return
|
||||
}
|
||||
|
||||
func nodeColor(typ string, style map[string]string) string {
|
||||
switch typ {
|
||||
case "NodeDocument":
|
||||
return style["--b3-graph-doc-point"]
|
||||
case "NodeParagraph":
|
||||
return style["--b3-graph-p-point"]
|
||||
case "NodeHeading":
|
||||
return style["--b3-graph-heading-point"]
|
||||
case "NodeMathBlock":
|
||||
return style["--b3-graph-math-point"]
|
||||
case "NodeCodeBlock":
|
||||
return style["--b3-graph-code-point"]
|
||||
case "NodeTable":
|
||||
return style["--b3-graph-table-point"]
|
||||
case "NodeList":
|
||||
return style["--b3-graph-list-point"]
|
||||
case "NodeListItem":
|
||||
return style["--b3-graph-listitem-point"]
|
||||
case "NodeBlockquote":
|
||||
return style["--b3-graph-bq-point"]
|
||||
case "NodeSuperBlock":
|
||||
return style["--b3-graph-super-point"]
|
||||
}
|
||||
return style["--b3-graph-p-point"]
|
||||
}
|
||||
|
||||
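// graphTypeFilter builds the "AND ref.type IN (...)" SQL clause from the enabled block types
// of the local or global graph settings; document blocks ('d') are always included.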
func graphTypeFilter(local bool) string {
|
||||
var inList []string
|
||||
|
||||
paragraph := Conf.Graph.Local.Paragraph
|
||||
if !local {
|
||||
paragraph = Conf.Graph.Global.Paragraph
|
||||
}
|
||||
if paragraph {
|
||||
inList = append(inList, "'p'")
|
||||
}
|
||||
|
||||
heading := Conf.Graph.Local.Heading
|
||||
if !local {
|
||||
heading = Conf.Graph.Global.Heading
|
||||
}
|
||||
if heading {
|
||||
inList = append(inList, "'h'")
|
||||
}
|
||||
|
||||
math := Conf.Graph.Local.Math
|
||||
if !local {
|
||||
math = Conf.Graph.Global.Math
|
||||
}
|
||||
if math {
|
||||
inList = append(inList, "'m'")
|
||||
}
|
||||
|
||||
code := Conf.Graph.Local.Code
|
||||
if !local {
|
||||
code = Conf.Graph.Global.Code
|
||||
}
|
||||
if code {
|
||||
inList = append(inList, "'c'")
|
||||
}
|
||||
|
||||
table := Conf.Graph.Local.Table
|
||||
if !local {
|
||||
table = Conf.Graph.Global.Table
|
||||
}
|
||||
if table {
|
||||
inList = append(inList, "'t'")
|
||||
}
|
||||
|
||||
list := Conf.Graph.Local.List
|
||||
if !local {
|
||||
list = Conf.Graph.Global.List
|
||||
}
|
||||
if list {
|
||||
inList = append(inList, "'l'")
|
||||
}
|
||||
|
||||
listItem := Conf.Graph.Local.ListItem
|
||||
if !local {
|
||||
listItem = Conf.Graph.Global.ListItem
|
||||
}
|
||||
if listItem {
|
||||
inList = append(inList, "'i'")
|
||||
}
|
||||
|
||||
blockquote := Conf.Graph.Local.Blockquote
|
||||
if !local {
|
||||
blockquote = Conf.Graph.Global.Blockquote
|
||||
}
|
||||
if blockquote {
|
||||
inList = append(inList, "'b'")
|
||||
}
|
||||
|
||||
super := Conf.Graph.Local.Super
|
||||
if !local {
|
||||
super = Conf.Graph.Global.Super
|
||||
}
|
||||
if super {
|
||||
inList = append(inList, "'s'")
|
||||
}
|
||||
|
||||
inList = append(inList, "'d'")
|
||||
return " AND ref.type IN (" + strings.Join(inList, ",") + ")"
|
||||
}
|
||||
|
||||
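// graphDailyNoteFilter returns "AND ref.hpath NOT LIKE ..." clauses that exclude the daily
// note folders of all opened notebooks when daily notes are filtered out of the graph.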
func graphDailyNoteFilter(local bool) string {
|
||||
dailyNote := Conf.Graph.Local.DailyNote
|
||||
if !local {
|
||||
dailyNote = Conf.Graph.Global.DailyNote
|
||||
}
|
||||
|
||||
if dailyNote {
|
||||
return ""
|
||||
}
|
||||
|
||||
var dailyNotesPaths []string
|
||||
for _, box := range Conf.GetOpenedBoxes() {
|
||||
boxConf := box.GetConf()
|
||||
if 1 < strings.Count(boxConf.DailyNoteSavePath, "/") {
|
||||
dailyNoteSaveDir := strings.Split(boxConf.DailyNoteSavePath, "/")[1]
|
||||
dailyNotesPaths = append(dailyNotesPaths, "/"+dailyNoteSaveDir)
|
||||
}
|
||||
}
|
||||
if 1 > len(dailyNotesPaths) {
|
||||
return ""
|
||||
}
|
||||
|
||||
buf := bytes.Buffer{}
|
||||
for _, p := range dailyNotesPaths {
|
||||
buf.WriteString(" AND ref.hpath NOT LIKE '" + p + "%'")
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func graphStyle(local bool) (ret map[string]string) {
|
||||
ret = map[string]string{}
|
||||
ret["--b3-graph-doc-point"] = currentCSSValue("--b3-graph-doc-point")
|
||||
ret["--b3-graph-p-point"] = currentCSSValue("--b3-graph-p-point")
|
||||
ret["--b3-graph-heading-point"] = currentCSSValue("--b3-graph-heading-point")
|
||||
ret["--b3-graph-math-point"] = currentCSSValue("--b3-graph-math-point")
|
||||
ret["--b3-graph-code-point"] = currentCSSValue("--b3-graph-code-point")
|
||||
ret["--b3-graph-table-point"] = currentCSSValue("--b3-graph-table-point")
|
||||
ret["--b3-graph-list-point"] = currentCSSValue("--b3-graph-list-point")
|
||||
ret["--b3-graph-listitem-point"] = currentCSSValue("--b3-graph-listitem-point")
|
||||
ret["--b3-graph-bq-point"] = currentCSSValue("--b3-graph-bq-point")
|
||||
ret["--b3-graph-super-point"] = currentCSSValue("--b3-graph-super-point")
|
||||
|
||||
ret["--b3-graph-line"] = currentCSSValue("--b3-graph-line")
|
||||
ret["--b3-graph-ref-line"] = currentCSSValue("--b3-graph-ref-line")
|
||||
ret["--b3-graph-tag-line"] = currentCSSValue("--b3-graph-tag-line")
|
||||
ret["--b3-graph-tag-tag-line"] = currentCSSValue("--b3-graph-tag-tag-line")
|
||||
ret["--b3-graph-asset-line"] = currentCSSValue("--b3-graph-asset-line")
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func nodeTitleLabel(node *GraphNode, blockContent string) {
|
||||
if "NodeDocument" != node.Type && "NodeHeading" != node.Type {
|
||||
node.Title = blockContent
|
||||
} else {
|
||||
node.Label = blockContent
|
||||
}
|
||||
}
|
||||
312
kernel/model/heading.go
Normal file
@@ -0,0 +1,312 @@
// SiYuan - Build Your Eternal Digital Garden
|
||||
// Copyright (c) 2020-present, b3log.org
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/88250/gulu"
|
||||
"github.com/88250/lute/ast"
|
||||
"github.com/88250/lute/parse"
|
||||
"github.com/siyuan-note/siyuan/kernel/cache"
|
||||
"github.com/siyuan-note/siyuan/kernel/sql"
|
||||
"github.com/siyuan-note/siyuan/kernel/treenode"
|
||||
"github.com/siyuan-note/siyuan/kernel/util"
|
||||
)
|
||||
|
||||
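// doFoldHeading folds the heading identified by the operation ID: the blocks below it are
// marked with heading-fold, the tree is persisted and re-indexed, and the children IDs are
// returned to the caller via operation.RetData.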
func (tx *Transaction) doFoldHeading(operation *Operation) (ret *TxErr) {
|
||||
headingID := operation.ID
|
||||
tree, err := loadTreeByBlockID(headingID)
|
||||
if nil != err {
|
||||
return &TxErr{code: TxErrCodeBlockNotFound, id: headingID}
|
||||
}
|
||||
|
||||
childrenIDs := []string{} // Must not be nil here, otherwise folding a heading with no content below it crashes the kernel https://github.com/siyuan-note/siyuan/issues/3643
|
||||
heading := treenode.GetNodeInTree(tree, headingID)
|
||||
if nil == heading {
|
||||
return &TxErr{code: TxErrCodeBlockNotFound, id: headingID}
|
||||
}
|
||||
|
||||
children := treenode.HeadingChildren(heading)
|
||||
for _, child := range children {
|
||||
childrenIDs = append(childrenIDs, child.ID)
|
||||
child.RemoveIALAttr("fold")
|
||||
child.SetIALAttr("heading-fold", "1")
|
||||
}
|
||||
heading.SetIALAttr("fold", "1")
|
||||
if err = tx.writeTree(tree); nil != err {
|
||||
return &TxErr{code: TxErrCodeWriteTree, msg: err.Error(), id: headingID}
|
||||
}
|
||||
IncWorkspaceDataVer()
|
||||
|
||||
cache.PutBlockIAL(headingID, parse.IAL2Map(heading.KramdownIAL))
|
||||
for _, child := range children {
|
||||
cache.PutBlockIAL(child.ID, parse.IAL2Map(child.KramdownIAL))
|
||||
}
|
||||
sql.UpsertTreeQueue(tree)
|
||||
operation.RetData = childrenIDs
|
||||
return
|
||||
}
|
||||
|
||||
func (tx *Transaction) doUnfoldHeading(operation *Operation) (ret *TxErr) {
|
||||
headingID := operation.ID
|
||||
|
||||
tree, err := loadTreeByBlockID(headingID)
|
||||
if nil != err {
|
||||
return &TxErr{code: TxErrCodeBlockNotFound, id: headingID}
|
||||
}
|
||||
|
||||
heading := treenode.GetNodeInTree(tree, headingID)
|
||||
if nil == heading {
|
||||
return &TxErr{code: TxErrCodeBlockNotFound, id: headingID}
|
||||
}
|
||||
|
||||
children := treenode.FoldedHeadingChildren(heading)
|
||||
for _, child := range children {
|
||||
child.RemoveIALAttr("heading-fold")
|
||||
child.RemoveIALAttr("fold")
|
||||
}
|
||||
heading.RemoveIALAttr("fold")
|
||||
if err = tx.writeTree(tree); nil != err {
|
||||
return &TxErr{code: TxErrCodeWriteTree, msg: err.Error(), id: headingID}
|
||||
}
|
||||
IncWorkspaceDataVer()
|
||||
|
||||
cache.PutBlockIAL(headingID, parse.IAL2Map(heading.KramdownIAL))
|
||||
for _, child := range children {
|
||||
cache.PutBlockIAL(child.ID, parse.IAL2Map(child.KramdownIAL))
|
||||
}
|
||||
sql.UpsertTreeQueue(tree)
|
||||
|
||||
luteEngine := NewLute()
|
||||
operation.RetData = renderBlockDOMByNodes(children, luteEngine)
|
||||
return
|
||||
}
|
||||
|
||||
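// Doc2Heading converts the document srcID into a heading inserted before or after the block
// targetID, moving the document's blocks into the target tree and adjusting heading levels
// (capped at 6). Both trees are re-indexed and their backlinks refreshed.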
func Doc2Heading(srcID, targetID string, after bool) (srcTreeBox, srcTreePath string, err error) {
|
||||
WaitForWritingFiles()
|
||||
|
||||
srcTree, _ := loadTreeByBlockID(srcID)
|
||||
if nil == srcTree {
|
||||
err = ErrBlockNotFound
|
||||
return
|
||||
}
|
||||
|
||||
subDir := filepath.Join(util.DataDir, srcTree.Box, strings.TrimSuffix(srcTree.Path, ".sy"))
|
||||
if gulu.File.IsDir(subDir) {
|
||||
if !util.IsEmptyDir(subDir) {
|
||||
err = errors.New(Conf.Language(20))
|
||||
return
|
||||
} else {
|
||||
os.Remove(subDir) // Removing an empty folder has no side effects
|
||||
}
|
||||
}
|
||||
|
||||
targetTree, _ := loadTreeByBlockID(targetID)
|
||||
if nil == targetTree {
|
||||
err = ErrBlockNotFound
|
||||
return
|
||||
}
|
||||
|
||||
pivot := treenode.GetNodeInTree(targetTree, targetID)
|
||||
if nil == pivot {
|
||||
err = ErrBlockNotFound
|
||||
return
|
||||
}
|
||||
|
||||
if ast.NodeListItem == pivot.Type {
|
||||
pivot = pivot.LastChild
|
||||
}
|
||||
|
||||
pivotLevel := treenode.HeadingLevel(pivot)
|
||||
deltaLevel := pivotLevel - treenode.TopHeadingLevel(srcTree) + 1
|
||||
headingLevel := pivotLevel
|
||||
if ast.NodeHeading == pivot.Type { // Insert at the same level
|
||||
children := treenode.HeadingChildren(pivot)
|
||||
if after {
|
||||
if length := len(children); 0 < length {
|
||||
pivot = children[length-1]
|
||||
}
|
||||
}
|
||||
} else { // Insert as a child node
|
||||
headingLevel++
|
||||
deltaLevel++
|
||||
}
|
||||
if 6 < headingLevel {
|
||||
headingLevel = 6
|
||||
}
|
||||
|
||||
srcTree.Root.RemoveIALAttr("type")
|
||||
heading := &ast.Node{ID: srcTree.Root.ID, Type: ast.NodeHeading, HeadingLevel: headingLevel, KramdownIAL: srcTree.Root.KramdownIAL}
|
||||
heading.AppendChild(&ast.Node{Type: ast.NodeText, Tokens: []byte(srcTree.Root.IALAttr("title"))})
|
||||
heading.Box = targetTree.Box
|
||||
heading.Path = targetTree.Path
|
||||
|
||||
var nodes []*ast.Node
|
||||
if after {
|
||||
for c := srcTree.Root.LastChild; nil != c; c = c.Previous {
|
||||
nodes = append(nodes, c)
|
||||
}
|
||||
} else {
|
||||
for c := srcTree.Root.FirstChild; nil != c; c = c.Next {
|
||||
nodes = append(nodes, c)
|
||||
}
|
||||
}
|
||||
|
||||
if !after {
|
||||
pivot.InsertBefore(heading)
|
||||
}
|
||||
|
||||
for _, n := range nodes {
|
||||
if ast.NodeHeading == n.Type {
|
||||
n.HeadingLevel = n.HeadingLevel + deltaLevel
|
||||
if 6 < n.HeadingLevel {
|
||||
n.HeadingLevel = 6
|
||||
}
|
||||
}
|
||||
n.Box = targetTree.Box
|
||||
n.Path = targetTree.Path
|
||||
if after {
|
||||
pivot.InsertAfter(n)
|
||||
} else {
|
||||
pivot.InsertBefore(n)
|
||||
}
|
||||
}
|
||||
|
||||
if after {
|
||||
pivot.InsertAfter(heading)
|
||||
}
|
||||
|
||||
if contentPivot := treenode.GetNodeInTree(targetTree, targetID); nil != contentPivot && ast.NodeParagraph == contentPivot.Type && nil == contentPivot.FirstChild { // Inserted under an empty paragraph block, so remove the empty paragraph
|
||||
contentPivot.Unlink()
|
||||
}
|
||||
|
||||
srcTreeBox, srcTreePath = srcTree.Box, srcTree.Path
|
||||
srcTree.Root.SetIALAttr("updated", util.CurrentTimeSecondsStr())
|
||||
if err = indexWriteJSONQueue(srcTree); nil != err {
|
||||
return
|
||||
}
|
||||
|
||||
targetTree.Root.SetIALAttr("updated", util.CurrentTimeSecondsStr())
|
||||
err = indexWriteJSONQueue(targetTree)
|
||||
IncWorkspaceDataVer()
|
||||
RefreshBacklink(srcTree.ID)
|
||||
RefreshBacklink(targetTree.ID)
|
||||
return
|
||||
}
|
||||
|
||||
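// Heading2Doc extracts the heading srcHeadingID and its child blocks into a new document
// under targetBoxID/targetPath, unfolding the heading first and normalizing heading levels
// in the new tree.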
func Heading2Doc(srcHeadingID, targetBoxID, targetPath string) (srcRootBlockID, newTargetPath string, err error) {
|
||||
WaitForWritingFiles()
|
||||
|
||||
srcTree, _ := loadTreeByBlockID(srcHeadingID)
|
||||
if nil == srcTree {
|
||||
err = ErrBlockNotFound
|
||||
return
|
||||
}
|
||||
srcRootBlockID = srcTree.Root.ID
|
||||
|
||||
headingBlock, err := getBlock(srcHeadingID)
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
if nil == headingBlock {
|
||||
err = ErrBlockNotFound
|
||||
return
|
||||
}
|
||||
headingNode := treenode.GetNodeInTree(srcTree, srcHeadingID)
|
||||
if nil == headingNode {
|
||||
err = ErrBlockNotFound
|
||||
return
|
||||
}
|
||||
|
||||
box := Conf.Box(targetBoxID)
|
||||
headingText := sql.GetRefText(headingNode.ID)
|
||||
headingText = util.FilterFileName(headingText)
|
||||
|
||||
moveToRoot := "/" == targetPath
|
||||
toHP := path.Join("/", headingText)
|
||||
toFolder := "/"
|
||||
|
||||
if !moveToRoot {
|
||||
toBlock := treenode.GetBlockTreeRootByPath(targetBoxID, targetPath)
|
||||
if nil == toBlock {
|
||||
err = ErrBlockNotFound
|
||||
return
|
||||
}
|
||||
toHP = path.Join(toBlock.HPath, headingText)
|
||||
toFolder = path.Join(path.Dir(targetPath), toBlock.ID)
|
||||
}
|
||||
|
||||
newTargetPath = path.Join(toFolder, srcHeadingID+".sy")
|
||||
if !box.Exist(toFolder) {
|
||||
if err = box.MkdirAll(toFolder); nil != err {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// When converting a folded heading to a document, the blocks below it need to be expanded automatically https://github.com/siyuan-note/siyuan/issues/2947
|
||||
children := treenode.FoldedHeadingChildren(headingNode)
|
||||
for _, child := range children {
|
||||
child.RemoveIALAttr("heading-fold")
|
||||
child.RemoveIALAttr("fold")
|
||||
}
|
||||
headingNode.RemoveIALAttr("fold")
|
||||
|
||||
luteEngine := NewLute()
|
||||
newTree := &parse.Tree{Root: &ast.Node{Type: ast.NodeDocument, ID: srcHeadingID}, Context: &parse.Context{ParseOption: luteEngine.ParseOptions}}
|
||||
children = treenode.HeadingChildren(headingNode)
|
||||
for _, c := range children {
|
||||
newTree.Root.AppendChild(c)
|
||||
}
|
||||
newTree.ID = srcHeadingID
|
||||
newTree.Path = newTargetPath
|
||||
newTree.HPath = toHP
|
||||
headingNode.SetIALAttr("type", "doc")
|
||||
headingNode.SetIALAttr("id", srcHeadingID)
|
||||
headingNode.SetIALAttr("title", headingText)
|
||||
newTree.Root.KramdownIAL = headingNode.KramdownIAL
|
||||
|
||||
topLevel := treenode.TopHeadingLevel(newTree)
|
||||
for c := newTree.Root.FirstChild; nil != c; c = c.Next {
|
||||
if ast.NodeHeading == c.Type {
|
||||
c.HeadingLevel = c.HeadingLevel - topLevel + 1
|
||||
if 6 < c.HeadingLevel {
|
||||
c.HeadingLevel = 6
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
headingNode.Unlink()
|
||||
srcTree.Root.SetIALAttr("updated", util.CurrentTimeSecondsStr())
|
||||
if err = indexWriteJSONQueue(srcTree); nil != err {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
newTree.Box, newTree.Path = targetBoxID, newTargetPath
|
||||
newTree.Root.SetIALAttr("updated", util.CurrentTimeSecondsStr())
|
||||
if err = indexWriteJSONQueue(newTree); nil != err {
|
||||
return "", "", err
|
||||
}
|
||||
IncWorkspaceDataVer()
|
||||
RefreshBacklink(srcTree.ID)
|
||||
RefreshBacklink(newTree.ID)
|
||||
return
|
||||
}
|
||||
566
kernel/model/history.go
Normal file
@@ -0,0 +1,566 @@
// SiYuan - Build Your Eternal Digital Garden
|
||||
// Copyright (c) 2020-present, b3log.org
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/88250/gulu"
|
||||
"github.com/88250/protyle"
|
||||
"github.com/siyuan-note/siyuan/kernel/conf"
|
||||
"github.com/siyuan-note/siyuan/kernel/filesys"
|
||||
"github.com/siyuan-note/siyuan/kernel/treenode"
|
||||
"github.com/siyuan-note/siyuan/kernel/util"
|
||||
)
|
||||
|
||||
var historyTicker = time.NewTicker(time.Minute * 10)
|
||||
|
||||
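// AutoGenerateDocHistory periodically snapshots recently modified documents into the
// workspace history folder, driven by historyTicker and the configured generation interval.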
func AutoGenerateDocHistory() {
|
||||
ChangeHistoryTick(Conf.Editor.GenerateHistoryInterval)
|
||||
for {
|
||||
<-historyTicker.C
|
||||
generateDocHistory()
|
||||
}
|
||||
}
|
||||
|
||||
func generateDocHistory() {
|
||||
if 1 > Conf.Editor.GenerateHistoryInterval {
|
||||
return
|
||||
}
|
||||
|
||||
WaitForWritingFiles()
|
||||
syncLock.Lock()
|
||||
defer syncLock.Unlock()
|
||||
|
||||
for _, box := range Conf.GetOpenedBoxes() {
|
||||
box.generateDocHistory0()
|
||||
}
|
||||
|
||||
historyDir := filepath.Join(util.WorkspaceDir, "history")
|
||||
clearOutdatedHistoryDir(historyDir)
|
||||
|
||||
// The following is the cleanup logic of older versions, kept for now
|
||||
|
||||
for _, box := range Conf.GetBoxes() {
|
||||
historyDir = filepath.Join(util.DataDir, box.ID, ".siyuan", "history")
|
||||
clearOutdatedHistoryDir(historyDir)
|
||||
}
|
||||
|
||||
historyDir = filepath.Join(util.DataDir, "assets", ".siyuan", "history")
|
||||
clearOutdatedHistoryDir(historyDir)
|
||||
|
||||
historyDir = filepath.Join(util.DataDir, ".siyuan", "history")
|
||||
clearOutdatedHistoryDir(historyDir)
|
||||
}
|
||||
|
||||
func ChangeHistoryTick(minutes int) {
|
||||
if 0 >= minutes {
|
||||
minutes = 3600
|
||||
}
|
||||
historyTicker.Reset(time.Minute * time.Duration(minutes))
|
||||
}
|
||||
|
||||
func ClearWorkspaceHistory() (err error) {
|
||||
historyDir := filepath.Join(util.WorkspaceDir, "history")
|
||||
if gulu.File.IsDir(historyDir) {
|
||||
if err = os.RemoveAll(historyDir); nil != err {
|
||||
util.LogErrorf("remove workspace history dir [%s] failed: %s", historyDir, err)
|
||||
return
|
||||
}
|
||||
util.LogInfof("removed workspace history dir [%s]", historyDir)
|
||||
}
|
||||
|
||||
// The following is the cleanup logic of older versions, kept for now
|
||||
|
||||
notebooks, err := ListNotebooks()
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
|
||||
for _, notebook := range notebooks {
|
||||
boxID := notebook.ID
|
||||
historyDir := filepath.Join(util.DataDir, boxID, ".siyuan", "history")
|
||||
if !gulu.File.IsDir(historyDir) {
|
||||
continue
|
||||
}
|
||||
|
||||
if err = os.RemoveAll(historyDir); nil != err {
|
||||
util.LogErrorf("remove notebook history dir [%s] failed: %s", historyDir, err)
|
||||
return
|
||||
}
|
||||
util.LogInfof("removed notebook history dir [%s]", historyDir)
|
||||
}
|
||||
|
||||
historyDir = filepath.Join(util.DataDir, ".siyuan", "history")
|
||||
if gulu.File.IsDir(historyDir) {
|
||||
if err = os.RemoveAll(historyDir); nil != err {
|
||||
util.LogErrorf("remove data history dir [%s] failed: %s", historyDir, err)
|
||||
return
|
||||
}
|
||||
util.LogInfof("removed data history dir [%s]", historyDir)
|
||||
}
|
||||
historyDir = filepath.Join(util.DataDir, "assets", ".siyuan", "history")
|
||||
if gulu.File.IsDir(historyDir) {
|
||||
if err = os.RemoveAll(historyDir); nil != err {
|
||||
util.LogErrorf("remove assets history dir [%s] failed: %s", historyDir, err)
|
||||
return
|
||||
}
|
||||
util.LogInfof("removed assets history dir [%s]", historyDir)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func GetDocHistoryContent(historyPath string) (content string, err error) {
|
||||
if !gulu.File.IsExist(historyPath) {
|
||||
return
|
||||
}
|
||||
|
||||
data, err := filesys.NoLockFileRead(historyPath)
|
||||
if nil != err {
|
||||
util.LogErrorf("read file [%s] failed: %s", historyPath, err)
|
||||
return
|
||||
}
|
||||
luteEngine := NewLute()
|
||||
historyTree, err := protyle.ParseJSONWithoutFix(luteEngine, data)
|
||||
if nil != err {
|
||||
util.LogErrorf("parse tree from file [%s] failed, remove it", historyPath)
|
||||
os.RemoveAll(historyPath)
|
||||
return
|
||||
}
|
||||
content = renderBlockMarkdown(historyTree.Root)
|
||||
return
|
||||
}
|
||||
|
||||
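// RollbackDocHistory restores a document from the history snapshot at historyPath, replacing
// the current copy in the notebook boxID and refreshing the file tree.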
func RollbackDocHistory(boxID, historyPath string) (err error) {
|
||||
if !gulu.File.IsExist(historyPath) {
|
||||
return
|
||||
}
|
||||
|
||||
WaitForWritingFiles()
|
||||
syncLock.Lock()
|
||||
|
||||
srcPath := historyPath
|
||||
var destPath string
|
||||
baseName := filepath.Base(historyPath)
|
||||
id := strings.TrimSuffix(baseName, ".sy")
|
||||
|
||||
filesys.ReleaseFileLocks(filepath.Join(util.DataDir, boxID))
|
||||
workingDoc := treenode.GetBlockTree(id)
|
||||
if nil != workingDoc {
|
||||
if err = os.RemoveAll(filepath.Join(util.DataDir, boxID, workingDoc.Path)); nil != err {
|
||||
syncLock.Unlock()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
destPath, err = getRollbackDockPath(boxID, historyPath)
|
||||
if nil != err {
|
||||
syncLock.Unlock()
|
||||
return
|
||||
}
|
||||
|
||||
if err = gulu.File.Copy(srcPath, destPath); nil != err {
|
||||
syncLock.Unlock()
|
||||
return
|
||||
}
|
||||
syncLock.Unlock()
|
||||
|
||||
RefreshFileTree()
|
||||
IncWorkspaceDataVer()
|
||||
return nil
|
||||
}
|
||||
|
||||
func getRollbackDockPath(boxID, historyPath string) (destPath string, err error) {
|
||||
baseName := filepath.Base(historyPath)
|
||||
parentID := strings.TrimSuffix(filepath.Base(filepath.Dir(historyPath)), ".sy")
|
||||
parentWorkingDoc := treenode.GetBlockTree(parentID)
|
||||
if nil != parentWorkingDoc {
|
||||
// If the parent path is a document, restore under the parent path
|
||||
parentDir := strings.TrimSuffix(parentWorkingDoc.Path, ".sy")
|
||||
parentDir = filepath.Join(util.DataDir, boxID, parentDir)
|
||||
if err = os.MkdirAll(parentDir, 0755); nil != err {
|
||||
return
|
||||
}
|
||||
destPath = filepath.Join(parentDir, baseName)
|
||||
} else {
|
||||
// If the parent path is not a document, restore under the notebook root path
|
||||
destPath = filepath.Join(util.DataDir, boxID, baseName)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func RollbackAssetsHistory(historyPath string) (err error) {
|
||||
historyPath = filepath.Join(util.WorkspaceDir, historyPath)
|
||||
if !gulu.File.IsExist(historyPath) {
|
||||
return
|
||||
}
|
||||
|
||||
from := historyPath
|
||||
to := filepath.Join(util.DataDir, "assets", filepath.Base(historyPath))
|
||||
|
||||
if err = gulu.File.Copy(from, to); nil != err {
|
||||
util.LogErrorf("copy file [%s] to [%s] failed: %s", from, to, err)
|
||||
return
|
||||
}
|
||||
IncWorkspaceDataVer()
|
||||
return nil
|
||||
}
|
||||
|
||||
func RollbackNotebookHistory(historyPath string) (err error) {
|
||||
if !gulu.File.IsExist(historyPath) {
|
||||
return
|
||||
}
|
||||
|
||||
from := historyPath
|
||||
to := filepath.Join(util.DataDir, filepath.Base(historyPath))
|
||||
|
||||
if err = gulu.File.Copy(from, to); nil != err {
|
||||
util.LogErrorf("copy file [%s] to [%s] failed: %s", from, to, err)
|
||||
return
|
||||
}
|
||||
|
||||
RefreshFileTree()
|
||||
IncWorkspaceDataVer()
|
||||
return nil
|
||||
}
|
||||
|
||||
type History struct {
|
||||
Time string `json:"time"`
|
||||
Items []*HistoryItem `json:"items"`
|
||||
}
|
||||
|
||||
type HistoryItem struct {
|
||||
Title string `json:"title"`
|
||||
Path string `json:"path"`
|
||||
}
|
||||
|
||||
const maxHistory = 32
|
||||
|
||||
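// GetDocHistory lists the history snapshots of the notebook boxID, newest first, capping both
// the number of snapshots and the documents listed per snapshot.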
func GetDocHistory(boxID string) (ret []*History, err error) {
|
||||
ret = []*History{}
|
||||
|
||||
historyDir := filepath.Join(util.WorkspaceDir, "history")
|
||||
if !gulu.File.IsDir(historyDir) {
|
||||
return
|
||||
}
|
||||
|
||||
historyBoxDirs, err := filepath.Glob(historyDir + "/*/" + boxID)
|
||||
if nil != err {
|
||||
util.LogErrorf("read dir [%s] failed: %s", historyDir, err)
|
||||
return
|
||||
}
|
||||
sort.Slice(historyBoxDirs, func(i, j int) bool {
|
||||
return historyBoxDirs[i] > historyBoxDirs[j]
|
||||
})
|
||||
|
||||
luteEngine := NewLute()
|
||||
count := 0
|
||||
for _, historyBoxDir := range historyBoxDirs {
|
||||
var docs []*HistoryItem
|
||||
itemCount := 0
|
||||
filepath.Walk(historyBoxDir, func(path string, info fs.FileInfo, err error) error {
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(info.Name(), ".sy") {
|
||||
return nil
|
||||
}
|
||||
|
||||
data, err := filesys.NoLockFileRead(path)
|
||||
if nil != err {
|
||||
util.LogErrorf("read file [%s] failed: %s", path, err)
|
||||
return nil
|
||||
}
|
||||
historyTree, err := protyle.ParseJSONWithoutFix(luteEngine, data)
|
||||
if nil != err {
|
||||
util.LogErrorf("parse tree from file [%s] failed, remove it", path)
|
||||
os.RemoveAll(path)
|
||||
return nil
|
||||
}
|
||||
historyName := historyTree.Root.IALAttr("title")
|
||||
if "" == historyName {
|
||||
historyName = info.Name()
|
||||
}
|
||||
|
||||
docs = append(docs, &HistoryItem{
|
||||
Title: historyTree.Root.IALAttr("title"),
|
||||
Path: path,
|
||||
})
|
||||
itemCount++
|
||||
if maxHistory < itemCount {
|
||||
return io.EOF
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if 1 > len(docs) {
|
||||
continue
|
||||
}
|
||||
|
||||
timeDir := filepath.Base(filepath.Dir(historyBoxDir))
|
||||
t := timeDir[:strings.LastIndex(timeDir, "-")]
|
||||
if ti, parseErr := time.Parse("2006-01-02-150405", t); nil == parseErr {
|
||||
t = ti.Format("2006-01-02 15:04:05")
|
||||
}
|
||||
|
||||
ret = append(ret, &History{
|
||||
Time: t,
|
||||
Items: docs,
|
||||
})
|
||||
|
||||
count++
|
||||
if maxHistory <= count {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(ret, func(i, j int) bool {
|
||||
return ret[i].Time > ret[j].Time
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func GetNotebookHistory() (ret []*History, err error) {
|
||||
ret = []*History{}
|
||||
|
||||
historyDir := filepath.Join(util.WorkspaceDir, "history")
|
||||
if !gulu.File.IsDir(historyDir) {
|
||||
return
|
||||
}
|
||||
|
||||
historyNotebookConfs, err := filepath.Glob(historyDir + "/*-delete/*/.siyuan/conf.json")
|
||||
if nil != err {
|
||||
util.LogErrorf("read dir [%s] failed: %s", historyDir, err)
|
||||
return
|
||||
}
|
||||
sort.Slice(historyNotebookConfs, func(i, j int) bool {
|
||||
iTimeDir := filepath.Base(filepath.Dir(filepath.Dir(filepath.Dir(historyNotebookConfs[i]))))
|
||||
jTimeDir := filepath.Base(filepath.Dir(filepath.Dir(filepath.Dir(historyNotebookConfs[j]))))
|
||||
return iTimeDir > jTimeDir
|
||||
})
|
||||
|
||||
historyCount := 0
|
||||
for _, historyNotebookConf := range historyNotebookConfs {
|
||||
timeDir := filepath.Base(filepath.Dir(filepath.Dir(filepath.Dir(historyNotebookConf))))
|
||||
t := timeDir[:strings.LastIndex(timeDir, "-")]
|
||||
if ti, parseErr := time.Parse("2006-01-02-150405", t); nil == parseErr {
|
||||
t = ti.Format("2006-01-02 15:04:05")
|
||||
}
|
||||
|
||||
var c conf.BoxConf
|
||||
data, readErr := os.ReadFile(historyNotebookConf)
|
||||
if nil != readErr {
|
||||
util.LogErrorf("read notebook conf [%s] failed: %s", historyNotebookConf, readErr)
|
||||
continue
|
||||
}
|
||||
if err = json.Unmarshal(data, &c); nil != err {
|
||||
util.LogErrorf("parse notebook conf [%s] failed: %s", historyNotebookConf, err)
|
||||
continue
|
||||
}
|
||||
|
||||
ret = append(ret, &History{
|
||||
Time: t,
|
||||
Items: []*HistoryItem{
|
||||
{
|
||||
Title: c.Name,
|
||||
Path: filepath.Dir(filepath.Dir(historyNotebookConf)),
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
historyCount++
|
||||
if maxHistory <= historyCount {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(ret, func(i, j int) bool {
|
||||
return ret[i].Time > ret[j].Time
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func GetAssetsHistory() (ret []*History, err error) {
|
||||
ret = []*History{}
|
||||
|
||||
historyDir := filepath.Join(util.WorkspaceDir, "history")
|
||||
if !gulu.File.IsDir(historyDir) {
|
||||
return
|
||||
}
|
||||
|
||||
historyAssetsDirs, err := filepath.Glob(historyDir + "/*/assets")
|
||||
if nil != err {
|
||||
util.LogErrorf("read dir [%s] failed: %s", historyDir, err)
|
||||
return
|
||||
}
|
||||
sort.Slice(historyAssetsDirs, func(i, j int) bool {
|
||||
return historyAssetsDirs[i] > historyAssetsDirs[j]
|
||||
})
|
||||
|
||||
historyCount := 0
|
||||
for _, historyAssetsDir := range historyAssetsDirs {
|
||||
var assets []*HistoryItem
|
||||
itemCount := 0
|
||||
filepath.Walk(historyAssetsDir, func(path string, info fs.FileInfo, err error) error {
|
||||
if isSkipFile(info.Name()) {
|
||||
if info.IsDir() {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
assets = append(assets, &HistoryItem{
|
||||
Title: info.Name(),
|
||||
Path: filepath.ToSlash(strings.TrimPrefix(path, util.WorkspaceDir)),
|
||||
})
|
||||
itemCount++
|
||||
if maxHistory < itemCount {
|
||||
return io.EOF
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if 1 > len(assets) {
|
||||
continue
|
||||
}
|
||||
|
||||
timeDir := filepath.Base(filepath.Dir(historyAssetsDir))
|
||||
t := timeDir[:strings.LastIndex(timeDir, "-")]
|
||||
if ti, parseErr := time.Parse("2006-01-02-150405", t); nil == parseErr {
|
||||
t = ti.Format("2006-01-02 15:04:05")
|
||||
}
|
||||
|
||||
ret = append(ret, &History{
|
||||
Time: t,
|
||||
Items: assets,
|
||||
})
|
||||
|
||||
historyCount++
|
||||
if maxHistory <= historyCount {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(ret, func(i, j int) bool {
|
||||
return ret[i].Time > ret[j].Time
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func (box *Box) generateDocHistory0() {
|
||||
files := box.recentModifiedDocs()
|
||||
if 1 > len(files) {
|
||||
return
|
||||
}
|
||||
|
||||
historyDir, err := util.GetHistoryDir("update")
|
||||
if nil != err {
|
||||
util.LogErrorf("get history dir failed: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
historyPath := filepath.Join(historyDir, box.ID, strings.TrimPrefix(file, filepath.Join(util.DataDir, box.ID)))
|
||||
if err = os.MkdirAll(filepath.Dir(historyPath), 0755); nil != err {
|
||||
util.LogErrorf("generate history failed: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
var data []byte
|
||||
if data, err = filesys.NoLockFileRead(file); err != nil {
|
||||
util.LogErrorf("generate history failed: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = gulu.File.WriteFileSafer(historyPath, data, 0644); err != nil {
|
||||
util.LogErrorf("generate history failed: %s", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func clearOutdatedHistoryDir(historyDir string) {
|
||||
if !gulu.File.IsExist(historyDir) {
|
||||
return
|
||||
}
|
||||
|
||||
dirs, err := os.ReadDir(historyDir)
|
||||
if nil != err {
|
||||
util.LogErrorf("clear history [%s] failed: %s", historyDir, err)
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
var removes []string
|
||||
for _, dir := range dirs {
|
||||
dirInfo, err := dir.Info()
|
||||
if nil != err {
|
||||
util.LogErrorf("read history dir [%s] failed: %s", dir.Name(), err)
|
||||
continue
|
||||
}
|
||||
if Conf.Editor.HistoryRetentionDays < int(now.Sub(dirInfo.ModTime()).Hours()/24) {
|
||||
removes = append(removes, filepath.Join(historyDir, dir.Name()))
|
||||
}
|
||||
}
|
||||
for _, dir := range removes {
|
||||
if err = os.RemoveAll(dir); nil != err {
|
||||
util.LogErrorf("remove history dir [%s] failed: %s", err)
|
||||
continue
|
||||
}
|
||||
//util.LogInfof("auto removed history dir [%s]", dir)
|
||||
}
|
||||
}
|
||||
|
||||
var boxLatestHistoryTime = map[string]time.Time{}
|
||||
|
||||
func (box *Box) recentModifiedDocs() (ret []string) {
|
||||
latestHistoryTime := boxLatestHistoryTime[box.ID]
|
||||
filepath.Walk(filepath.Join(util.DataDir, box.ID), func(path string, info fs.FileInfo, err error) error {
|
||||
if nil == info {
|
||||
return nil
|
||||
}
|
||||
if isSkipFile(info.Name()) {
|
||||
if info.IsDir() {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if info.ModTime().After(latestHistoryTime) {
|
||||
ret = append(ret, filepath.Join(path))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
box.UpdateHistoryGenerated()
|
||||
return
|
||||
}
|
||||
646
kernel/model/import.go
Normal file
@@ -0,0 +1,646 @@
// SiYuan - Build Your Eternal Digital Garden
|
||||
// Copyright (c) 2020-present, b3log.org
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"runtime/debug"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/88250/gulu"
|
||||
"github.com/88250/lute/ast"
|
||||
"github.com/88250/lute/html"
|
||||
"github.com/88250/lute/parse"
|
||||
"github.com/88250/protyle"
|
||||
"github.com/mattn/go-zglob"
|
||||
"github.com/siyuan-note/siyuan/kernel/filesys"
|
||||
"github.com/siyuan-note/siyuan/kernel/sql"
|
||||
"github.com/siyuan-note/siyuan/kernel/treenode"
|
||||
"github.com/siyuan-note/siyuan/kernel/util"
|
||||
)
|
||||
|
||||
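// ImportSY imports a .sy.zip package into the notebook boxID under toPath: block IDs are
// regenerated, references are remapped to the new IDs, document paths are renamed
// accordingly, bundled assets are merged into data/assets, and the result is copied into
// the notebook.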
func ImportSY(zipPath, boxID, toPath string) (err error) {
|
||||
util.PushEndlessProgress(Conf.Language(73))
|
||||
defer util.ClearPushProgress(100)
|
||||
|
||||
baseName := filepath.Base(zipPath)
|
||||
ext := filepath.Ext(baseName)
|
||||
baseName = strings.TrimSuffix(baseName, ext)
|
||||
unzipPath := filepath.Join(filepath.Dir(zipPath), baseName+"-"+gulu.Rand.String(7))
|
||||
err = gulu.Zip.Unzip(zipPath, unzipPath)
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(unzipPath)
|
||||
|
||||
var syPaths []string
|
||||
filepath.Walk(unzipPath, func(path string, info fs.FileInfo, err error) error {
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
|
||||
if !info.IsDir() && strings.HasSuffix(info.Name(), ".sy") {
|
||||
syPaths = append(syPaths, path)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
unzipRootPaths, err := filepath.Glob(unzipPath + "/*")
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
if 1 != len(unzipRootPaths) {
|
||||
util.LogErrorf("invalid .sy.zip")
|
||||
return errors.New("invalid .sy.zip")
|
||||
}
|
||||
unzipRootPath := unzipRootPaths[0]
|
||||
luteEngine := util.NewLute()
|
||||
blockIDs := map[string]string{}
|
||||
trees := map[string]*parse.Tree{}
|
||||
|
||||
// Regenerate block IDs
|
||||
for _, syPath := range syPaths {
|
||||
data, readErr := os.ReadFile(syPath)
|
||||
if nil != readErr {
|
||||
util.LogErrorf("read .sy [%s] failed: %s", syPath, readErr)
|
||||
err = readErr
|
||||
return
|
||||
}
|
||||
tree, _, parseErr := protyle.ParseJSON(luteEngine, data)
|
||||
if nil != parseErr {
|
||||
util.LogErrorf("parse .sy [%s] failed: %s", syPath, parseErr)
|
||||
err = parseErr
|
||||
return
|
||||
}
|
||||
ast.Walk(tree.Root, func(n *ast.Node, entering bool) ast.WalkStatus {
|
||||
if !entering {
|
||||
return ast.WalkContinue
|
||||
}
|
||||
if "" != n.ID {
|
||||
newNodeID := ast.NewNodeID()
|
||||
blockIDs[n.ID] = newNodeID
|
||||
n.ID = newNodeID
|
||||
n.SetIALAttr("id", newNodeID)
|
||||
|
||||
}
|
||||
return ast.WalkContinue
|
||||
})
|
||||
tree.ID = tree.Root.ID
|
||||
tree.Path = filepath.ToSlash(strings.TrimPrefix(syPath, unzipRootPath))
|
||||
trees[tree.ID] = tree
|
||||
}
|
||||
|
||||
// Point references to the regenerated block IDs
|
||||
for _, tree := range trees {
|
||||
ast.Walk(tree.Root, func(n *ast.Node, entering bool) ast.WalkStatus {
|
||||
if !entering {
|
||||
return ast.WalkContinue
|
||||
}
|
||||
if ast.NodeBlockRefID == n.Type {
|
||||
newDefID := blockIDs[n.TokensStr()]
|
||||
if "" != newDefID {
|
||||
n.Tokens = gulu.Str.ToBytes(newDefID)
|
||||
} else {
|
||||
util.LogWarnf("not found def [" + n.TokensStr() + "]")
|
||||
}
|
||||
}
|
||||
return ast.WalkContinue
|
||||
})
|
||||
}
|
||||
|
||||
// Write back the .sy files
|
||||
for _, tree := range trees {
|
||||
syPath := filepath.Join(unzipRootPath, tree.Path)
|
||||
renderer := protyle.NewJSONRenderer(tree, luteEngine.RenderOptions)
|
||||
data := renderer.Render()
|
||||
|
||||
buf := bytes.Buffer{}
|
||||
buf.Grow(4096)
|
||||
if err = json.Indent(&buf, data, "", "\t"); nil != err {
|
||||
return
|
||||
}
|
||||
data = buf.Bytes()
|
||||
|
||||
if err = os.WriteFile(syPath, data, 0644); nil != err {
|
||||
util.LogErrorf("write .sy [%s] failed: %s", syPath, err)
|
||||
return
|
||||
}
|
||||
newSyPath := filepath.Join(filepath.Dir(syPath), tree.ID+".sy")
|
||||
if err = os.Rename(syPath, newSyPath); nil != err {
|
||||
util.LogErrorf("rename .sy from [%s] to [%s] failed: %s", syPath, newSyPath, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Rename file paths
|
||||
renamePaths := map[string]string{}
|
||||
filepath.Walk(unzipRootPath, func(path string, info fs.FileInfo, err error) error {
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
|
||||
if info.IsDir() && util.IsIDPattern(info.Name()) {
|
||||
renamePaths[path] = path
|
||||
}
|
||||
return nil
|
||||
})
|
||||
for p := range renamePaths {
|
||||
originalPath := p
|
||||
p = strings.TrimPrefix(p, unzipRootPath)
|
||||
p = filepath.ToSlash(p)
|
||||
parts := strings.Split(p, "/")
|
||||
buf := bytes.Buffer{}
|
||||
buf.WriteString("/")
|
||||
for i, part := range parts {
|
||||
if "" == part {
|
||||
continue
|
||||
}
|
||||
newNodeID := blockIDs[part]
|
||||
if "" != newNodeID {
|
||||
buf.WriteString(newNodeID)
|
||||
} else {
|
||||
buf.WriteString(part)
|
||||
}
|
||||
if i < len(parts)-1 {
|
||||
buf.WriteString("/")
|
||||
}
|
||||
}
|
||||
newPath := buf.String()
|
||||
renamePaths[originalPath] = filepath.Join(unzipRootPath, newPath)
|
||||
}
|
||||
|
||||
var oldPaths []string
|
||||
for oldPath := range renamePaths {
|
||||
oldPaths = append(oldPaths, oldPath)
|
||||
}
|
||||
sort.Slice(oldPaths, func(i, j int) bool {
|
||||
return strings.Count(oldPaths[i], string(os.PathSeparator)) < strings.Count(oldPaths[j], string(os.PathSeparator))
|
||||
})
|
||||
for i, oldPath := range oldPaths {
|
||||
newPath := renamePaths[oldPath]
|
||||
if err = os.Rename(oldPath, newPath); nil != err {
|
||||
util.LogErrorf("rename path from [%s] to [%s] failed: %s", oldPath, renamePaths[oldPath], err)
|
||||
return errors.New("rename path failed")
|
||||
}
|
||||
|
||||
delete(renamePaths, oldPath)
|
||||
var toRemoves []string
|
||||
newRenamedPaths := map[string]string{}
|
||||
for oldP, newP := range renamePaths {
|
||||
if strings.HasPrefix(oldP, oldPath) {
|
||||
renamedOldP := strings.Replace(oldP, oldPath, newPath, 1)
|
||||
newRenamedPaths[renamedOldP] = newP
|
||||
toRemoves = append(toRemoves, oldP)
|
||||
}
|
||||
}
|
||||
for _, toRemove := range toRemoves {
|
||||
delete(renamePaths, toRemove)
|
||||
}
|
||||
for oldP, newP := range newRenamedPaths {
|
||||
renamePaths[oldP] = newP
|
||||
}
|
||||
for j := i + 1; j < len(oldPaths); j++ {
|
||||
if strings.HasPrefix(oldPaths[j], oldPath) {
|
||||
renamedOldP := strings.Replace(oldPaths[j], oldPath, newPath, 1)
|
||||
oldPaths[j] = renamedOldP
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
assetsDirs, err := zglob.Glob(unzipRootPath + "/**/assets")
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
if 0 < len(assetsDirs) {
|
||||
for _, assets := range assetsDirs {
|
||||
if gulu.File.IsDir(assets) {
|
||||
dataAssets := filepath.Join(util.DataDir, "assets")
|
||||
if err = gulu.File.Copy(assets, dataAssets); nil != err {
|
||||
util.LogErrorf("copy assets from [%s] to [%s] failed: %s", assets, dataAssets, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
os.RemoveAll(assets)
|
||||
}
|
||||
}
|
||||
|
||||
syncLock.Lock()
|
||||
defer syncLock.Unlock()
|
||||
|
||||
filesys.ReleaseAllFileLocks()
|
||||
|
||||
var baseTargetPath string
|
||||
if "/" == toPath {
|
||||
baseTargetPath = "/"
|
||||
} else {
|
||||
block := treenode.GetBlockTreeRootByPath(boxID, toPath)
|
||||
if nil == block {
|
||||
util.LogErrorf("not found block by path [%s]", toPath)
|
||||
return nil
|
||||
}
|
||||
baseTargetPath = strings.TrimSuffix(block.Path, ".sy")
|
||||
}
|
||||
|
||||
targetDir := filepath.Join(util.DataDir, boxID, baseTargetPath)
|
||||
if err = os.MkdirAll(targetDir, 0755); nil != err {
|
||||
return
|
||||
}
|
||||
|
||||
if err = stableCopy(unzipRootPath, targetDir); nil != err {
|
||||
util.LogErrorf("copy data dir from [%s] to [%s] failed: %s", unzipRootPath, util.DataDir, err)
|
||||
err = errors.New("copy data failed")
|
||||
return
|
||||
}
|
||||
|
||||
IncWorkspaceDataVer()
|
||||
refreshFileTree()
|
||||
return
|
||||
}
|
||||
|
||||
func ImportData(zipPath string) (err error) {
|
||||
util.PushEndlessProgress(Conf.Language(73))
|
||||
defer util.ClearPushProgress(100)
|
||||
|
||||
baseName := filepath.Base(zipPath)
|
||||
ext := filepath.Ext(baseName)
|
||||
baseName = strings.TrimSuffix(baseName, ext)
|
||||
unzipPath := filepath.Join(filepath.Dir(zipPath), baseName)
|
||||
err = gulu.Zip.Unzip(zipPath, unzipPath)
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(unzipPath)
|
||||
|
||||
files, err := filepath.Glob(filepath.Join(unzipPath, "*/.siyuan/conf.json"))
|
||||
if nil != err {
|
||||
util.LogErrorf("glob conf.json failed: %s", err)
|
||||
return errors.New("not found conf.json")
|
||||
}
|
||||
if 1 > len(files) {
|
||||
return errors.New("not found conf.json")
|
||||
}
|
||||
|
||||
confPath := files[0]
|
||||
confData, err := os.ReadFile(confPath)
|
||||
if nil != err {
|
||||
return errors.New("read conf.json failed")
|
||||
}
|
||||
dataConf := &filesys.DataConf{}
|
||||
if err = gulu.JSON.UnmarshalJSON(confData, dataConf); nil != err {
|
||||
util.LogErrorf("unmarshal conf.json failed: %s", err)
|
||||
return errors.New("unmarshal conf.json failed")
|
||||
}
|
||||
dataConf.Device = util.GetDeviceID()
|
||||
confData, err = gulu.JSON.MarshalJSON(dataConf)
|
||||
if nil != err {
|
||||
util.LogErrorf("marshal conf.json failed: %s", err)
|
||||
return errors.New("marshal conf.json failed")
|
||||
}
|
||||
if err = os.WriteFile(confPath, confData, 0644); nil != err {
|
||||
util.LogErrorf("write conf.json failed: %s", err)
|
||||
return errors.New("write conf.json failed")
|
||||
}
|
||||
|
||||
syncLock.Lock()
|
||||
defer syncLock.Unlock()
|
||||
|
||||
filesys.ReleaseAllFileLocks()
|
||||
tmpDataPath := filepath.Dir(filepath.Dir(confPath))
|
||||
if err = stableCopy(tmpDataPath, util.DataDir); nil != err {
|
||||
util.LogErrorf("copy data dir from [%s] to [%s] failed: %s", tmpDataPath, util.DataDir, err)
|
||||
err = errors.New("copy data failed")
|
||||
return
|
||||
}
|
||||
|
||||
IncWorkspaceDataVer()
|
||||
refreshFileTree()
|
||||
return
|
||||
}
|
||||
|
||||
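// ImportFromLocalPath imports Markdown from localPath (a single file or a directory tree)
// into the notebook boxID under toPath, converting each .md file to a .sy tree and copying
// referenced local assets into the assets directory.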
func ImportFromLocalPath(boxID, localPath string, toPath string) (err error) {
|
||||
util.PushEndlessProgress(Conf.Language(73))
|
||||
|
||||
WaitForWritingFiles()
|
||||
syncLock.Lock()
|
||||
defer syncLock.Unlock()
|
||||
|
||||
box := Conf.Box(boxID)
|
||||
var baseHPath, baseTargetPath, boxLocalPath string
|
||||
if "/" == toPath {
|
||||
baseHPath = "/"
|
||||
baseTargetPath = "/"
|
||||
} else {
|
||||
block := treenode.GetBlockTreeRootByPath(boxID, toPath)
|
||||
if nil == block {
|
||||
util.LogErrorf("not found block by path [%s]", toPath)
|
||||
return nil
|
||||
}
|
||||
baseHPath = block.HPath
|
||||
baseTargetPath = strings.TrimSuffix(block.Path, ".sy")
|
||||
}
|
||||
boxLocalPath = filepath.Join(util.DataDir, boxID)
|
||||
|
||||
if gulu.File.IsDir(localPath) {
|
||||
folderName := filepath.Base(localPath)
|
||||
p := path.Join(toPath, folderName)
|
||||
if box.Exist(p) {
|
||||
return errors.New(Conf.Language(1))
|
||||
}
|
||||
|
||||
// Collect all asset files
|
||||
assets := map[string]string{}
|
||||
filepath.Walk(localPath, func(currentPath string, info os.FileInfo, walkErr error) error {
|
||||
if localPath == currentPath {
|
||||
return nil
|
||||
}
|
||||
if strings.HasPrefix(info.Name(), ".") {
|
||||
if info.IsDir() {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(info.Name(), ".md") && !strings.HasSuffix(info.Name(), ".markdown") {
|
||||
dest := currentPath
|
||||
assets[dest] = currentPath
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
targetPaths := map[string]string{}
|
||||
|
||||
// Convert md to sy
|
||||
i := 0
|
||||
filepath.Walk(localPath, func(currentPath string, info os.FileInfo, walkErr error) error {
|
||||
if strings.HasPrefix(info.Name(), ".") {
|
||||
if info.IsDir() {
|
||||
return filepath.SkipDir
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var tree *parse.Tree
|
||||
|
||||
ext := path.Ext(info.Name())
|
||||
title := strings.TrimSuffix(info.Name(), ext)
|
||||
id := ast.NewNodeID()
|
||||
|
||||
curRelPath := filepath.ToSlash(strings.TrimPrefix(currentPath, localPath))
|
||||
targetPath := path.Join(baseTargetPath, id)
|
||||
if "" == curRelPath {
|
||||
curRelPath = "/"
|
||||
} else {
|
||||
dirPath := targetPaths[path.Dir(curRelPath)]
|
||||
targetPath = path.Join(dirPath, id)
|
||||
}
|
||||
|
||||
targetPath = strings.ReplaceAll(targetPath, ".sy/", "/")
|
||||
targetPath += ".sy"
|
||||
targetPaths[curRelPath] = targetPath
|
||||
hPath := path.Join(baseHPath, filepath.ToSlash(strings.TrimPrefix(currentPath, localPath)))
|
||||
hPath = strings.TrimSuffix(hPath, ext)
|
||||
if info.IsDir() {
|
||||
tree = treenode.NewTree(boxID, targetPath, hPath, title)
|
||||
if err = filesys.WriteTree(tree); nil != err {
|
||||
return io.EOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if !strings.HasSuffix(info.Name(), ".md") && !strings.HasSuffix(info.Name(), ".markdown") {
|
||||
return nil
|
||||
}
|
||||
|
||||
data, readErr := os.ReadFile(currentPath)
|
||||
if nil != readErr {
|
||||
err = readErr
|
||||
return io.EOF
|
||||
}
|
||||
|
||||
tree = parseKTree(data)
|
||||
if nil == tree {
|
||||
util.LogErrorf("parse tree [%s] failed", currentPath)
|
||||
return nil
|
||||
}
|
||||
tree.ID = id
|
||||
tree.Root.ID = id
|
||||
tree.Root.SetIALAttr("id", tree.Root.ID)
|
||||
tree.Root.SetIALAttr("title", title)
|
||||
tree.Box = boxID
|
||||
targetPath = path.Join(path.Dir(targetPath), tree.Root.ID+".sy")
|
||||
tree.Path = targetPath
|
||||
targetPaths[curRelPath] = targetPath
|
||||
tree.HPath = hPath
|
||||
|
||||
docDirLocalPath := filepath.Dir(filepath.Join(boxLocalPath, targetPath))
|
||||
assetDirPath := getAssetsDir(boxLocalPath, docDirLocalPath)
|
||||
currentDir := filepath.Dir(currentPath)
|
||||
ast.Walk(tree.Root, func(n *ast.Node, entering bool) ast.WalkStatus {
|
||||
if !entering || ast.NodeLinkDest != n.Type {
|
||||
return ast.WalkContinue
|
||||
}
|
||||
|
||||
dest := n.TokensStr()
|
||||
if !util.IsRelativePath(dest) || "" == dest {
|
||||
return ast.WalkContinue
|
||||
}
|
||||
|
||||
absDest := filepath.Join(currentDir, dest)
|
||||
fullPath, exist := assets[absDest]
|
||||
if !exist {
|
||||
absDest = filepath.Join(currentDir, string(html.DecodeDestination([]byte(dest))))
|
||||
}
|
||||
fullPath, exist = assets[absDest]
|
||||
if exist {
|
||||
name := filepath.Base(fullPath)
|
||||
ext := filepath.Ext(name)
|
||||
name = strings.TrimSuffix(name, ext)
|
||||
name += "-" + ast.NewNodeID() + ext
|
||||
assetTargetPath := filepath.Join(assetDirPath, name)
|
||||
delete(assets, absDest)
|
||||
if err = gulu.File.Copy(fullPath, assetTargetPath); nil != err {
|
||||
util.LogErrorf("copy asset from [%s] to [%s] failed: %s", fullPath, assetTargetPath, err)
|
||||
return ast.WalkContinue
|
||||
}
|
||||
n.Tokens = gulu.Str.ToBytes("assets/" + name)
|
||||
}
|
||||
return ast.WalkContinue
|
||||
})
|
||||
|
||||
reassignIDUpdated(tree)
|
||||
if err = filesys.WriteTree(tree); nil != err {
|
||||
return io.EOF
|
||||
}
|
||||
i++
|
||||
if 0 == i%4 {
|
||||
util.PushEndlessProgress(fmt.Sprintf(Conf.Language(66), util.ShortPathForBootingDisplay(tree.Path)))
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
|
||||
IncWorkspaceDataVer()
|
||||
refreshFileTree()
|
||||
} else { // Import a single file
|
||||
fileName := filepath.Base(localPath)
|
||||
if !strings.HasSuffix(fileName, ".md") && !strings.HasSuffix(fileName, ".markdown") {
|
||||
return errors.New(Conf.Language(79))
|
||||
}
|
||||
|
||||
title := strings.TrimSuffix(fileName, ".markdown")
|
||||
title = strings.TrimSuffix(title, ".md")
|
||||
targetPath := strings.TrimSuffix(toPath, ".sy")
|
||||
id := ast.NewNodeID()
|
||||
targetPath = path.Join(targetPath, id+".sy")
|
||||
var data []byte
|
||||
data, err = os.ReadFile(localPath)
|
||||
if nil != err {
|
||||
return err
|
||||
}
|
||||
tree := parseKTree(data)
|
||||
if nil == tree {
|
||||
msg := fmt.Sprintf("parse tree [%s] failed", localPath)
|
||||
util.LogErrorf(msg)
|
||||
return errors.New(msg)
|
||||
}
|
||||
tree.ID = id
|
||||
tree.Root.ID = id
|
||||
tree.Root.SetIALAttr("id", tree.Root.ID)
|
||||
tree.Root.SetIALAttr("title", title)
|
||||
tree.Box = boxID
|
||||
tree.Path = targetPath
|
||||
tree.HPath = path.Join(baseHPath, title)
|
||||
|
||||
docDirLocalPath := filepath.Dir(filepath.Join(boxLocalPath, targetPath))
|
||||
assetDirPath := getAssetsDir(boxLocalPath, docDirLocalPath)
|
||||
ast.Walk(tree.Root, func(n *ast.Node, entering bool) ast.WalkStatus {
|
||||
if !entering || ast.NodeLinkDest != n.Type {
|
||||
return ast.WalkContinue
|
||||
}
|
||||
|
||||
dest := n.TokensStr()
|
||||
if !util.IsRelativePath(dest) {
|
||||
return ast.WalkContinue
|
||||
}
|
||||
|
||||
dest = filepath.ToSlash(dest)
|
||||
if "" == dest {
|
||||
return ast.WalkContinue
|
||||
}
|
||||
|
||||
absolutePath := filepath.Join(filepath.Dir(localPath), dest)
|
||||
exist := gulu.File.IsExist(absolutePath)
|
||||
if !exist {
|
||||
absolutePath = filepath.Join(filepath.Dir(localPath), string(html.DecodeDestination([]byte(dest))))
|
||||
exist = gulu.File.IsExist(absolutePath)
|
||||
}
|
||||
if exist {
|
||||
name := filepath.Base(absolutePath)
|
||||
ext := filepath.Ext(name)
|
||||
name = strings.TrimSuffix(name, ext)
|
||||
name += "-" + ast.NewNodeID() + ext
|
||||
assetTargetPath := filepath.Join(assetDirPath, name)
|
||||
if err = gulu.File.CopyFile(absolutePath, assetTargetPath); nil != err {
|
||||
util.LogErrorf("copy asset from [%s] to [%s] failed: %s", absolutePath, assetTargetPath, err)
|
||||
return ast.WalkContinue
|
||||
}
|
||||
n.Tokens = gulu.Str.ToBytes("assets/" + name)
|
||||
}
|
||||
return ast.WalkContinue
|
||||
})
|
||||
|
||||
reassignIDUpdated(tree)
|
||||
if err = indexWriteJSONQueue(tree); nil != err {
|
||||
return
|
||||
}
|
||||
IncWorkspaceDataVer()
|
||||
sql.WaitForWritingDatabase()
|
||||
|
||||
util.PushEndlessProgress(Conf.Language(58))
|
||||
go func() {
|
||||
time.Sleep(2 * time.Second)
|
||||
util.ReloadUI()
|
||||
}()
|
||||
}
|
||||
debug.FreeOSMemory()
|
||||
IncWorkspaceDataVer()
|
||||
return
|
||||
}
|
||||
|
||||
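// reassignIDUpdated assigns fresh, chronologically ordered block IDs to every block in the
// tree and updates each block's "updated" attribute and the tree path to match the new root ID.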
func reassignIDUpdated(tree *parse.Tree) {
|
||||
var blockCount int
|
||||
ast.Walk(tree.Root, func(n *ast.Node, entering bool) ast.WalkStatus {
|
||||
if !entering || "" == n.ID {
|
||||
return ast.WalkContinue
|
||||
}
|
||||
|
||||
blockCount++
|
||||
return ast.WalkContinue
|
||||
})
|
||||
|
||||
ids := make([]string, blockCount)
|
||||
min, _ := strconv.ParseInt(time.Now().Add(-1*time.Duration(blockCount)*time.Second).Format("20060102150405"), 10, 64)
|
||||
for i := 0; i < blockCount; i++ {
|
||||
ids[i] = newID(fmt.Sprintf("%d", min))
|
||||
min++
|
||||
}
|
||||
|
||||
var i int
|
||||
ast.Walk(tree.Root, func(n *ast.Node, entering bool) ast.WalkStatus {
|
||||
if !entering || "" == n.ID {
|
||||
return ast.WalkContinue
|
||||
}
|
||||
|
||||
n.ID = ids[i]
|
||||
n.SetIALAttr("id", n.ID)
|
||||
n.SetIALAttr("updated", util.TimeFromID(n.ID))
|
||||
i++
|
||||
return ast.WalkContinue
|
||||
})
|
||||
tree.ID = tree.Root.ID
|
||||
tree.Path = path.Join(path.Dir(tree.Path), tree.ID+".sy")
|
||||
tree.Root.SetIALAttr("id", tree.Root.ID)
|
||||
}
|
||||
|
||||
func newID(t string) string {
|
||||
return t + "-" + randStr(7)
|
||||
}
|
||||
|
||||
func randStr(length int) string {
|
||||
letter := []rune("abcdefghijklmnopqrstuvwxyz0123456789")
|
||||
b := make([]rune, length)
|
||||
for i := range b {
|
||||
b[i] = letter[rand.Intn(len(letter))]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
391
kernel/model/index.go
Normal file
@@ -0,0 +1,391 @@
// SiYuan - Build Your Eternal Digital Garden
|
||||
// Copyright (c) 2020-present, b3log.org
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/sha256"
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/88250/gulu"
|
||||
"github.com/88250/lute/ast"
|
||||
"github.com/88250/lute/parse"
|
||||
"github.com/dustin/go-humanize"
|
||||
"github.com/emirpasic/gods/sets/hashset"
|
||||
"github.com/siyuan-note/siyuan/kernel/cache"
|
||||
"github.com/siyuan-note/siyuan/kernel/filesys"
|
||||
"github.com/siyuan-note/siyuan/kernel/sql"
|
||||
"github.com/siyuan-note/siyuan/kernel/treenode"
|
||||
"github.com/siyuan-note/siyuan/kernel/util"
|
||||
)
|
||||
|
||||
func (box *Box) BootIndex() {
|
||||
util.SetBootDetails("Listing files...")
|
||||
files := box.ListFiles("/")
|
||||
boxLen := len(Conf.GetOpenedBoxes())
|
||||
if 1 > boxLen {
|
||||
boxLen = 1
|
||||
}
|
||||
bootProgressPart := 10.0 / float64(boxLen) / float64(len(files))
|
||||
|
||||
luteEngine := NewLute()
|
||||
i := 0
|
||||
// Read and cache the path mapping
|
||||
for _, file := range files {
|
||||
if file.isdir || !strings.HasSuffix(file.name, ".sy") {
|
||||
continue
|
||||
}
|
||||
|
||||
p := file.path
|
||||
tree, err := filesys.LoadTree(box.ID, p, luteEngine)
|
||||
if nil != err {
|
||||
util.LogErrorf("read box [%s] tree [%s] failed: %s", box.ID, p, err)
|
||||
continue
|
||||
}
|
||||
|
||||
docIAL := parse.IAL2MapUnEsc(tree.Root.KramdownIAL)
|
||||
cache.PutDocIAL(p, docIAL)
|
||||
|
||||
util.IncBootProgress(bootProgressPart, "Parsing tree "+util.ShortPathForBootingDisplay(tree.Path))
|
||||
// Cache the block tree
|
||||
treenode.IndexBlockTree(tree)
|
||||
if 1 < i && 0 == i%64 {
|
||||
filesys.ReleaseAllFileLocks()
|
||||
}
|
||||
i++
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
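// Index re-indexes the notebook: it loads every .sy tree, caches document IALs and block
// trees, and rebuilds the SQL database for the box unless its content hash matches the
// stored hash.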
func (box *Box) Index(fullRebuildIndex bool) (treeCount int, treeSize int64) {
|
||||
defer debug.FreeOSMemory()
|
||||
|
||||
sql.IndexMode()
|
||||
defer sql.NormalMode()
|
||||
|
||||
//os.MkdirAll("pprof", 0755)
|
||||
//cpuProfile, _ := os.Create("pprof/cpu_profile_index")
|
||||
//pprof.StartCPUProfile(cpuProfile)
|
||||
//defer pprof.StopCPUProfile()
|
||||
|
||||
util.SetBootDetails("Listing files...")
|
||||
files := box.ListFiles("/")
|
||||
boxLen := len(Conf.GetOpenedBoxes())
|
||||
if 1 > boxLen {
|
||||
boxLen = 1
|
||||
}
|
||||
bootProgressPart := 10.0 / float64(boxLen) / float64(len(files))
|
||||
|
||||
luteEngine := NewLute()
|
||||
idTitleMap := map[string]string{}
|
||||
idHashMap := map[string]string{}
|
||||
|
||||
util.PushEndlessProgress(fmt.Sprintf("["+box.Name+"] "+Conf.Language(64), len(files)))
|
||||
|
||||
i := 0
|
||||
// 读取并缓存路径映射
|
||||
for _, file := range files {
|
||||
if file.isdir || !strings.HasSuffix(file.name, ".sy") {
|
||||
continue
|
||||
}
|
||||
|
||||
p := file.path
|
||||
|
||||
tree, err := filesys.LoadTree(box.ID, p, luteEngine)
|
||||
if nil != err {
|
||||
util.LogErrorf("read box [%s] tree [%s] failed: %s", box.ID, p, err)
|
||||
continue
|
||||
}
|
||||
|
||||
docIAL := parse.IAL2MapUnEsc(tree.Root.KramdownIAL)
|
||||
cache.PutDocIAL(p, docIAL)
|
||||
|
||||
util.IncBootProgress(bootProgressPart, "Parsing tree "+util.ShortPathForBootingDisplay(tree.Path))
|
||||
treeSize += file.size
|
||||
treeCount++
|
||||
// 缓存文档标题,后面做 Path -> HPath 路径映射时需要
|
||||
idTitleMap[tree.ID] = tree.Root.IALAttr("title")
|
||||
// 缓存块树
|
||||
treenode.IndexBlockTree(tree)
|
||||
// 缓存 ID-Hash,后面需要用于判断是否要重建库
|
||||
idHashMap[tree.ID] = tree.Hash
|
||||
if 1 < i && 0 == i%64 {
|
||||
util.PushEndlessProgress(fmt.Sprintf(Conf.Language(88), i, len(files)-i))
|
||||
filesys.ReleaseAllFileLocks()
|
||||
}
|
||||
i++
|
||||
}
|
||||
|
||||
box.UpdateHistoryGenerated() // 初始化历史生成时间为当前时间
|
||||
|
||||
// 检查是否需要重新建库
|
||||
util.SetBootDetails("Checking data hashes...")
|
||||
var ids []string
|
||||
for id := range idTitleMap {
|
||||
ids = append(ids, id)
|
||||
}
|
||||
sort.Slice(ids, func(i, j int) bool { return ids[i] >= ids[j] })
|
||||
buf := bytes.Buffer{}
|
||||
for _, id := range ids {
|
||||
hash, _ := idHashMap[id]
|
||||
buf.WriteString(hash)
|
||||
util.SetBootDetails("Checking hash " + hash)
|
||||
}
|
||||
boxHash := fmt.Sprintf("%x", sha256.Sum256(buf.Bytes()))
|
||||
|
||||
dbBoxHash := sql.GetBoxHash(box.ID)
|
||||
if boxHash == dbBoxHash {
|
||||
//util.LogInfof("use existing database for box [%s]", box.ID)
|
||||
util.SetBootDetails("Use existing database for notebook " + box.ID)
|
||||
return
|
||||
}
|
||||
|
||||
// 开始重建库
|
||||
|
||||
sql.DisableCache()
|
||||
defer sql.EnableCache()
|
||||
|
||||
start := time.Now()
|
||||
if !fullRebuildIndex {
|
||||
tx, err := sql.BeginTx()
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
sql.PutBoxHash(tx, box.ID, boxHash)
|
||||
util.SetBootDetails("Cleaning obsolete indexes...")
|
||||
util.PushEndlessProgress(Conf.Language(108))
|
||||
if err = sql.DeleteByBoxTx(tx, box.ID); nil != err {
|
||||
return
|
||||
}
|
||||
if err = sql.CommitTx(tx); nil != err {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
bootProgressPart = 40.0 / float64(boxLen) / float64(treeCount)
|
||||
|
||||
i = 0
|
||||
// 块级行级入库,缓存块
|
||||
// 这里不能并行插入,因为 SQLite 不支持
|
||||
for _, file := range files {
|
||||
if file.isdir || !strings.HasSuffix(file.name, ".sy") {
|
||||
continue
|
||||
}
|
||||
|
||||
tree, err := filesys.LoadTree(box.ID, file.path, luteEngine)
|
||||
if nil != err {
|
||||
util.LogErrorf("read box [%s] tree [%s] failed: %s", box.ID, file.path, err)
|
||||
continue
|
||||
}
|
||||
|
||||
util.IncBootProgress(bootProgressPart, "Indexing tree "+util.ShortPathForBootingDisplay(tree.Path))
|
||||
tx, err := sql.BeginTx()
|
||||
if nil != err {
|
||||
continue
|
||||
}
|
||||
if err = sql.InsertBlocksSpans(tx, tree); nil != err {
|
||||
continue
|
||||
}
|
||||
if err = sql.CommitTx(tx); nil != err {
|
||||
continue
|
||||
}
|
||||
if 1 < i && 0 == i%64 {
|
||||
util.PushEndlessProgress(fmt.Sprintf("["+box.Name+"] "+Conf.Language(53), i, treeCount-i))
|
||||
filesys.ReleaseAllFileLocks()
|
||||
}
|
||||
i++
|
||||
}
|
||||
|
||||
end := time.Now()
|
||||
elapsed := end.Sub(start).Seconds()
|
||||
util.LogInfof("rebuilt database for notebook [%s] in [%.2fs], tree [count=%d, size=%s]", box.ID, elapsed, treeCount, humanize.Bytes(uint64(treeSize)))
|
||||
|
||||
util.PushEndlessProgress(fmt.Sprintf(Conf.Language(56), treeCount))
|
||||
return
|
||||
}
|
||||
|
||||
func IndexRefs() {
|
||||
sql.EnableCache()
|
||||
defer sql.ClearBlockCache()
|
||||
|
||||
start := time.Now()
|
||||
util.SetBootDetails("Resolving refs...")
|
||||
util.PushEndlessProgress(Conf.Language(54))
|
||||
|
||||
// 解析并更新引用块
|
||||
util.SetBootDetails("Resolving ref block content...")
|
||||
refUnresolvedBlocks := sql.GetRefUnresolvedBlocks() // TODO: v2.2.0 以后移除
|
||||
if 0 < len(refUnresolvedBlocks) {
|
||||
dynamicRefTreeIDs := hashset.New()
|
||||
bootProgressPart := 10.0 / float64(len(refUnresolvedBlocks))
|
||||
anchors := map[string]string{}
|
||||
var refBlockIDs []string
|
||||
for i, refBlock := range refUnresolvedBlocks {
|
||||
util.IncBootProgress(bootProgressPart, "Resolving ref block content "+util.ShortPathForBootingDisplay(refBlock.ID))
|
||||
tx, err := sql.BeginTx()
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
blockContent := sql.ResolveRefContent(refBlock, &anchors)
|
||||
refBlock.Content = blockContent
|
||||
refBlockIDs = append(refBlockIDs, refBlock.ID)
|
||||
dynamicRefTreeIDs.Add(refBlock.RootID)
|
||||
sql.CommitTx(tx)
|
||||
if 1 < i && 0 == i%64 {
|
||||
util.PushEndlessProgress(fmt.Sprintf(Conf.Language(53), i, len(refUnresolvedBlocks)-i))
|
||||
}
|
||||
}
|
||||
|
||||
// 将需要更新动态引用文本内容的块先删除,后面会重新插入,这样比直接 update 快很多
|
||||
util.SetBootDetails("Deleting unresolved block content...")
|
||||
tx, err := sql.BeginTx()
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
sql.DeleteBlockByIDs(tx, refBlockIDs)
|
||||
sql.CommitTx(tx)
|
||||
|
||||
bootProgressPart = 10.0 / float64(len(refUnresolvedBlocks))
|
||||
for i, refBlock := range refUnresolvedBlocks {
|
||||
util.IncBootProgress(bootProgressPart, "Updating block content "+util.ShortPathForBootingDisplay(refBlock.ID))
|
||||
tx, err = sql.BeginTx()
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
sql.InsertBlock(tx, refBlock)
|
||||
sql.CommitTx(tx)
|
||||
if 1 < i && 0 == i%64 {
|
||||
util.PushEndlessProgress(fmt.Sprintf(Conf.Language(53), i, len(refUnresolvedBlocks)-i))
|
||||
}
|
||||
}
|
||||
|
||||
if 0 < dynamicRefTreeIDs.Size() {
|
||||
// 块引锚文本静态化
|
||||
for _, dynamicRefTreeIDVal := range dynamicRefTreeIDs.Values() {
|
||||
dynamicRefTreeID := dynamicRefTreeIDVal.(string)
|
||||
util.IncBootProgress(bootProgressPart, "Persisting block ref text "+util.ShortPathForBootingDisplay(dynamicRefTreeID))
|
||||
tree, err := loadTreeByBlockID(dynamicRefTreeID)
|
||||
if nil != err {
|
||||
util.LogErrorf("tree [%s] dynamic ref text to static failed: %s", dynamicRefTreeID, err)
|
||||
continue
|
||||
}
|
||||
legacyDynamicRefTreeToStatic(tree)
|
||||
if err := filesys.WriteTree(tree); nil == err {
|
||||
//util.LogInfof("persisted tree [%s] dynamic ref text", tree.Box+tree.Path)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 引用入库
|
||||
util.SetBootDetails("Indexing refs...")
|
||||
refBlocks := sql.GetRefExistedBlocks()
|
||||
refTreeIDs := hashset.New()
|
||||
for _, refBlock := range refBlocks {
|
||||
refTreeIDs.Add(refBlock.RootID)
|
||||
}
|
||||
if 0 < refTreeIDs.Size() {
|
||||
luteEngine := NewLute()
|
||||
bootProgressPart := 10.0 / float64(refTreeIDs.Size())
|
||||
for _, box := range Conf.GetOpenedBoxes() {
|
||||
tx, err := sql.BeginTx()
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
sql.DeleteRefsByBoxTx(tx, box.ID)
|
||||
sql.CommitTx(tx)
|
||||
|
||||
files := box.ListFiles("/")
|
||||
i := 0
|
||||
for _, file := range files {
|
||||
if file.isdir || !strings.HasSuffix(file.name, ".sy") {
|
||||
continue
|
||||
}
|
||||
|
||||
if file.isdir || !strings.HasSuffix(file.name, ".sy") {
|
||||
continue
|
||||
}
|
||||
|
||||
id := strings.TrimSuffix(file.name, ".sy")
|
||||
if !refTreeIDs.Contains(id) {
|
||||
continue
|
||||
}
|
||||
|
||||
util.IncBootProgress(bootProgressPart, "Indexing ref "+util.ShortPathForBootingDisplay(file.path))
|
||||
|
||||
tree, err := filesys.LoadTree(box.ID, file.path, luteEngine)
|
||||
if nil != err {
|
||||
util.LogErrorf("parse box [%s] tree [%s] failed", box.ID, file.path)
|
||||
continue
|
||||
}
|
||||
|
||||
tx, err = sql.BeginTx()
|
||||
if nil != err {
|
||||
continue
|
||||
}
|
||||
sql.InsertRefs(tx, tree)
|
||||
if err = sql.CommitTx(tx); nil != err {
|
||||
continue
|
||||
}
|
||||
if 1 < i && 0 == i%64 {
|
||||
util.PushEndlessProgress(fmt.Sprintf(Conf.Language(55), i))
|
||||
filesys.ReleaseAllFileLocks()
|
||||
}
|
||||
i++
|
||||
}
|
||||
}
|
||||
}
|
||||
util.LogInfof("resolved refs [%d] in [%dms]", len(refBlocks), time.Now().Sub(start).Milliseconds())
|
||||
}
|
||||
|
||||
func legacyDynamicRefTreeToStatic(tree *parse.Tree) {
|
||||
ast.Walk(tree.Root, func(n *ast.Node, entering bool) ast.WalkStatus {
|
||||
if !entering || ast.NodeBlockRef != n.Type {
|
||||
return ast.WalkContinue
|
||||
}
|
||||
if isLegacyDynamicBlockRef(n) {
|
||||
idNode := n.ChildByType(ast.NodeBlockRefID)
|
||||
defID := idNode.TokensStr()
|
||||
def := sql.GetBlock(defID)
|
||||
var text string
|
||||
if nil == def {
|
||||
if "zh_CN" == Conf.Lang {
|
||||
text = "解析引用锚文本失败,请尝试更新该引用指向的定义块后再重新打开该文档"
|
||||
} else {
|
||||
text = "Failed to parse the ref anchor text, please try to update the def block pointed to by the ref and then reopen this document"
|
||||
}
|
||||
} else {
|
||||
text = sql.GetRefText(defID)
|
||||
}
|
||||
if Conf.Editor.BlockRefDynamicAnchorTextMaxLen < utf8.RuneCountInString(text) {
|
||||
text = gulu.Str.SubStr(text, Conf.Editor.BlockRefDynamicAnchorTextMaxLen) + "..."
|
||||
}
|
||||
treenode.SetDynamicBlockRefText(n, text)
|
||||
return ast.WalkSkipChildren
|
||||
}
|
||||
return ast.WalkContinue
|
||||
})
|
||||
}
|
||||
|
||||
func isLegacyDynamicBlockRef(blockRef *ast.Node) bool {
|
||||
return nil == blockRef.ChildByType(ast.NodeBlockRefText) && nil == blockRef.ChildByType(ast.NodeBlockRefDynamicText)
|
||||
}
|
||||
412
kernel/model/liandi.go
Normal file

@@ -0,0 +1,412 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package model

import (
	"encoding/hex"
	"errors"
	"fmt"
	"net/http"
	"strconv"
	"time"

	"github.com/88250/gulu"
	"github.com/siyuan-note/siyuan/kernel/conf"
	"github.com/siyuan-note/siyuan/kernel/util"
)

var ErrFailedToConnectCloudServer = errors.New("failed to connect cloud server")

func DeactivateUser() (err error) {
	requestResult := gulu.Ret.NewResult()
	request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
	resp, err := request.
		SetResult(requestResult).
		SetCookies(&http.Cookie{Name: "symphony", Value: Conf.User.UserToken}).
		Post(util.AliyunServer + "/apis/siyuan/user/deactivate")
	if nil != err {
		util.LogErrorf("deactivate user failed: %s", err)
		return ErrFailedToConnectCloudServer
	}

	if 401 == resp.StatusCode {
		err = errors.New(Conf.Language(31))
		return
	}

	if 0 != requestResult.Code {
		util.LogErrorf("deactivate user failed: %s", requestResult.Msg)
		return errors.New(requestResult.Msg)
	}
	return
}

func SetCloudBlockReminder(id, data string, timed int64) (err error) {
	requestResult := gulu.Ret.NewResult()
	payload := map[string]interface{}{"dataId": id, "data": data, "timed": timed}
	request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
	resp, err := request.
		SetResult(requestResult).
		SetBody(payload).
		SetCookies(&http.Cookie{Name: "symphony", Value: Conf.User.UserToken}).
		Post(util.AliyunServer + "/apis/siyuan/calendar/setBlockReminder")
	if nil != err {
		util.LogErrorf("set block reminder failed: %s", err)
		return ErrFailedToConnectCloudServer
	}

	if 401 == resp.StatusCode {
		err = errors.New(Conf.Language(31))
		return
	}

	if 0 != requestResult.Code {
		util.LogErrorf("set block reminder failed: %s", requestResult.Msg)
		return errors.New(requestResult.Msg)
	}
	return
}

var uploadToken = ""
var uploadTokenTime int64

func LoadUploadToken() (err error) {
	now := time.Now().Unix()
	if 3600 >= now-uploadTokenTime {
		return
	}

	requestResult := gulu.Ret.NewResult()
	request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
	resp, err := request.
		SetResult(requestResult).
		SetCookies(&http.Cookie{Name: "symphony", Value: Conf.User.UserToken}).
		Post(util.AliyunServer + "/apis/siyuan/upload/token")
	if nil != err {
		util.LogErrorf("get upload token failed: %s", err)
		return ErrFailedToConnectCloudServer
	}

	if 401 == resp.StatusCode {
		err = errors.New(Conf.Language(31))
		return
	}

	if 0 != requestResult.Code {
		util.LogErrorf("get upload token failed: %s", requestResult.Msg)
		return
	}

	resultData := requestResult.Data.(map[string]interface{})
	uploadToken = resultData["uploadToken"].(string)
	uploadTokenTime = now
	return
}

var (
	refreshUserTicker              = time.NewTicker(30 * time.Minute)
	subscriptionExpirationReminded bool
)

func AutoRefreshUser() {
	for {
		if !subscriptionExpirationReminded {
			subscriptionExpirationReminded = true
			go func() {
				if "ios" == util.Container {
					return
				}
				if IsSubscriber() && -1 != Conf.User.UserSiYuanProExpireTime {
					expired := int64(Conf.User.UserSiYuanProExpireTime)
					if time.Now().UnixMilli() >= expired { // already expired
						time.Sleep(time.Second * 30)
						util.PushErrMsg(Conf.Language(128), 0)
						return
					}
					remains := (expired - time.Now().Add(24*time.Hour*15).UnixMilli()) / 1000 / 60 / 60 / 24
					if 0 <= remains && 15 > remains { // expires within 15 days
						time.Sleep(3 * time.Minute)
						util.PushErrMsg(fmt.Sprintf(Conf.Language(127), remains), 0)
						return
					}
				}
			}()
		}

		if nil != Conf.User {
			time.Sleep(3 * time.Minute)
			RefreshUser(Conf.User.UserToken)
			subscriptionExpirationReminded = false
		}
		<-refreshUserTicker.C
	}
}

func RefreshUser(token string) error {
	threeDaysAfter := util.CurrentTimeMillis() + 1000*60*60*24*3
	if "" == token {
		if "" != Conf.UserData {
			Conf.User = loadUserFromConf()
		}
		if nil == Conf.User {
			return errors.New(Conf.Language(19))
		}

		var tokenExpireTime int64
		tokenExpireTime, err := strconv.ParseInt(Conf.User.UserTokenExpireTime+"000", 10, 64)
		if nil != err {
			util.LogErrorf("convert token expire time [%s] failed: %s", Conf.User.UserTokenExpireTime, err)
			return errors.New(Conf.Language(19))
		}

		if threeDaysAfter > tokenExpireTime {
			token = Conf.User.UserToken
			goto Net
		}
		return nil
	}

Net:
	start := time.Now()
	user, err := getUser(token)
	if err != nil {
		if nil == Conf.User || errInvalidUser == err {
			return errors.New(Conf.Language(19))
		}

		var tokenExpireTime int64
		tokenExpireTime, err = strconv.ParseInt(Conf.User.UserTokenExpireTime+"000", 10, 64)
		if nil != err {
			util.LogErrorf("convert token expire time [%s] failed: %s", Conf.User.UserTokenExpireTime, err)
			return errors.New(Conf.Language(19))
		}

		if threeDaysAfter > tokenExpireTime {
			return errors.New(Conf.Language(19))
		}
		return nil
	}

	Conf.User = user
	data, _ := gulu.JSON.MarshalJSON(user)
	Conf.UserData = util.AESEncrypt(string(data))
	Conf.Save()

	if elapsed := time.Now().Sub(start).Milliseconds(); 3000 < elapsed {
		util.LogInfof("get cloud user elapsed [%dms]", elapsed)
	}
	return nil
}

func loadUserFromConf() *conf.User {
	if "" == Conf.UserData {
		return nil
	}

	data := util.AESDecrypt(Conf.UserData)
	data, _ = hex.DecodeString(string(data))
	user := &conf.User{}
	if err := gulu.JSON.UnmarshalJSON(data, &user); nil == err {
		return user
	}
	return nil
}

func RemoveCloudShorthands(ids []string) (err error) {
	result := map[string]interface{}{}
	request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
	body := map[string]interface{}{
		"ids": ids,
	}
	resp, err := request.
		SetResult(&result).
		SetCookies(&http.Cookie{Name: "symphony", Value: Conf.User.UserToken}).
		SetBody(body).
		Post(util.AliyunServer + "/apis/siyuan/inbox/removeCloudShorthands")
	if nil != err {
		util.LogErrorf("remove cloud shorthands failed: %s", err)
		err = ErrFailedToConnectCloudServer
		return
	}

	if 401 == resp.StatusCode {
		err = errors.New(Conf.Language(31))
		return
	}

	code := result["code"].(float64)
	if 0 != code {
		util.LogErrorf("remove cloud shorthands failed: %s", result["msg"])
		err = errors.New(result["msg"].(string))
		return
	}
	return
}

func GetCloudShorthands(page int) (result map[string]interface{}, err error) {
	result = map[string]interface{}{}
	request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
	resp, err := request.
		SetResult(&result).
		SetCookies(&http.Cookie{Name: "symphony", Value: Conf.User.UserToken}).
		Post(util.AliyunServer + "/apis/siyuan/inbox/getCloudShorthands?p=" + strconv.Itoa(page))
	if nil != err {
		util.LogErrorf("get cloud shorthands failed: %s", err)
		err = ErrFailedToConnectCloudServer
		return
	}

	if 401 == resp.StatusCode {
		err = errors.New(Conf.Language(31))
		return
	}

	code := result["code"].(float64)
	if 0 != code {
		util.LogErrorf("get cloud shorthands failed: %s", result["msg"])
		err = errors.New(result["msg"].(string))
		return
	}
	shorthands := result["data"].(map[string]interface{})["shorthands"].([]interface{})
	for _, item := range shorthands {
		shorthand := item.(map[string]interface{})
		id := shorthand["oId"].(string)
		t, _ := strconv.ParseInt(id, 10, 64)
		hCreated := util.Millisecond2Time(t)
		shorthand["hCreated"] = hCreated.Format("2006-01-02 15:04")
	}
	return
}

var errInvalidUser = errors.New("invalid user")

func getUser(token string) (*conf.User, error) {
	result := map[string]interface{}{}
	request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
	_, err := request.
		SetResult(&result).
		SetBody(map[string]string{"token": token}).
		Post(util.AliyunServer + "/apis/siyuan/user")
	if nil != err {
		util.LogErrorf("get community user failed: %s", err)
		return nil, errors.New(Conf.Language(18))
	}

	code := result["code"].(float64)
	if 0 != code {
		if 255 == code {
			return nil, errInvalidUser
		}
		util.LogErrorf("get community user failed: %s", result["msg"])
		return nil, errors.New(Conf.Language(18))
	}

	dataStr := result["data"].(string)
	data := util.AESDecrypt(dataStr)
	user := &conf.User{}
	if err = gulu.JSON.UnmarshalJSON(data, &user); nil != err {
		util.LogErrorf("get community user failed: %s", err)
		return nil, errors.New(Conf.Language(18))
	}
	return user, nil
}

func UseActivationcode(code string) (err error) {
	requestResult := gulu.Ret.NewResult()
	request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
	_, err = request.
		SetResult(requestResult).
		SetBody(map[string]string{"data": code}).
		SetCookies(&http.Cookie{Name: "symphony", Value: Conf.User.UserToken}).
		Post(util.AliyunServer + "/apis/siyuan/useActivationcode")
	if nil != err {
		util.LogErrorf("check activation code failed: %s", err)
		return ErrFailedToConnectCloudServer
	}
	if 0 != requestResult.Code {
		return errors.New(requestResult.Msg)
	}
	return
}

func CheckActivationcode(code string) (retCode int, msg string) {
	retCode = 1
	requestResult := gulu.Ret.NewResult()
	request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
	_, err := request.
		SetResult(requestResult).
		SetBody(map[string]string{"data": code}).
		SetCookies(&http.Cookie{Name: "symphony", Value: Conf.User.UserToken}).
		Post(util.AliyunServer + "/apis/siyuan/checkActivationcode")
	if nil != err {
		util.LogErrorf("check activation code failed: %s", err)
		msg = ErrFailedToConnectCloudServer.Error()
		return
	}
	if 0 == requestResult.Code {
		retCode = 0
	}
	msg = requestResult.Msg
	return
}

func Login(userName, password, captcha string) (ret *gulu.Result, err error) {
	result := map[string]interface{}{}
	request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
	_, err = request.
		SetResult(&result).
		SetBody(map[string]string{"userName": userName, "userPassword": password, "captcha": captcha}).
		Post(util.AliyunServer + "/apis/siyuan/login")
	if nil != err {
		util.LogErrorf("login failed: %s", err)
		return nil, errors.New(Conf.Language(18))
	}
	ret = &gulu.Result{
		Code: int(result["code"].(float64)),
		Msg:  result["msg"].(string),
		Data: map[string]interface{}{
			"userName":    result["userName"],
			"token":       result["token"],
			"needCaptcha": result["needCaptcha"],
		},
	}
	if -1 == ret.Code {
		ret.Code = 1
	}
	return
}

func Login2fa(token, code string) (map[string]interface{}, error) {
	result := map[string]interface{}{}
	request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
	_, err := request.
		SetResult(&result).
		SetBody(map[string]string{"twofactorAuthCode": code}).
		SetHeader("token", token).
		Post(util.AliyunServer + "/apis/siyuan/login/2fa")
	if nil != err {
		util.LogErrorf("login 2fa failed: %s", err)
		return nil, errors.New(Conf.Language(18))
	}
	return result, nil
}

func LogoutUser() {
	Conf.UserData = ""
	Conf.User = nil
	Conf.Save()
}
112
kernel/model/listitem.go
Normal file

@@ -0,0 +1,112 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package model

import (
	"path"

	"github.com/88250/lute/ast"
	"github.com/88250/lute/parse"
	"github.com/88250/protyle"
	"github.com/siyuan-note/siyuan/kernel/sql"
	"github.com/siyuan-note/siyuan/kernel/treenode"
	"github.com/siyuan-note/siyuan/kernel/util"
)

func ListItem2Doc(srcListItemID, targetBoxID, targetPath string) (srcRootBlockID, newTargetPath string, err error) {
	WaitForWritingFiles()

	srcTree, _ := loadTreeByBlockID(srcListItemID)
	if nil == srcTree {
		err = ErrBlockNotFound
		return
	}
	srcRootBlockID = srcTree.Root.ID

	listItemNode := treenode.GetNodeInTree(srcTree, srcListItemID)
	if nil == listItemNode {
		err = ErrBlockNotFound
		return
	}

	box := Conf.Box(targetBoxID)
	listItemText := sql.GetContainerText(listItemNode)
	listItemText = util.FilterFileName(listItemText)

	moveToRoot := "/" == targetPath
	toHP := path.Join("/", listItemText)
	toFolder := "/"

	if !moveToRoot {
		toBlock := treenode.GetBlockTreeRootByPath(targetBoxID, targetPath)
		if nil == toBlock {
			err = ErrBlockNotFound
			return
		}
		toHP = path.Join(toBlock.HPath, listItemText)
		toFolder = path.Join(path.Dir(targetPath), toBlock.ID)
	}

	newTargetPath = path.Join(toFolder, srcListItemID+".sy")
	if !box.Exist(toFolder) {
		if err = box.MkdirAll(toFolder); nil != err {
			return
		}
	}

	var children []*ast.Node
	for c := listItemNode.FirstChild.Next; nil != c; c = c.Next {
		children = append(children, c)
	}
	if 1 > len(children) {
		newNode := protyle.NewParagraph()
		children = append(children, newNode)
	}

	luteEngine := NewLute()
	newTree := &parse.Tree{Root: &ast.Node{Type: ast.NodeDocument, ID: srcListItemID}, Context: &parse.Context{ParseOption: luteEngine.ParseOptions}}
	for _, c := range children {
		newTree.Root.AppendChild(c)
	}
	newTree.ID = srcListItemID
	newTree.Path = newTargetPath
	newTree.HPath = toHP
	listItemNode.SetIALAttr("type", "doc")
	listItemNode.SetIALAttr("id", srcListItemID)
	listItemNode.SetIALAttr("title", listItemText)
	newTree.Root.KramdownIAL = listItemNode.KramdownIAL
	srcLiParent := listItemNode.Parent
	listItemNode.Unlink()
	if nil != srcLiParent && nil == srcLiParent.FirstChild {
		srcLiParent.Unlink()
	}
	srcTree.Root.SetIALAttr("updated", util.CurrentTimeSecondsStr())

	if err = indexWriteJSONQueue(srcTree); nil != err {
		return "", "", err
	}

	newTree.Box, newTree.Path = targetBoxID, newTargetPath
	newTree.Root.SetIALAttr("updated", util.CurrentTimeSecondsStr())
	if err = indexWriteJSONQueue(newTree); nil != err {
		return "", "", err
	}
	IncWorkspaceDataVer()
	RefreshBacklink(srcTree.ID)
	RefreshBacklink(newTree.ID)
	return
}
214
kernel/model/mount.go
Normal file

@@ -0,0 +1,214 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package model

import (
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"runtime/debug"
	"strings"
	"time"

	"github.com/88250/gulu"
	"github.com/88250/lute/ast"
	"github.com/siyuan-note/siyuan/kernel/filesys"
	"github.com/siyuan-note/siyuan/kernel/treenode"
	"github.com/siyuan-note/siyuan/kernel/util"
)

func CreateBox(name string) (id string, err error) {
	WaitForWritingFiles()
	syncLock.Lock()
	defer syncLock.Unlock()

	id = ast.NewNodeID()
	boxLocalPath := filepath.Join(util.DataDir, id)
	err = os.MkdirAll(boxLocalPath, 0755)
	if nil != err {
		return
	}

	box := &Box{ID: id, Name: name}
	boxConf := box.GetConf()
	boxConf.Name = name
	box.SaveConf(boxConf)
	IncWorkspaceDataVer()
	return
}

func RenameBox(boxID, name string) (err error) {
	WaitForWritingFiles()
	syncLock.Lock()
	defer syncLock.Unlock()

	box := Conf.Box(boxID)
	if nil == box {
		return errors.New(Conf.Language(0))
	}

	boxConf := box.GetConf()
	boxConf.Name = name
	box.Name = name
	box.SaveConf(boxConf)
	IncWorkspaceDataVer()
	return
}

func RemoveBox(boxID string) (err error) {
	WaitForWritingFiles()
	syncLock.Lock()
	defer syncLock.Unlock()

	if util.IsReservedFilename(boxID) {
		return errors.New(fmt.Sprintf("can not remove [%s] caused by it is a reserved file", boxID))
	}

	localPath := filepath.Join(util.DataDir, boxID)
	if !gulu.File.IsExist(localPath) {
		return
	}
	if !gulu.File.IsDir(localPath) {
		return errors.New(fmt.Sprintf("can not remove [%s] caused by it is not a dir", boxID))
	}

	filesys.ReleaseFileLocks(localPath)
	if !isUserGuide(boxID) {
		var historyDir string
		historyDir, err = util.GetHistoryDir("delete")
		if nil != err {
			util.LogErrorf("get history dir failed: %s", err)
			return
		}
		p := strings.TrimPrefix(localPath, util.DataDir)
		historyPath := filepath.Join(historyDir, p)
		if err = gulu.File.Copy(localPath, historyPath); nil != err {
			util.LogErrorf("gen sync history failed: %s", err)
			return
		}

		copyBoxAssetsToDataAssets(boxID)
	}

	unmount0(boxID)
	if err = os.RemoveAll(localPath); nil != err {
		return
	}
	IncWorkspaceDataVer()
	return
}

func Unmount(boxID string) {
	WaitForWritingFiles()
	syncLock.Lock()
	defer syncLock.Unlock()

	unmount0(boxID)
	evt := util.NewCmdResult("unmount", 0, util.PushModeBroadcast, 0)
	evt.Data = map[string]interface{}{
		"box": boxID,
	}
	util.PushEvent(evt)
}

func unmount0(boxID string) {
	for _, box := range Conf.GetOpenedBoxes() {
		if box.ID == boxID {
			boxConf := box.GetConf()
			boxConf.Closed = true
			box.SaveConf(boxConf)
			box.Unindex()
			debug.FreeOSMemory()
			return
		}
	}
}

func Mount(boxID string) (alreadyMount bool, err error) {
	WaitForWritingFiles()
	syncLock.Lock()
	defer syncLock.Unlock()

	localPath := filepath.Join(util.DataDir, boxID)

	var reMountGuide bool
	if isUserGuide(boxID) {
		// Remount the user guide notebook

		guideBox := Conf.Box(boxID)
		if nil != guideBox {
			unmount0(guideBox.ID)
			reMountGuide = true
		}

		if err = os.RemoveAll(localPath); nil != err {
			return
		}

		p := filepath.Join(util.WorkingDir, "guide", boxID)
		if err = gulu.File.Copy(p, localPath); nil != err {
			return
		}

		if box := Conf.Box(boxID); nil != box {
			boxConf := box.GetConf()
			boxConf.Closed = true
			box.SaveConf(boxConf)
		}

		if Conf.Newbie {
			Conf.Newbie = false
			Conf.Save()
		}

		go func() {
			time.Sleep(time.Second * 5)
			util.PushErrMsg(Conf.Language(52), 9000)
		}()
	}

	if !gulu.File.IsDir(localPath) {
		return false, errors.New("can not open file, just support open folder only")
	}

	for _, box := range Conf.GetOpenedBoxes() {
		if box.ID == boxID {
			return true, nil
		}
	}

	box := &Box{ID: boxID}
	boxConf := box.GetConf()
	boxConf.Closed = false
	box.SaveConf(boxConf)

	box.Index(false)
	IndexRefs()
	// Cache the expanded doc tree at the root level
	ListDocTree(box.ID, "/", Conf.FileTree.Sort)
	treenode.SaveBlockTree()
	util.ClearPushProgress(100)
	if reMountGuide {
		return true, nil
	}
	return false, nil
}

func isUserGuide(boxID string) bool {
	return "20210808180117-czj9bvb" == boxID || "20210808180117-6v0mkxr" == boxID || "20211226090932-5lcq56f" == boxID
}
821
kernel/model/osssync.go
Normal file

@@ -0,0 +1,821 @@
// SiYuan - Build Your Eternal Digital Garden
|
||||
// Copyright (c) 2020-present, b3log.org
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/sha1"
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/88250/gulu"
|
||||
"github.com/panjf2000/ants/v2"
|
||||
"github.com/qiniu/go-sdk/v7/storage"
|
||||
"github.com/siyuan-note/siyuan/kernel/util"
|
||||
)
|
||||
|
||||
func getCloudSpaceOSS() (sync, backup map[string]interface{}, assetSize int64, err error) {
|
||||
result := map[string]interface{}{}
|
||||
request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
|
||||
resp, err := request.
|
||||
SetResult(&result).
|
||||
SetBody(map[string]string{"token": Conf.User.UserToken}).
|
||||
Post(util.AliyunServer + "/apis/siyuan/data/getSiYuanWorkspace")
|
||||
if nil != err {
|
||||
util.LogErrorf("get cloud space failed: %s", err)
|
||||
return nil, nil, 0, ErrFailedToConnectCloudServer
|
||||
}
|
||||
|
||||
if 401 == resp.StatusCode {
|
||||
err = errors.New(Conf.Language(31))
|
||||
return
|
||||
}
|
||||
|
||||
code := result["code"].(float64)
|
||||
if 0 != code {
|
||||
util.LogErrorf("get cloud space failed: %s", result["msg"])
|
||||
return nil, nil, 0, errors.New(result["msg"].(string))
|
||||
}
|
||||
|
||||
data := result["data"].(map[string]interface{})
|
||||
sync = data["sync"].(map[string]interface{})
|
||||
backup = data["backup"].(map[string]interface{})
|
||||
assetSize = int64(data["assetSize"].(float64))
|
||||
return
|
||||
}
|
||||
|
||||
func removeCloudDirPath(dirPath string) (err error) {
|
||||
result := map[string]interface{}{}
|
||||
request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
|
||||
resp, err := request.
|
||||
SetResult(&result).
|
||||
SetBody(map[string]string{"dirPath": dirPath, "token": Conf.User.UserToken}).
|
||||
Post(util.AliyunServer + "/apis/siyuan/data/removeSiYuanDirPath")
|
||||
if nil != err {
|
||||
util.LogErrorf("create cloud sync dir failed: %s", err)
|
||||
return ErrFailedToConnectCloudServer
|
||||
}
|
||||
|
||||
if 200 != resp.StatusCode {
|
||||
if 401 == resp.StatusCode {
|
||||
err = errors.New(Conf.Language(31))
|
||||
return
|
||||
}
|
||||
msg := fmt.Sprintf("remove cloud dir failed: %d", resp.StatusCode)
|
||||
util.LogErrorf(msg)
|
||||
err = errors.New(msg)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func createCloudSyncDirOSS(name string) (err error) {
|
||||
result := map[string]interface{}{}
|
||||
request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
|
||||
resp, err := request.
|
||||
SetResult(&result).
|
||||
SetBody(map[string]string{"name": name, "token": Conf.User.UserToken}).
|
||||
Post(util.AliyunServer + "/apis/siyuan/data/createSiYuanSyncDir")
|
||||
if nil != err {
|
||||
util.LogErrorf("create cloud sync dir failed: %s", err)
|
||||
return ErrFailedToConnectCloudServer
|
||||
}
|
||||
|
||||
if 200 != resp.StatusCode {
|
||||
if 401 == resp.StatusCode {
|
||||
err = errors.New(Conf.Language(31))
|
||||
return
|
||||
}
|
||||
msg := fmt.Sprintf("create cloud sync dir failed: %d", resp.StatusCode)
|
||||
util.LogErrorf(msg)
|
||||
err = errors.New(msg)
|
||||
return
|
||||
}
|
||||
|
||||
code := result["code"].(float64)
|
||||
if 0 != code {
|
||||
util.LogErrorf("create cloud sync dir failed: %s", result["msg"])
|
||||
return errors.New(result["msg"].(string))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func listCloudSyncDirOSS() (dirs []map[string]interface{}, size int64, err error) {
|
||||
result := map[string]interface{}{}
|
||||
request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
|
||||
resp, err := request.
|
||||
SetBody(map[string]interface{}{"token": Conf.User.UserToken}).
|
||||
SetResult(&result).
|
||||
Post(util.AliyunServer + "/apis/siyuan/data/getSiYuanSyncDirList?uid=" + Conf.User.UserId)
|
||||
if nil != err {
|
||||
util.LogErrorf("get cloud sync dirs failed: %s", err)
|
||||
return nil, 0, ErrFailedToConnectCloudServer
|
||||
}
|
||||
|
||||
if 200 != resp.StatusCode {
|
||||
if 401 == resp.StatusCode {
|
||||
err = errors.New(Conf.Language(31))
|
||||
return
|
||||
}
|
||||
msg := fmt.Sprintf("get cloud sync dirs failed: %d", resp.StatusCode)
|
||||
util.LogErrorf(msg)
|
||||
err = errors.New(msg)
|
||||
return
|
||||
}
|
||||
|
||||
code := result["code"].(float64)
|
||||
if 0 != code {
|
||||
util.LogErrorf("get cloud sync dirs failed: %s", result["msg"])
|
||||
return nil, 0, ErrFailedToConnectCloudServer
|
||||
}
|
||||
|
||||
data := result["data"].(map[string]interface{})
|
||||
dataDirs := data["dirs"].([]interface{})
|
||||
for _, d := range dataDirs {
|
||||
dirs = append(dirs, d.(map[string]interface{}))
|
||||
}
|
||||
sort.Slice(dirs, func(i, j int) bool { return dirs[i]["name"].(string) < dirs[j]["name"].(string) })
|
||||
size = int64(data["size"].(float64))
|
||||
return
|
||||
}
|
||||
|
||||
func ossDownload(localDirPath, cloudDirPath string, bootOrExit bool) (fetchedFiles int, transferSize uint64, err error) {
|
||||
if !gulu.File.IsExist(localDirPath) {
|
||||
return
|
||||
}
|
||||
|
||||
cloudFileList, err := getCloudFileListOSS(cloudDirPath)
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
|
||||
localRemoves, cloudFetches, err := localUpsertRemoveListOSS(localDirPath, cloudFileList)
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
|
||||
for _, localRemove := range localRemoves {
|
||||
if err = os.RemoveAll(localRemove); nil != err {
|
||||
util.LogErrorf("local remove [%s] failed: %s", localRemove, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
needPushProgress := 32 < len(cloudFetches)
|
||||
waitGroup := &sync.WaitGroup{}
|
||||
var downloadErr error
|
||||
poolSize := 4
|
||||
if poolSize > len(cloudFetches) {
|
||||
poolSize = len(cloudFetches)
|
||||
}
|
||||
p, _ := ants.NewPoolWithFunc(poolSize, func(arg interface{}) {
|
||||
defer waitGroup.Done()
|
||||
if nil != downloadErr {
|
||||
return // 快速失败
|
||||
}
|
||||
fetch := arg.(string)
|
||||
err = ossDownload0(localDirPath, cloudDirPath, fetch, &fetchedFiles, &transferSize, bootOrExit)
|
||||
if nil != err {
|
||||
downloadErr = err
|
||||
return
|
||||
}
|
||||
if needPushProgress {
|
||||
msg := fmt.Sprintf(Conf.Language(103), fetchedFiles, len(cloudFetches)-fetchedFiles)
|
||||
util.PushProgress(util.PushProgressCodeProgressed, fetchedFiles, len(cloudFetches), msg)
|
||||
}
|
||||
if bootOrExit {
|
||||
msg := fmt.Sprintf("Downloading data from the cloud %d/%d", fetchedFiles, len(cloudFetches))
|
||||
util.IncBootProgress(0, msg)
|
||||
}
|
||||
})
|
||||
for _, fetch := range cloudFetches {
|
||||
waitGroup.Add(1)
|
||||
p.Invoke(fetch)
|
||||
}
|
||||
waitGroup.Wait()
|
||||
p.Release()
|
||||
if nil != downloadErr {
|
||||
err = downloadErr
|
||||
return
|
||||
}
|
||||
if needPushProgress {
|
||||
util.ClearPushProgress(len(cloudFetches))
|
||||
util.PushMsg(Conf.Language(106), 1000*60*10)
|
||||
}
|
||||
if bootOrExit {
|
||||
util.IncBootProgress(0, "Decrypting from sync to data...")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func ossDownload0(localDirPath, cloudDirPath, fetch string, fetchedFiles *int, transferSize *uint64, bootORExit bool) (err error) {
|
||||
localFilePath := filepath.Join(localDirPath, fetch)
|
||||
remoteFileURL := path.Join(cloudDirPath, fetch)
|
||||
var result map[string]interface{}
|
||||
resp, err := util.NewCloudRequest(Conf.System.NetworkProxy.String()).
|
||||
SetResult(&result).
|
||||
SetBody(map[string]interface{}{"token": Conf.User.UserToken, "path": remoteFileURL}).
|
||||
Post(util.AliyunServer + "/apis/siyuan/data/getSiYuanFile?uid=" + Conf.User.UserId)
|
||||
if nil != err {
|
||||
util.LogErrorf("download request [%s] failed: %s", remoteFileURL, err)
|
||||
return errors.New(fmt.Sprintf(Conf.Language(93), err))
|
||||
}
|
||||
|
||||
if 200 != resp.StatusCode {
|
||||
if 401 == resp.StatusCode {
|
||||
err = errors.New("account authentication failed, please login again")
|
||||
return errors.New(fmt.Sprintf(Conf.Language(93), err))
|
||||
}
|
||||
util.LogErrorf("download request status code [%d]", resp.StatusCode)
|
||||
err = errors.New("download file URL failed")
|
||||
return errors.New(fmt.Sprintf(Conf.Language(93), err))
|
||||
}
|
||||
|
||||
code := result["code"].(float64)
|
||||
if 0 != code {
|
||||
msg := result["msg"].(string)
|
||||
util.LogErrorf("download cloud file failed: %s", msg)
|
||||
return errors.New(fmt.Sprintf(Conf.Language(93), msg))
|
||||
}
|
||||
|
||||
resultData := result["data"].(map[string]interface{})
|
||||
downloadURL := resultData["url"].(string)
|
||||
|
||||
if err = os.MkdirAll(filepath.Dir(localFilePath), 0755); nil != err {
|
||||
return
|
||||
}
|
||||
os.Remove(localFilePath)
|
||||
|
||||
if bootORExit {
|
||||
resp, err = util.NewCloudFileRequest15s(Conf.System.NetworkProxy.String()).Get(downloadURL)
|
||||
} else {
|
||||
resp, err = util.NewCloudFileRequest2m(Conf.System.NetworkProxy.String()).Get(downloadURL)
|
||||
}
|
||||
if nil != err {
|
||||
util.LogErrorf("download request [%s] failed: %s", downloadURL, err)
|
||||
return errors.New(fmt.Sprintf(Conf.Language(93), err))
|
||||
}
|
||||
if 200 != resp.StatusCode {
|
||||
util.LogErrorf("download request [%s] status code [%d]", downloadURL, resp.StatusCode)
|
||||
err = errors.New(fmt.Sprintf("download file failed [%d]", resp.StatusCode))
|
||||
if 404 == resp.StatusCode {
|
||||
err = errors.New(Conf.Language(135))
|
||||
}
|
||||
return errors.New(fmt.Sprintf(Conf.Language(93), err))
|
||||
}
|
||||
|
||||
data, err := resp.ToBytes()
|
||||
if nil != err {
|
||||
util.LogErrorf("download read response body data failed: %s, %s", err, string(data))
|
||||
err = errors.New("download read data failed")
|
||||
return errors.New(fmt.Sprintf(Conf.Language(93), err))
|
||||
}
|
||||
size := int64(len(data))
|
||||
|
||||
if err = gulu.File.WriteFileSafer(localFilePath, data, 0644); nil != err {
|
||||
util.LogErrorf("write file [%s] failed: %s", localFilePath, err)
|
||||
return errors.New(fmt.Sprintf(Conf.Language(93), err))
|
||||
}
|
||||
|
||||
*fetchedFiles++
|
||||
*transferSize += uint64(size)
|
||||
return
|
||||
}
|
||||
|
||||
func ossUpload(localDirPath, cloudDirPath, cloudDevice string, boot bool) (wroteFiles int, transferSize uint64, err error) {
|
||||
if !gulu.File.IsExist(localDirPath) {
|
||||
return
|
||||
}
|
||||
|
||||
var cloudFileList map[string]*CloudIndex
|
||||
localDevice := Conf.System.ID
|
||||
if "" != localDevice && localDevice == cloudDevice {
|
||||
//util.LogInfof("cloud device is the same as local device, get index from local")
|
||||
cloudFileList, err = getLocalFileListOSS(cloudDirPath)
|
||||
if nil != err {
|
||||
util.LogInfof("get local index failed [%s], get index from cloud", err)
|
||||
cloudFileList, err = getCloudFileListOSS(cloudDirPath)
|
||||
}
|
||||
} else {
|
||||
cloudFileList, err = getCloudFileListOSS(cloudDirPath)
|
||||
}
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
|
||||
localUpserts, cloudRemoves, err := cloudUpsertRemoveListOSS(localDirPath, cloudFileList)
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
|
||||
needPushProgress := 32 < len(localUpserts)
|
||||
waitGroup := &sync.WaitGroup{}
|
||||
var uploadErr error
|
||||
|
||||
poolSize := 4
|
||||
if poolSize > len(localUpserts) {
|
||||
poolSize = len(localUpserts)
|
||||
}
|
||||
p, _ := ants.NewPoolWithFunc(poolSize, func(arg interface{}) {
|
||||
defer waitGroup.Done()
|
||||
if nil != uploadErr {
|
||||
return // 快速失败
|
||||
}
|
||||
localUpsert := arg.(string)
|
||||
err = ossUpload0(localDirPath, cloudDirPath, localUpsert, &wroteFiles, &transferSize)
|
||||
if nil != err {
|
||||
uploadErr = err
|
||||
return
|
||||
}
|
||||
if needPushProgress {
|
||||
util.PushMsg(fmt.Sprintf(Conf.Language(104), wroteFiles, len(localUpserts)-wroteFiles), 1000*60*10)
|
||||
}
|
||||
if boot {
|
||||
msg := fmt.Sprintf("Uploading data to the cloud %d/%d", wroteFiles, len(localUpserts))
|
||||
util.IncBootProgress(0, msg)
|
||||
}
|
||||
})
|
||||
var index string
|
||||
localIndex := filepath.Join(localDirPath, "index.json")
|
||||
for _, localUpsert := range localUpserts {
|
||||
if localIndex == localUpsert {
|
||||
// 同步过程中断导致的一致性问题 https://github.com/siyuan-note/siyuan/issues/4912
|
||||
// index 最后单独上传
|
||||
index = localUpsert
|
||||
continue
|
||||
}
|
||||
|
||||
waitGroup.Add(1)
|
||||
p.Invoke(localUpsert)
|
||||
}
|
||||
waitGroup.Wait()
|
||||
p.Release()
|
||||
if nil != uploadErr {
|
||||
err = uploadErr
|
||||
return
|
||||
}
|
||||
|
||||
// 单独上传 index
|
||||
if uploadErr = ossUpload0(localDirPath, cloudDirPath, index, &wroteFiles, &transferSize); nil != uploadErr {
|
||||
err = uploadErr
|
||||
return
|
||||
}
|
||||
|
||||
if needPushProgress {
|
||||
util.PushMsg(Conf.Language(105), 3000)
|
||||
}
|
||||
|
||||
err = ossRemove0(cloudDirPath, cloudRemoves)
|
||||
return
|
||||
}
|
||||
|
||||
func ossRemove0(cloudDirPath string, removes []string) (err error) {
|
||||
if 1 > len(removes) {
|
||||
return
|
||||
}
|
||||
|
||||
request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
|
||||
resp, err := request.
|
||||
SetBody(map[string]interface{}{"token": Conf.User.UserToken, "dirPath": cloudDirPath, "paths": removes}).
|
||||
Post(util.AliyunServer + "/apis/siyuan/data/removeSiYuanFile?uid=" + Conf.User.UserId)
|
||||
if nil != err {
|
||||
util.LogErrorf("remove cloud file failed: %s", err)
|
||||
err = ErrFailedToConnectCloudServer
|
||||
return
|
||||
}
|
||||
|
||||
if 401 == resp.StatusCode {
|
||||
err = errors.New(Conf.Language(31))
|
||||
return
|
||||
}
|
||||
|
||||
if 200 != resp.StatusCode {
|
||||
msg := fmt.Sprintf("remove cloud file failed [sc=%d]", resp.StatusCode)
|
||||
util.LogErrorf(msg)
|
||||
err = errors.New(msg)
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func ossUpload0(localDirPath, cloudDirPath, localUpsert string, wroteFiles *int, transferSize *uint64) (err error) {
|
||||
info, statErr := os.Stat(localUpsert)
|
||||
if nil != statErr {
|
||||
err = statErr
|
||||
return
|
||||
}
|
||||
|
||||
filename := filepath.ToSlash(strings.TrimPrefix(localUpsert, localDirPath))
|
||||
upToken, err := getOssUploadToken(filename, cloudDirPath, info.Size())
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
|
||||
key := path.Join("siyuan", Conf.User.UserId, cloudDirPath, filename)
|
||||
if err = putFileToCloud(localUpsert, key, upToken); nil != err {
|
||||
util.LogErrorf("put file [%s] to cloud failed: %s", localUpsert, err)
|
||||
return errors.New(fmt.Sprintf(Conf.Language(94), err))
|
||||
}
|
||||
|
||||
//util.LogInfof("cloud wrote [%s], size [%d]", filename, info.Size())
|
||||
*wroteFiles++
|
||||
*transferSize += uint64(info.Size())
|
||||
return
|
||||
}
|
||||
|
||||
func getOssUploadToken(filename, cloudDirPath string, length int64) (ret string, err error) {
|
||||
// 因为需要指定 key,所以每次上传文件都必须在云端生成 Token,否则有安全隐患
|
||||
|
||||
var result map[string]interface{}
|
||||
req := util.NewCloudRequest(Conf.System.NetworkProxy.String()).
|
||||
SetResult(&result)
|
||||
req.SetBody(map[string]interface{}{
|
||||
"token": Conf.User.UserToken,
|
||||
"dirPath": cloudDirPath,
|
||||
"name": filename,
|
||||
"length": length})
|
||||
resp, err := req.Post(util.AliyunServer + "/apis/siyuan/data/getSiYuanFileUploadToken?uid=" + Conf.User.UserId)
|
||||
if nil != err {
|
||||
util.LogErrorf("get file [%s] upload token failed: %+v", filename, err)
|
||||
err = errors.New(fmt.Sprintf(Conf.Language(94), err))
|
||||
return
|
||||
}
|
||||
|
||||
if 200 != resp.StatusCode {
|
||||
if 401 == resp.StatusCode {
|
||||
err = errors.New(fmt.Sprintf(Conf.Language(94), Conf.Language(31)))
|
||||
return
|
||||
}
|
||||
util.LogErrorf("get file [%s] upload token failed [sc=%d]", filename, resp.StatusCode)
|
||||
err = errors.New(fmt.Sprintf(Conf.Language(94), strconv.Itoa(resp.StatusCode)))
|
||||
return
|
||||
}
|
||||
|
||||
code := result["code"].(float64)
|
||||
if 0 != code {
|
||||
msg := result["msg"].(string)
|
||||
util.LogErrorf("download cloud file failed: %s", msg)
|
||||
err = errors.New(fmt.Sprintf(Conf.Language(93), msg))
|
||||
return
|
||||
}
|
||||
|
||||
resultData := result["data"].(map[string]interface{})
|
||||
ret = resultData["token"].(string)
|
||||
return
|
||||
}
|
||||
|
||||
func getCloudSyncVer(cloudDir string) (cloudSyncVer int64, err error) {
|
||||
start := time.Now()
|
||||
result := map[string]interface{}{}
|
||||
request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
|
||||
resp, err := request.
|
||||
SetResult(&result).
|
||||
SetBody(map[string]string{"syncDir": cloudDir, "token": Conf.User.UserToken}).
|
||||
Post(util.AliyunServer + "/apis/siyuan/data/getSiYuanWorkspaceSyncVer?uid=" + Conf.User.UserId)
|
||||
if nil != err {
|
||||
util.LogErrorf("get cloud sync ver failed: %s", err)
|
||||
err = ErrFailedToConnectCloudServer
|
||||
return
|
||||
}
|
||||
if 200 != resp.StatusCode {
|
||||
if 401 == resp.StatusCode {
|
||||
err = errors.New(Conf.Language(31))
|
||||
return
|
||||
}
|
||||
util.LogErrorf("get cloud sync ver failed: %d", resp.StatusCode)
|
||||
err = ErrFailedToConnectCloudServer
|
||||
return
|
||||
}
|
||||
|
||||
code := result["code"].(float64)
|
||||
if 0 != code {
|
||||
msg := result["msg"].(string)
|
||||
util.LogErrorf("get cloud sync ver failed: %s", msg)
|
||||
err = errors.New(msg)
|
||||
return
|
||||
}
|
||||
|
||||
data := result["data"].(map[string]interface{})
|
||||
cloudSyncVer = int64(data["v"].(float64))
|
||||
|
||||
if elapsed := time.Now().Sub(start).Milliseconds(); 2000 < elapsed {
|
||||
util.LogInfof("get cloud sync ver elapsed [%dms]", elapsed)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getCloudSync(cloudDir string) (assetSize, backupSize int64, device string, err error) {
|
||||
start := time.Now()
|
||||
result := map[string]interface{}{}
|
||||
request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
|
||||
resp, err := request.
|
||||
SetResult(&result).
|
||||
SetBody(map[string]string{"syncDir": cloudDir, "token": Conf.User.UserToken}).
|
||||
Post(util.AliyunServer + "/apis/siyuan/data/getSiYuanWorkspaceSync?uid=" + Conf.User.UserId)
|
||||
if nil != err {
|
||||
util.LogErrorf("get cloud sync info failed: %s", err)
|
||||
err = ErrFailedToConnectCloudServer
|
||||
return
|
||||
}
|
||||
if 200 != resp.StatusCode {
|
||||
if 401 == resp.StatusCode {
|
||||
err = errors.New(Conf.Language(31))
|
||||
return
|
||||
}
|
||||
util.LogErrorf("get cloud sync info failed: %d", resp.StatusCode)
|
||||
err = ErrFailedToConnectCloudServer
|
||||
return
|
||||
}
|
||||
|
||||
code := result["code"].(float64)
|
||||
if 0 != code {
|
||||
msg := result["msg"].(string)
|
||||
util.LogErrorf("get cloud sync info failed: %s", msg)
|
||||
err = errors.New(msg)
|
||||
return
|
||||
}
|
||||
|
||||
data := result["data"].(map[string]interface{})
|
||||
assetSize = int64(data["assetSize"].(float64))
|
||||
backupSize = int64(data["backupSize"].(float64))
|
||||
if nil != data["d"] {
|
||||
device = data["d"].(string)
|
||||
}
|
||||
|
||||
if elapsed := time.Now().Sub(start).Milliseconds(); 5000 < elapsed {
|
||||
util.LogInfof("get cloud sync [%s] elapsed [%dms]", elapsed)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getLocalFileListOSS(dirPath string) (ret map[string]*CloudIndex, err error) {
|
||||
dir := "sync"
|
||||
if !strings.HasPrefix(dirPath, "sync") {
|
||||
dir = "backup"
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(filepath.Join(util.WorkspaceDir, dir, "index.json"))
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
|
||||
if err = gulu.JSON.UnmarshalJSON(data, &ret); nil != err {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getCloudFileListOSS(cloudDirPath string) (ret map[string]*CloudIndex, err error) {
|
||||
result := map[string]interface{}{}
|
||||
request := util.NewCloudRequest(Conf.System.NetworkProxy.String())
|
||||
resp, err := request.
|
||||
SetResult(&result).
|
||||
SetBody(map[string]string{"dirPath": cloudDirPath, "token": Conf.User.UserToken}).
|
||||
Post(util.AliyunServer + "/apis/siyuan/data/getSiYuanFileListURL?uid=" + Conf.User.UserId)
|
||||
if nil != err {
|
||||
util.LogErrorf("get cloud file list failed: %s", err)
|
||||
err = ErrFailedToConnectCloudServer
|
||||
return
|
||||
}
|
||||
|
||||
if 401 == resp.StatusCode {
|
||||
err = errors.New(Conf.Language(31))
|
||||
return
|
||||
}
|
||||
|
||||
code := result["code"].(float64)
|
||||
if 0 != code {
|
||||
util.LogErrorf("get cloud file list failed: %s", result["msg"])
|
||||
err = ErrFailedToConnectCloudServer
|
||||
return
|
||||
}
|
||||
|
||||
retData := result["data"].(map[string]interface{})
|
||||
downloadURL := retData["url"].(string)
|
||||
resp, err = util.NewCloudFileRequest15s(Conf.System.NetworkProxy.String()).Get(downloadURL)
|
||||
if nil != err {
|
||||
util.LogErrorf("download request [%s] failed: %s", downloadURL, err)
|
||||
return
|
||||
}
|
||||
if 200 != resp.StatusCode {
|
||||
util.LogErrorf("download request [%s] status code [%d]", downloadURL, resp.StatusCode)
|
||||
err = errors.New(fmt.Sprintf("download file list failed [%d]", resp.StatusCode))
|
||||
return
|
||||
}
|
||||
|
||||
data, err := resp.ToBytes()
if nil != err {
util.LogErrorf("read download response [%s] failed: %s", downloadURL, err)
return
}
|
||||
if err = gulu.JSON.UnmarshalJSON(data, &ret); nil != err {
|
||||
util.LogErrorf("unmarshal index failed: %s", err)
|
||||
err = errors.New(fmt.Sprintf("unmarshal index failed"))
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
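// localUpsertRemoveListOSS walks localDirPath and compares each file's etag
// with cloudFileList: local files missing from the cloud index are returned
// in localRemoves, cloud files whose content differs locally are returned in
// cloudFetches, and files above the single-file size limit are skipped.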
func localUpsertRemoveListOSS(localDirPath string, cloudFileList map[string]*CloudIndex) (localRemoves, cloudFetches []string, err error) {
|
||||
unchanged := map[string]bool{}
|
||||
|
||||
filepath.Walk(localDirPath, func(path string, info fs.FileInfo, err error) error {
|
||||
if localDirPath == path {
|
||||
return nil
|
||||
}
|
||||
|
||||
if info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
relPath := filepath.ToSlash(strings.TrimPrefix(path, localDirPath))
|
||||
cloudIdx, ok := cloudFileList[relPath]
|
||||
if !ok {
|
||||
if util.CloudSingleFileMaxSizeLimit < info.Size() {
|
||||
util.LogWarnf("file [%s] larger than 100MB, ignore removing it", path)
|
||||
return nil
|
||||
}
|
||||
|
||||
localRemoves = append(localRemoves, path)
|
||||
return nil
|
||||
}
|
||||
|
||||
localHash, hashErr := GetEtag(path)
|
||||
if nil != hashErr {
|
||||
util.LogErrorf("get local file [%s] etag failed: %s", path, hashErr)
|
||||
return nil
|
||||
}
|
||||
if cloudIdx.Hash == localHash {
|
||||
unchanged[relPath] = true
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
for cloudPath, cloudIndex := range cloudFileList {
|
||||
if _, ok := unchanged[cloudPath]; ok {
|
||||
continue
|
||||
}
|
||||
if util.CloudSingleFileMaxSizeLimit < cloudIndex.Size {
|
||||
util.LogWarnf("cloud file [%s] larger than 100MB, ignore fetching it", cloudPath)
|
||||
continue
|
||||
}
|
||||
cloudFetches = append(cloudFetches, cloudPath)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
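// cloudUpsertRemoveListOSS compares the cloud index with the local directory:
// cloud files that no longer exist locally go into cloudRemoves, and local
// files whose etag differs from the cloud copy (and that are not excluded by
// the sync ignore list or the size limit) go into localUpserts.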
func cloudUpsertRemoveListOSS(localDirPath string, cloudFileList map[string]*CloudIndex) (localUpserts, cloudRemoves []string, err error) {
|
||||
localUpserts, cloudRemoves = []string{}, []string{}
|
||||
unchanged := map[string]bool{}
|
||||
for cloudFile, cloudIdx := range cloudFileList {
|
||||
localCheckPath := filepath.Join(localDirPath, cloudFile)
|
||||
if !gulu.File.IsExist(localCheckPath) {
|
||||
cloudRemoves = append(cloudRemoves, cloudFile)
|
||||
continue
|
||||
}
|
||||
|
||||
localHash, hashErr := GetEtag(localCheckPath)
|
||||
if nil != hashErr {
|
||||
util.LogErrorf("get local file [%s] hash failed: %s", localCheckPath, hashErr)
|
||||
err = hashErr
|
||||
return
|
||||
}
|
||||
|
||||
if localHash == cloudIdx.Hash {
|
||||
unchanged[localCheckPath] = true
|
||||
}
|
||||
}
|
||||
|
||||
syncIgnoreList := getSyncIgnoreList()
|
||||
excludes := map[string]bool{}
|
||||
ignores := syncIgnoreList.Values()
|
||||
for _, p := range ignores {
|
||||
relPath := p.(string)
|
||||
relPath = pathSha246(relPath, "/")
|
||||
relPath = filepath.Join(localDirPath, relPath)
|
||||
excludes[relPath] = true
|
||||
}
|
||||
|
||||
delete(unchanged, filepath.Join(localDirPath, "index.json")) // sync occasionally fails with `The system cannot find the path specified.` https://github.com/siyuan-note/siyuan/issues/4942
|
||||
err = genCloudIndex(localDirPath, excludes)
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
|
||||
filepath.Walk(localDirPath, func(path string, info fs.FileInfo, err error) error {
|
||||
if localDirPath == path || info.IsDir() {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !unchanged[path] {
|
||||
if excludes[path] {
|
||||
return nil
|
||||
}
|
||||
if util.CloudSingleFileMaxSizeLimit < info.Size() {
|
||||
util.LogWarnf("file [%s] larger than 100MB, ignore uploading it", path)
|
||||
return nil
|
||||
}
|
||||
localUpserts = append(localUpserts, path)
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
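// putFileToCloud uploads filePath to object storage with the Qiniu form
// uploader, retrying once after 3 seconds on failure.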
func putFileToCloud(filePath, key, upToken string) (err error) {
|
||||
formUploader := storage.NewFormUploader(&storage.Config{UseHTTPS: true})
|
||||
ret := storage.PutRet{}
|
||||
err = formUploader.PutFile(context.Background(), &ret, upToken, key, filePath, nil)
|
||||
if nil != err {
|
||||
util.LogWarnf("put file [%s] to cloud failed [%s], retry it after 3s", filePath, err)
|
||||
time.Sleep(3 * time.Second)
|
||||
err = formUploader.PutFile(context.Background(), &ret, upToken, key, filePath, nil)
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
util.LogInfof("put file [%s] to cloud retry success", filePath)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// The following implements the Qiniu Cloud hash (qetag) algorithm https://github.com/qiniu/qetag/blob/master/qetag.go
|
||||
|
||||
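// GetEtag computes the Qiniu qetag checksum of a file: for files up to 4MB
// the result is the byte 0x16 followed by the SHA-1 of the content; for
// larger files it is 0x96 followed by the SHA-1 of the concatenated per-4MB
// block SHA-1s. The 21-byte buffer is then URL-safe base64 encoded.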
func GetEtag(filename string) (etag string, err error) {
|
||||
f, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
fi, err := f.Stat()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
fsize := fi.Size()
|
||||
blockCnt := BlockCount(fsize)
|
||||
sha1Buf := make([]byte, 0, 21)
|
||||
|
||||
if blockCnt <= 1 { // file size <= 4M
|
||||
sha1Buf = append(sha1Buf, 0x16)
|
||||
sha1Buf, err = CalSha1(sha1Buf, f)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
} else { // file size > 4M
|
||||
sha1Buf = append(sha1Buf, 0x96)
|
||||
sha1BlockBuf := make([]byte, 0, blockCnt*20)
|
||||
for i := 0; i < blockCnt; i++ {
|
||||
body := io.LimitReader(f, BLOCK_SIZE)
|
||||
sha1BlockBuf, err = CalSha1(sha1BlockBuf, body)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
sha1Buf, _ = CalSha1(sha1Buf, bytes.NewReader(sha1BlockBuf))
|
||||
}
|
||||
etag = base64.URLEncoding.EncodeToString(sha1Buf)
|
||||
return
|
||||
}
|
||||
|
||||
const (
|
||||
BLOCK_BITS = 22 // indicates that the block size is 4MB
|
||||
BLOCK_SIZE = 1 << BLOCK_BITS
|
||||
)
|
||||
|
||||
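// BlockCount returns the number of 4MB blocks needed to cover fsize,
// e.g. BlockCount(10<<20) == 3 for a 10MB file.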
func BlockCount(fsize int64) int {
|
||||
return int((fsize + (BLOCK_SIZE - 1)) >> BLOCK_BITS)
|
||||
}
|
||||
|
||||
func CalSha1(b []byte, r io.Reader) ([]byte, error) {
|
||||
|
||||
h := sha1.New()
|
||||
_, err := io.Copy(h, r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return h.Sum(b), nil
|
||||
}
|
||||
111 kernel/model/outline.go Normal file
@@ -0,0 +1,111 @@
// SiYuan - Build Your Eternal Digital Garden
|
||||
// Copyright (c) 2020-present, b3log.org
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/88250/lute/ast"
|
||||
"github.com/emirpasic/gods/stacks/linkedliststack"
|
||||
"github.com/siyuan-note/siyuan/kernel/treenode"
|
||||
)
|
||||
|
||||
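// Outline builds the outline of document rootID: it collects headings that
// are not inside blockquotes, nests them by heading level and returns the
// result as a flat path tree.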
func Outline(rootID string) (ret []*Path, err error) {
|
||||
time.Sleep(512 * time.Millisecond /* front-end queue polling interval */)
|
||||
WaitForWritingFiles()
|
||||
|
||||
ret = []*Path{}
|
||||
tree, _ := loadTreeByBlockID(rootID)
|
||||
if nil == tree {
|
||||
return
|
||||
}
|
||||
|
||||
luteEngine := NewLute()
|
||||
var headings []*Block
|
||||
ast.Walk(tree.Root, func(n *ast.Node, entering bool) ast.WalkStatus {
|
||||
if entering && ast.NodeHeading == n.Type && !n.ParentIs(ast.NodeBlockquote) {
|
||||
n.Box, n.Path = tree.Box, tree.Path
|
||||
block := &Block{
|
||||
RootID: rootID,
|
||||
Depth: n.HeadingLevel,
|
||||
Box: n.Box,
|
||||
Path: n.Path,
|
||||
ID: n.ID,
|
||||
Content: renderOutline(n, luteEngine),
|
||||
Type: n.Type.String(),
|
||||
SubType: treenode.SubTypeAbbr(n),
|
||||
}
|
||||
headings = append(headings, block)
|
||||
return ast.WalkSkipChildren
|
||||
}
|
||||
return ast.WalkContinue
|
||||
})
|
||||
|
||||
if 1 > len(headings) {
|
||||
return
|
||||
}
|
||||
|
||||
var blocks []*Block
|
||||
stack := linkedliststack.New()
|
||||
for _, h := range headings {
|
||||
L:
|
||||
for ; ; stack.Pop() {
|
||||
cur, ok := stack.Peek()
|
||||
if !ok {
|
||||
blocks = append(blocks, h)
|
||||
stack.Push(h)
|
||||
break L
|
||||
}
|
||||
|
||||
tip := cur.(*Block)
|
||||
if tip.Depth < h.Depth {
|
||||
tip.Children = append(tip.Children, h)
|
||||
stack.Push(h)
|
||||
break L
|
||||
}
|
||||
tip.Count = len(tip.Children)
|
||||
}
|
||||
}
|
||||
|
||||
ret = toFlatTree(blocks, 0, "outline")
|
||||
if 0 < len(ret) {
|
||||
children := ret[0].Blocks
|
||||
ret = nil
|
||||
for _, b := range children {
|
||||
resetDepth(b, 0)
|
||||
ret = append(ret, &Path{
|
||||
ID: b.ID,
|
||||
Box: b.Box,
|
||||
Name: b.Content,
|
||||
Type: b.Type,
|
||||
SubType: b.SubType,
|
||||
Blocks: b.Children,
|
||||
Depth: 0,
|
||||
Count: b.Count,
|
||||
})
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func resetDepth(b *Block, depth int) {
|
||||
b.Depth = depth
|
||||
b.Count = len(b.Children)
|
||||
for _, c := range b.Children {
|
||||
resetDepth(c, depth+1)
|
||||
}
|
||||
}
|
||||
354 kernel/model/path.go Normal file
@@ -0,0 +1,354 @@
// SiYuan - Build Your Eternal Digital Garden
|
||||
// Copyright (c) 2020-present, b3log.org
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/88250/gulu"
|
||||
"github.com/88250/lute/ast"
|
||||
"github.com/siyuan-note/siyuan/kernel/search"
|
||||
"github.com/siyuan-note/siyuan/kernel/sql"
|
||||
"github.com/siyuan-note/siyuan/kernel/treenode"
|
||||
"github.com/siyuan-note/siyuan/kernel/util"
|
||||
)
|
||||
|
||||
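// createDocsByHPath creates the document at the human-readable path hPath in
// box boxID, creating any missing ancestor documents along the way; if a
// document already exists at hPath a random suffix is appended first.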
func createDocsByHPath(boxID, hPath, content string) (id string, err error) {
|
||||
hPath = strings.TrimSuffix(hPath, ".sy")
|
||||
if docExist := nil != treenode.GetBlockTreeRootByHPath(boxID, hPath); docExist {
|
||||
hPath += "-" + gulu.Rand.String(7)
|
||||
}
|
||||
pathBuilder := bytes.Buffer{}
|
||||
pathBuilder.WriteString("/")
|
||||
hPathBuilder := bytes.Buffer{}
|
||||
hPathBuilder.WriteString("/")
|
||||
|
||||
parts := strings.Split(hPath, "/")[1:]
|
||||
for i, part := range parts {
|
||||
hPathBuilder.WriteString(part)
|
||||
hp := hPathBuilder.String()
|
||||
root := treenode.GetBlockTreeRootByHPath(boxID, hp)
|
||||
isNotLast := i < len(parts)-1
|
||||
if nil == root {
|
||||
id = ast.NewNodeID()
|
||||
pathBuilder.WriteString(id)
|
||||
docP := pathBuilder.String() + ".sy"
|
||||
if isNotLast {
|
||||
if err = createDoc(boxID, docP, part, ""); nil != err {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if err = createDoc(boxID, docP, part, content); nil != err {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if isNotLast {
|
||||
dirPath := filepath.Join(util.DataDir, boxID, pathBuilder.String())
|
||||
if err = os.MkdirAll(dirPath, 0755); nil != err {
|
||||
util.LogErrorf("mkdir [%s] failed: %s", dirPath, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
} else {
|
||||
id = root.ID
|
||||
pathBuilder.WriteString(root.ID)
|
||||
if !isNotLast {
|
||||
pathBuilder.WriteString(".sy")
|
||||
}
|
||||
}
|
||||
|
||||
if isNotLast {
|
||||
pathBuilder.WriteString("/")
|
||||
hPathBuilder.WriteString("/")
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
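// toFlatTree groups the given blocks under their root documents and returns
// one Path entry per document, sorted by ID in descending order.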
func toFlatTree(blocks []*Block, baseDepth int, typ string) (ret []*Path) {
|
||||
var blockRoots []*Block
|
||||
for _, block := range blocks {
|
||||
root := getBlockIn(blockRoots, block.RootID)
|
||||
if nil == root {
|
||||
root, _ = getBlock(block.RootID)
|
||||
blockRoots = append(blockRoots, root)
|
||||
}
|
||||
if nil == root {
|
||||
return
|
||||
}
|
||||
block.Depth = baseDepth + 1
|
||||
block.Count = len(block.Children)
|
||||
root.Children = append(root.Children, block)
|
||||
}
|
||||
|
||||
for _, root := range blockRoots {
|
||||
treeNode := &Path{
|
||||
ID: root.ID,
|
||||
Box: root.Box,
|
||||
Name: path.Base(root.HPath),
|
||||
NodeType: root.Type,
|
||||
Type: typ,
|
||||
SubType: root.SubType,
|
||||
Depth: baseDepth,
|
||||
Count: len(root.Children),
|
||||
}
|
||||
for _, c := range root.Children {
|
||||
treeNode.Blocks = append(treeNode.Blocks, c)
|
||||
}
|
||||
ret = append(ret, treeNode)
|
||||
}
|
||||
|
||||
sort.Slice(ret, func(i, j int) bool {
|
||||
return ret[i].ID > ret[j].ID
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
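// toSubTree groups backlink blocks under their root documents and, for list
// items and headings, expands their child blocks so the backlink panel can
// show context; keyword matches are highlighted via search.MarkText.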
func toSubTree(blocks []*Block, keyword string) (ret []*Path) {
|
||||
keyword = strings.TrimSpace(keyword)
|
||||
var blockRoots []*Block
|
||||
for _, block := range blocks {
|
||||
root := getBlockIn(blockRoots, block.RootID)
|
||||
if nil == root {
|
||||
root, _ = getBlock(block.RootID)
|
||||
blockRoots = append(blockRoots, root)
|
||||
}
|
||||
block.Depth = 1
|
||||
block.Count = len(block.Children)
|
||||
root.Children = append(root.Children, block)
|
||||
}
|
||||
|
||||
for _, root := range blockRoots {
|
||||
treeNode := &Path{
|
||||
ID: root.ID,
|
||||
Box: root.Box,
|
||||
Name: path.Base(root.HPath),
|
||||
Type: "backlink",
|
||||
NodeType: "NodeDocument",
|
||||
SubType: root.SubType,
|
||||
Depth: 0,
|
||||
Count: len(root.Children),
|
||||
}
|
||||
for _, c := range root.Children {
|
||||
if "NodeListItem" == c.Type {
|
||||
tree, _ := loadTreeByBlockID(c.RootID)
|
||||
li := treenode.GetNodeInTree(tree, c.ID)
|
||||
var first *sql.Block
|
||||
if 3 != li.ListData.Typ {
|
||||
first = sql.GetBlock(li.FirstChild.ID)
|
||||
} else {
|
||||
first = sql.GetBlock(li.FirstChild.Next.ID)
|
||||
}
|
||||
name := first.Content
|
||||
parentPos := 0
|
||||
if "" != keyword {
|
||||
parentPos, name = search.MarkText(name, keyword, 12, Conf.Search.CaseSensitive)
|
||||
}
|
||||
subRoot := &Path{
|
||||
ID: li.ID,
|
||||
Box: li.Box,
|
||||
Name: name,
|
||||
Type: "backlink",
|
||||
NodeType: li.Type.String(),
|
||||
SubType: c.SubType,
|
||||
Depth: 1,
|
||||
Count: 1,
|
||||
}
|
||||
|
||||
unfold := true
|
||||
for liFirstBlockSpan := li.FirstChild.FirstChild; nil != liFirstBlockSpan; liFirstBlockSpan = liFirstBlockSpan.Next {
|
||||
if ast.NodeBlockRef == liFirstBlockSpan.Type {
|
||||
continue
|
||||
}
|
||||
if "" != strings.TrimSpace(liFirstBlockSpan.Text()) {
|
||||
unfold = false
|
||||
break
|
||||
}
|
||||
}
|
||||
for next := li.FirstChild.Next; nil != next; next = next.Next {
|
||||
subBlock, _ := getBlock(next.ID)
|
||||
if unfold {
|
||||
if ast.NodeList == next.Type {
|
||||
for subLi := next.FirstChild; nil != subLi; subLi = subLi.Next {
|
||||
subLiBlock, _ := getBlock(subLi.ID)
|
||||
var subFirst *sql.Block
|
||||
if 3 != subLi.ListData.Typ {
|
||||
subFirst = sql.GetBlock(subLi.FirstChild.ID)
|
||||
} else {
|
||||
subFirst = sql.GetBlock(subLi.FirstChild.Next.ID)
|
||||
}
|
||||
subPos := 0
|
||||
content := subFirst.Content
|
||||
if "" != keyword {
|
||||
subPos, content = search.MarkText(subFirst.Content, keyword, 12, Conf.Search.CaseSensitive)
|
||||
}
|
||||
if -1 < subPos {
|
||||
parentPos = 0 // the parent needs to be shown
|
||||
}
|
||||
subLiBlock.Content = content
|
||||
subLiBlock.Depth = 2
|
||||
subRoot.Blocks = append(subRoot.Blocks, subLiBlock)
|
||||
}
|
||||
} else if ast.NodeHeading == next.Type {
|
||||
subBlock.Depth = 2
|
||||
subRoot.Blocks = append(subRoot.Blocks, subBlock)
|
||||
headingChildren := treenode.HeadingChildren(next)
|
||||
var breakSub bool
|
||||
for _, n := range headingChildren {
|
||||
block, _ := getBlock(n.ID)
|
||||
subPos := 0
|
||||
content := block.Content
|
||||
if "" != keyword {
|
||||
subPos, content = search.MarkText(block.Content, keyword, 12, Conf.Search.CaseSensitive)
|
||||
}
|
||||
if -1 < subPos {
|
||||
parentPos = 0
|
||||
}
|
||||
block.Content = content
|
||||
block.Depth = 3
|
||||
subRoot.Blocks = append(subRoot.Blocks, block)
|
||||
if ast.NodeHeading == n.Type {
|
||||
// skip blocks under the subheading
|
||||
breakSub = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if breakSub {
|
||||
break
|
||||
}
|
||||
} else {
|
||||
if nil == treenode.HeadingParent(next) {
|
||||
subBlock.Depth = 2
|
||||
subRoot.Blocks = append(subRoot.Blocks, subBlock)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if -1 < parentPos {
|
||||
treeNode.Children = append(treeNode.Children, subRoot)
|
||||
}
|
||||
} else if "NodeHeading" == c.Type {
|
||||
tree, _ := loadTreeByBlockID(c.RootID)
|
||||
h := treenode.GetNodeInTree(tree, c.ID)
|
||||
name := sql.GetBlock(h.ID).Content
|
||||
parentPos := 0
|
||||
if "" != keyword {
|
||||
parentPos, name = search.MarkText(name, keyword, 12, Conf.Search.CaseSensitive)
|
||||
}
|
||||
subRoot := &Path{
|
||||
ID: h.ID,
|
||||
Box: h.Box,
|
||||
Name: name,
|
||||
Type: "backlink",
|
||||
NodeType: h.Type.String(),
|
||||
SubType: c.SubType,
|
||||
Depth: 1,
|
||||
Count: 1,
|
||||
}
|
||||
|
||||
unfold := true
|
||||
for headingFirstSpan := h.FirstChild; nil != headingFirstSpan; headingFirstSpan = headingFirstSpan.Next {
|
||||
if ast.NodeBlockRef == headingFirstSpan.Type {
|
||||
continue
|
||||
}
|
||||
if "" != strings.TrimSpace(headingFirstSpan.Text()) {
|
||||
unfold = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if unfold {
|
||||
headingChildren := treenode.HeadingChildren(h)
|
||||
for _, headingChild := range headingChildren {
|
||||
if ast.NodeList == headingChild.Type {
|
||||
for subLi := headingChild.FirstChild; nil != subLi; subLi = subLi.Next {
|
||||
subLiBlock, _ := getBlock(subLi.ID)
|
||||
var subFirst *sql.Block
|
||||
if 3 != subLi.ListData.Typ {
|
||||
subFirst = sql.GetBlock(subLi.FirstChild.ID)
|
||||
} else {
|
||||
subFirst = sql.GetBlock(subLi.FirstChild.Next.ID)
|
||||
}
|
||||
subPos := 0
|
||||
content := subFirst.Content
|
||||
if "" != keyword {
|
||||
subPos, content = search.MarkText(content, keyword, 12, Conf.Search.CaseSensitive)
|
||||
}
|
||||
if -1 < subPos {
|
||||
parentPos = 0
|
||||
}
|
||||
subLiBlock.Content = content
|
||||
subLiBlock.Depth = 2
|
||||
subRoot.Blocks = append(subRoot.Blocks, subLiBlock)
|
||||
}
|
||||
} else {
|
||||
subBlock, _ := getBlock(headingChild.ID)
|
||||
subBlock.Depth = 2
|
||||
subRoot.Blocks = append(subRoot.Blocks, subBlock)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if -1 < parentPos {
|
||||
treeNode.Children = append(treeNode.Children, subRoot)
|
||||
}
|
||||
} else {
|
||||
pos := 0
|
||||
content := c.Content
|
||||
if "" != keyword {
|
||||
pos, content = search.MarkText(content, keyword, 12, Conf.Search.CaseSensitive)
|
||||
}
|
||||
if -1 < pos {
|
||||
treeNode.Blocks = append(treeNode.Blocks, c)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
rootPos := -1
|
||||
var rootContent string
|
||||
if "" != keyword {
|
||||
rootPos, rootContent = search.MarkText(treeNode.Name, keyword, 12, Conf.Search.CaseSensitive)
|
||||
treeNode.Name = rootContent
|
||||
}
|
||||
if 0 < len(treeNode.Children) || 0 < len(treeNode.Blocks) || (-1 < rootPos && "" != keyword) {
|
||||
ret = append(ret, treeNode)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Slice(ret, func(i, j int) bool {
|
||||
return ret[i].ID > ret[j].ID
|
||||
})
|
||||
return
|
||||
}
|
||||
|
||||
func getBlockIn(blocks []*Block, id string) *Block {
|
||||
if "" == id {
|
||||
return nil
|
||||
}
|
||||
for _, block := range blocks {
|
||||
if block.ID == id {
|
||||
return block
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
47 kernel/model/process.go Normal file
@@ -0,0 +1,47 @@
// SiYuan - Build Your Eternal Digital Garden
|
||||
// Copyright (c) 2020-present, b3log.org
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/siyuan-note/siyuan/kernel/util"
|
||||
)
|
||||
|
||||
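// HookResident exits the kernel when it is not running in resident mode and
// no session is active at a 30-second polling tick.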
func HookResident() {
|
||||
if util.Resident {
|
||||
return
|
||||
}
|
||||
|
||||
for range time.Tick(time.Second * 30) {
|
||||
if 0 == util.CountSessions() {
|
||||
util.LogInfof("no active session, exit kernel process now")
|
||||
Close(false)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
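// HandleSignal blocks until SIGINT, SIGQUIT or SIGTERM is received and then
// shuts the kernel down.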
func HandleSignal() {
|
||||
c := make(chan os.Signal, 1) // buffered so signal.Notify does not drop the signal
|
||||
signal.Notify(c, syscall.SIGINT, syscall.SIGQUIT, syscall.SIGTERM)
|
||||
s := <-c
|
||||
util.LogInfof("received os signal [%s], exit kernel process now", s)
|
||||
Close(false)
|
||||
}
|
||||
234 kernel/model/render.go Normal file
@@ -0,0 +1,234 @@
// SiYuan - Build Your Eternal Digital Garden
|
||||
// Copyright (c) 2020-present, b3log.org
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
|
||||
"github.com/88250/lute"
|
||||
"github.com/88250/lute/ast"
|
||||
"github.com/88250/lute/html"
|
||||
"github.com/88250/lute/parse"
|
||||
"github.com/88250/lute/render"
|
||||
"github.com/siyuan-note/siyuan/kernel/sql"
|
||||
"github.com/siyuan-note/siyuan/kernel/treenode"
|
||||
)
|
||||
|
||||
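// renderOutline renders a heading (or document) node to the text shown in
// the outline panel, keeping tags, block refs and a few inline elements.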
func renderOutline(node *ast.Node, luteEngine *lute.Lute) (ret string) {
|
||||
if nil == node {
|
||||
return ""
|
||||
}
|
||||
|
||||
if ast.NodeDocument == node.Type {
|
||||
return node.IALAttr("title")
|
||||
}
|
||||
|
||||
buf := bytes.Buffer{}
|
||||
buf.Grow(4096)
|
||||
ast.Walk(node, func(n *ast.Node, entering bool) ast.WalkStatus {
|
||||
if !entering {
|
||||
return ast.WalkContinue
|
||||
}
|
||||
switch n.Type {
|
||||
case ast.NodeTagOpenMarker, ast.NodeTagCloseMarker:
|
||||
buf.WriteByte('#')
|
||||
case ast.NodeBlockRef:
|
||||
buf.WriteString(html.EscapeString(treenode.GetDynamicBlockRefText(n)))
|
||||
return ast.WalkSkipChildren
|
||||
case ast.NodeText, ast.NodeLinkText, ast.NodeFileAnnotationRefText, ast.NodeFootnotesRef, ast.NodeCodeBlockCode, ast.NodeMathBlockContent:
|
||||
tokens := html.EscapeHTML(n.Tokens)
|
||||
tokens = bytes.ReplaceAll(tokens, []byte(" "), []byte(" ")) // multiple spaces cannot be shown in outline panel entries https://github.com/siyuan-note/siyuan/issues/4370
|
||||
buf.Write(tokens)
|
||||
case ast.NodeInlineMath, ast.NodeStrong, ast.NodeEmphasis, ast.NodeCodeSpan:
|
||||
dom := lute.RenderNodeBlockDOM(n, luteEngine.ParseOptions, luteEngine.RenderOptions)
|
||||
buf.WriteString(dom)
|
||||
return ast.WalkSkipChildren
|
||||
}
|
||||
return ast.WalkContinue
|
||||
})
|
||||
|
||||
ret = strings.TrimSpace(buf.String())
|
||||
ret = strings.ReplaceAll(ret, "\n", "")
|
||||
return
|
||||
}
|
||||
|
||||
func renderBlockText(node *ast.Node) (ret string) {
|
||||
ret = treenode.NodeStaticContent(node)
|
||||
ret = strings.TrimSpace(ret)
|
||||
ret = strings.ReplaceAll(ret, "\n", "")
|
||||
ret = html.EscapeString(ret)
|
||||
ret = strings.TrimSpace(ret)
|
||||
if "" == ret {
|
||||
// pasting fails when a block with empty content is copied as a block ref https://github.com/siyuan-note/siyuan/issues/4962
|
||||
buf := bytes.Buffer{}
|
||||
ast.Walk(node, func(n *ast.Node, entering bool) ast.WalkStatus {
|
||||
if !entering {
|
||||
return ast.WalkContinue
|
||||
}
|
||||
|
||||
if ast.NodeImage == n.Type {
|
||||
title := n.ChildByType(ast.NodeLinkTitle)
|
||||
if nil == title {
|
||||
alt := n.ChildByType(ast.NodeLinkText)
|
||||
if nil != alt && 0 < len(alt.Tokens) {
|
||||
buf.Write(alt.Tokens)
|
||||
} else {
|
||||
buf.WriteString("image")
|
||||
}
|
||||
} else {
|
||||
buf.Write(title.Tokens)
|
||||
}
|
||||
}
|
||||
return ast.WalkContinue
|
||||
})
|
||||
ret = buf.String()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func renderBlockDOMByNodes(nodes []*ast.Node, luteEngine *lute.Lute) string {
|
||||
tree := &parse.Tree{Root: &ast.Node{Type: ast.NodeDocument}, Context: &parse.Context{ParseOption: luteEngine.ParseOptions}}
|
||||
blockRenderer := render.NewBlockRenderer(tree, luteEngine.RenderOptions)
|
||||
for _, n := range nodes {
|
||||
ast.Walk(n, func(node *ast.Node, entering bool) ast.WalkStatus {
|
||||
rendererFunc := blockRenderer.RendererFuncs[node.Type]
|
||||
return rendererFunc(node, entering)
|
||||
})
|
||||
}
|
||||
h := strings.TrimSpace(blockRenderer.Writer.String())
|
||||
if strings.HasPrefix(h, "<li") {
|
||||
h = "<ul>" + h + "</ul>"
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
func renderBlockMarkdownR(id string) string {
|
||||
depth := 0
|
||||
nodes := renderBlockMarkdownR0(id, &depth)
|
||||
buf := bytes.Buffer{}
|
||||
buf.Grow(4096)
|
||||
luteEngine := NewLute()
|
||||
for _, n := range nodes {
|
||||
md := treenode.FormatNode(n, luteEngine)
|
||||
buf.WriteString(md)
|
||||
buf.WriteString("\n\n")
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
func renderBlockMarkdownR0(id string, depth *int) (ret []*ast.Node) {
|
||||
*depth++
|
||||
if 7 < *depth {
|
||||
return
|
||||
}
|
||||
b := treenode.GetBlockTree(id)
|
||||
if nil == b {
|
||||
return
|
||||
}
|
||||
|
||||
var err error
|
||||
var t *parse.Tree
|
||||
if t, err = loadTreeByBlockID(b.ID); nil != err {
|
||||
return
|
||||
}
|
||||
node := treenode.GetNodeInTree(t, b.ID)
|
||||
if nil == node {
|
||||
return
|
||||
}
|
||||
|
||||
var children []*ast.Node
|
||||
if ast.NodeHeading == node.Type {
|
||||
children = append(children, node)
|
||||
children = append(children, treenode.HeadingChildren(node)...)
|
||||
} else if ast.NodeDocument == node.Type {
|
||||
for c := node.FirstChild; nil != c; c = c.Next {
|
||||
children = append(children, c)
|
||||
}
|
||||
} else {
|
||||
children = append(children, node)
|
||||
}
|
||||
|
||||
for _, child := range children {
|
||||
var unlinks, inserts []*ast.Node
|
||||
ast.Walk(child, func(n *ast.Node, entering bool) ast.WalkStatus {
|
||||
if !entering || !n.IsBlock() {
|
||||
return ast.WalkContinue
|
||||
}
|
||||
|
||||
if ast.NodeBlockQueryEmbed == n.Type {
|
||||
stmt := n.ChildByType(ast.NodeBlockQueryEmbedScript).TokensStr()
|
||||
stmt = html.UnescapeString(stmt)
|
||||
sqlBlocks := sql.SelectBlocksRawStmt(stmt, Conf.Search.Limit)
|
||||
for _, sqlBlock := range sqlBlocks {
|
||||
subNodes := renderBlockMarkdownR0(sqlBlock.ID, depth)
|
||||
for _, subNode := range subNodes {
|
||||
inserts = append(inserts, subNode)
|
||||
}
|
||||
}
|
||||
unlinks = append(unlinks, n)
|
||||
return ast.WalkSkipChildren
|
||||
}
|
||||
return ast.WalkContinue
|
||||
})
|
||||
for _, n := range unlinks {
|
||||
n.Unlink()
|
||||
}
|
||||
|
||||
if ast.NodeBlockQueryEmbed != child.Type {
|
||||
ret = append(ret, child)
|
||||
} else {
|
||||
for _, n := range inserts {
|
||||
ret = append(ret, n)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func renderBlockMarkdown(node *ast.Node) string {
|
||||
var nodes []*ast.Node
|
||||
ast.Walk(node, func(n *ast.Node, entering bool) ast.WalkStatus {
|
||||
if entering {
|
||||
nodes = append(nodes, n)
|
||||
if ast.NodeHeading == node.Type {
|
||||
// support referencing "heading blocks"
|
||||
children := treenode.HeadingChildren(n)
|
||||
nodes = append(nodes, children...)
|
||||
}
|
||||
}
|
||||
return ast.WalkSkipChildren
|
||||
})
|
||||
|
||||
root := &ast.Node{Type: ast.NodeDocument}
|
||||
luteEngine := NewLute()
|
||||
luteEngine.SetKramdownIAL(false)
|
||||
luteEngine.SetSuperBlock(false)
|
||||
tree := &parse.Tree{Root: root, Context: &parse.Context{ParseOption: luteEngine.ParseOptions}}
|
||||
renderer := render.NewFormatRenderer(tree, luteEngine.RenderOptions)
|
||||
renderer.Writer = &bytes.Buffer{}
|
||||
renderer.Writer.Grow(4096)
|
||||
renderer.NodeWriterStack = append(renderer.NodeWriterStack, renderer.Writer) // rendering may not start from the root, so the writer stack needs initializing
|
||||
for _, node := range nodes {
|
||||
ast.Walk(node, func(n *ast.Node, entering bool) ast.WalkStatus {
|
||||
rendererFunc := renderer.RendererFuncs[n.Type]
|
||||
return rendererFunc(n, entering)
|
||||
})
|
||||
}
|
||||
return strings.TrimSpace(renderer.Writer.String())
|
||||
}
|
||||
513 kernel/model/search.go Normal file
@@ -0,0 +1,513 @@
// SiYuan - Build Your Eternal Digital Garden
|
||||
// Copyright (c) 2020-present, b3log.org
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"path"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/88250/gulu"
|
||||
"github.com/88250/lute/ast"
|
||||
"github.com/88250/lute/html"
|
||||
"github.com/88250/lute/parse"
|
||||
"github.com/jinzhu/copier"
|
||||
"github.com/siyuan-note/siyuan/kernel/conf"
|
||||
"github.com/siyuan-note/siyuan/kernel/search"
|
||||
"github.com/siyuan-note/siyuan/kernel/sql"
|
||||
"github.com/siyuan-note/siyuan/kernel/treenode"
|
||||
"github.com/siyuan-note/siyuan/kernel/util"
|
||||
"github.com/xrash/smetrics"
|
||||
)
|
||||
|
||||
func SearchEmbedBlock(stmt string, excludeIDs []string, headingMode int) (ret []*Block) {
|
||||
WaitForWritingFiles()
|
||||
return searchEmbedBlock(stmt, excludeIDs, headingMode)
|
||||
}
|
||||
|
||||
func searchEmbedBlock(stmt string, excludeIDs []string, headingMode int) (ret []*Block) {
|
||||
sqlBlocks := sql.SelectBlocksRawStmtNoParse(stmt, Conf.Search.Limit)
|
||||
var tmp []*sql.Block
|
||||
for _, b := range sqlBlocks {
|
||||
if !gulu.Str.Contains(b.ID, excludeIDs) {
|
||||
tmp = append(tmp, b)
|
||||
}
|
||||
}
|
||||
sqlBlocks = tmp
|
||||
for _, sb := range sqlBlocks {
|
||||
block := getBlockRendered(sb.ID, headingMode)
|
||||
if nil == block {
|
||||
continue
|
||||
}
|
||||
ret = append(ret, block)
|
||||
}
|
||||
|
||||
if 1 > len(ret) {
|
||||
ret = []*Block{}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
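// SearchRefBlock returns block-ref candidates for the `((` popup: with an
// empty keyword it lists recently used refs, otherwise it runs a full-text
// search, excluding the current block, its document and its first leaf child.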
func SearchRefBlock(id, rootID, keyword string, beforeLen int) (ret []*Block, newDoc bool) {
|
||||
if "" == keyword {
|
||||
// when the query is empty, block refs are ordered by most recently used https://github.com/siyuan-note/siyuan/issues/3218
|
||||
refs := sql.QueryRefsRecent()
|
||||
for _, ref := range refs {
|
||||
sqlBlock := sql.GetBlock(ref.DefBlockID)
|
||||
block := fromSQLBlock(sqlBlock, "", beforeLen)
|
||||
if nil == block {
|
||||
continue
|
||||
}
|
||||
block.Content = maxContent(block.Content, Conf.Editor.BlockRefDynamicAnchorTextMaxLen)
|
||||
block.RefText = block.Content
|
||||
if block.IsContainerBlock() {
|
||||
block.RefText = block.FContent // when referencing a list item via `((`, use the first child block as the dynamic anchor text https://github.com/siyuan-note/siyuan/issues/4536
|
||||
}
|
||||
ret = append(ret, block)
|
||||
}
|
||||
if 1 > len(ret) {
|
||||
ret = []*Block{}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
ret = fullTextSearchRefBlock(keyword, beforeLen)
|
||||
tmp := ret[:0]
|
||||
trees := map[string]*parse.Tree{}
|
||||
for _, b := range ret {
|
||||
hitFirstChildID := false
|
||||
b.RefText = b.Content
|
||||
if b.IsContainerBlock() {
|
||||
b.RefText = b.FContent // when referencing a list item via `((`, use the first child block as the dynamic anchor text https://github.com/siyuan-note/siyuan/issues/4536
|
||||
|
||||
// exclude the current block's parent block from `((` ref candidates https://github.com/siyuan-note/siyuan/issues/4538
|
||||
tree := trees[b.RootID]
|
||||
if nil == tree {
|
||||
tree, _ = loadTreeByBlockID(b.RootID)
|
||||
trees[b.RootID] = tree
|
||||
}
|
||||
if nil != tree {
|
||||
bNode := treenode.GetNodeInTree(tree, b.ID)
|
||||
if fc := treenode.FirstLeafBlock(bNode); nil != fc && fc.ID == id {
|
||||
hitFirstChildID = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if b.ID != id && !hitFirstChildID && b.ID != rootID {
|
||||
b.Content = maxContent(b.Content, Conf.Editor.BlockRefDynamicAnchorTextMaxLen)
|
||||
tmp = append(tmp, b)
|
||||
}
|
||||
}
|
||||
ret = tmp
|
||||
|
||||
if "" != keyword {
|
||||
if block := treenode.GetBlockTree(id); nil != block {
|
||||
p := path.Join(block.HPath, keyword)
|
||||
newDoc = nil == treenode.GetBlockTreeRootByHPath(block.BoxID, p)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
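// FindReplace replaces keyword with replacement in the given block IDs,
// covering document titles and plain text, link, code and math nodes, then
// reloads the UI when more than one block was touched.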
func FindReplace(keyword, replacement string, ids []string) (err error) {
|
||||
keyword = strings.Trim(keyword, "\"") // FTS strings must have double quotes stripped
|
||||
if keyword == replacement {
|
||||
return
|
||||
}
|
||||
|
||||
ids = util.RemoveDuplicatedElem(ids)
|
||||
for _, id := range ids {
|
||||
var tree *parse.Tree
|
||||
tree, err = loadTreeByBlockID(id)
|
||||
if nil != err {
|
||||
return
|
||||
}
|
||||
|
||||
node := treenode.GetNodeInTree(tree, id)
|
||||
if nil == node {
|
||||
return
|
||||
}
|
||||
|
||||
ast.Walk(node, func(n *ast.Node, entering bool) ast.WalkStatus {
|
||||
if !entering {
|
||||
return ast.WalkContinue
|
||||
}
|
||||
|
||||
switch n.Type {
|
||||
case ast.NodeDocument:
|
||||
title := n.IALAttr("title")
|
||||
if strings.Contains(title, keyword) {
|
||||
n.SetIALAttr("title", strings.ReplaceAll(title, keyword, replacement))
|
||||
}
|
||||
case ast.NodeText, ast.NodeLinkText, ast.NodeLinkTitle, ast.NodeCodeSpanContent, ast.NodeCodeBlockCode, ast.NodeInlineMathContent, ast.NodeMathBlockContent:
|
||||
if bytes.Contains(n.Tokens, []byte(keyword)) {
|
||||
n.Tokens = bytes.ReplaceAll(n.Tokens, []byte(keyword), []byte(replacement))
|
||||
}
|
||||
}
|
||||
return ast.WalkContinue
|
||||
})
|
||||
|
||||
if err = writeJSONQueue(tree); nil != err {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
WaitForWritingFiles()
|
||||
if 1 < len(ids) {
|
||||
go func() {
|
||||
time.Sleep(time.Second)
|
||||
util.ReloadUI()
|
||||
}()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func FullTextSearchBlock(query, box, path string, types map[string]bool, querySyntax bool) (ret []*Block) {
|
||||
query = strings.TrimSpace(query)
|
||||
if queryStrLower := strings.ToLower(query); strings.Contains(queryStrLower, "select ") && strings.Contains(queryStrLower, " * ") && strings.Contains(queryStrLower, " from ") {
|
||||
ret = searchBySQL(query, 12)
|
||||
} else {
|
||||
filter := searchFilter(types)
|
||||
ret = fullTextSearch(query, box, path, filter, 12, querySyntax)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func searchFilter(types map[string]bool) string {
|
||||
s := conf.NewSearch()
|
||||
if err := copier.Copy(s, Conf.Search); nil != err {
|
||||
util.LogErrorf("copy search conf failed: %s", err)
|
||||
}
|
||||
if nil != types {
|
||||
s.Document = types["document"]
|
||||
s.Heading = types["heading"]
|
||||
s.List = types["list"]
|
||||
s.ListItem = types["listItem"]
|
||||
s.CodeBlock = types["codeBlock"]
|
||||
s.MathBlock = types["mathBlock"]
|
||||
s.Table = types["table"]
|
||||
s.Blockquote = types["blockquote"]
|
||||
s.SuperBlock = types["superBlock"]
|
||||
s.Paragraph = types["paragraph"]
|
||||
s.HTMLBlock = types["htmlBlock"]
|
||||
} else {
|
||||
s.Document = Conf.Search.Document
|
||||
s.Heading = Conf.Search.Heading
|
||||
s.List = Conf.Search.List
|
||||
s.ListItem = Conf.Search.ListItem
|
||||
s.CodeBlock = Conf.Search.CodeBlock
|
||||
s.MathBlock = Conf.Search.MathBlock
|
||||
s.Table = Conf.Search.Table
|
||||
s.Blockquote = Conf.Search.Blockquote
|
||||
s.SuperBlock = Conf.Search.SuperBlock
|
||||
s.Paragraph = Conf.Search.Paragraph
|
||||
s.HTMLBlock = Conf.Search.HTMLBlock
|
||||
}
|
||||
return s.TypeFilter()
|
||||
}
|
||||
|
||||
func searchBySQL(stmt string, beforeLen int) (ret []*Block) {
|
||||
stmt = util.RemoveInvisible(stmt)
|
||||
blocks := sql.SelectBlocksRawStmt(stmt, Conf.Search.Limit)
|
||||
ret = fromSQLBlocks(&blocks, "", beforeLen)
|
||||
if 1 > len(ret) {
|
||||
ret = []*Block{}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func fullTextSearchRefBlock(keyword string, beforeLen int) (ret []*Block) {
|
||||
keyword = util.RemoveInvisible(keyword)
|
||||
|
||||
if util.IsIDPattern(keyword) {
|
||||
ret = searchBySQL("SELECT * FROM `blocks` WHERE `id` = '"+keyword+"'", 12)
|
||||
return
|
||||
}
|
||||
|
||||
quotedKeyword := stringQuery(keyword)
|
||||
table := "blocks_fts" // 大小写敏感
|
||||
if !Conf.Search.CaseSensitive {
|
||||
table = "blocks_fts_case_insensitive"
|
||||
}
|
||||
|
||||
projections := "id, parent_id, root_id, hash, box, path, " +
|
||||
"highlight(" + table + ", 6, '__@mark__', '__mark@__') AS hpath, " +
|
||||
"highlight(" + table + ", 7, '__@mark__', '__mark@__') AS name, " +
|
||||
"highlight(" + table + ", 8, '__@mark__', '__mark@__') AS alias, " +
|
||||
"highlight(" + table + ", 9, '__@mark__', '__mark@__') AS memo, " +
|
||||
"tag, " +
|
||||
"highlight(" + table + ", 11, '__@mark__', '__mark@__') AS content, " +
|
||||
"fcontent, markdown, length, type, subtype, ial, sort, created, updated"
|
||||
stmt := "SELECT " + projections + " FROM " + table + " WHERE " + table + " MATCH '" + columnFilter() + ":(" + quotedKeyword + ")' AND type IN " + Conf.Search.TypeFilter()
|
||||
orderBy := ` order by case
|
||||
when name = '${keyword}' then 10
|
||||
when alias = '${keyword}' then 20
|
||||
when memo = '${keyword}' then 30
|
||||
when content = '${keyword}' and type = 'd' then 40
|
||||
when content LIKE '%${keyword}%' and type = 'd' then 41
|
||||
when name LIKE '%${keyword}%' then 50
|
||||
when alias LIKE '%${keyword}%' then 60
|
||||
when content = '${keyword}' and type = 'h' then 70
|
||||
when content LIKE '%${keyword}%' and type = 'h' then 71
|
||||
when fcontent = '${keyword}' and type = 'i' then 80
|
||||
when fcontent LIKE '%${keyword}%' and type = 'i' then 81
|
||||
when memo LIKE '%${keyword}%' then 90
|
||||
when content LIKE '%${keyword}%' and type != 'i' and type != 'l' then 100
|
||||
else 65535 end ASC, sort ASC, length ASC`
|
||||
orderBy = strings.ReplaceAll(orderBy, "${keyword}", keyword)
|
||||
stmt += orderBy + " LIMIT " + strconv.Itoa(Conf.Search.Limit)
|
||||
blocks := sql.SelectBlocksRawStmt(stmt, Conf.Search.Limit)
|
||||
ret = fromSQLBlocks(&blocks, "", beforeLen)
|
||||
if 1 > len(ret) {
|
||||
ret = []*Block{}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func fullTextSearch(query, box, path, filter string, beforeLen int, querySyntax bool) (ret []*Block) {
|
||||
query = util.RemoveInvisible(query)
|
||||
if util.IsIDPattern(query) {
|
||||
ret = searchBySQL("SELECT * FROM `blocks` WHERE `id` = '"+query+"'", beforeLen)
|
||||
return
|
||||
}
|
||||
|
||||
if !querySyntax {
|
||||
query = stringQuery(query)
|
||||
}
|
||||
|
||||
table := "blocks_fts" // 大小写敏感
|
||||
if !Conf.Search.CaseSensitive {
|
||||
table = "blocks_fts_case_insensitive"
|
||||
}
|
||||
projections := "id, parent_id, root_id, hash, box, path, " +
|
||||
"highlight(" + table + ", 6, '__@mark__', '__mark@__') AS hpath, " +
|
||||
"highlight(" + table + ", 7, '__@mark__', '__mark@__') AS name, " +
|
||||
"highlight(" + table + ", 8, '__@mark__', '__mark@__') AS alias, " +
|
||||
"highlight(" + table + ", 9, '__@mark__', '__mark@__') AS memo, " +
|
||||
"tag, " +
|
||||
"highlight(" + table + ", 11, '__@mark__', '__mark@__') AS content, " +
|
||||
"fcontent, markdown, length, type, subtype, ial, sort, created, updated"
|
||||
stmt := "SELECT " + projections + " FROM " + table + " WHERE " + table + " MATCH '" + columnFilter() + ":(" + query + ")' AND type IN " + filter
|
||||
if "" != box {
|
||||
stmt += " AND box = '" + box + "'"
|
||||
}
|
||||
if "" != path {
|
||||
stmt += " AND path LIKE '" + path + "%'"
|
||||
}
|
||||
stmt += " ORDER BY sort ASC, rank ASC LIMIT " + strconv.Itoa(Conf.Search.Limit)
|
||||
blocks := sql.SelectBlocksRawStmt(stmt, Conf.Search.Limit)
|
||||
ret = fromSQLBlocks(&blocks, "", beforeLen)
|
||||
if 1 > len(ret) {
|
||||
ret = []*Block{}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func query2Stmt(queryStr string) (ret string) {
|
||||
buf := bytes.Buffer{}
|
||||
if util.IsIDPattern(queryStr) {
|
||||
buf.WriteString("id = '" + queryStr + "'")
|
||||
} else {
|
||||
var tags []string
|
||||
luteEngine := NewLute()
|
||||
t := parse.Inline("", []byte(queryStr), luteEngine.ParseOptions)
|
||||
ast.Walk(t.Root, func(n *ast.Node, entering bool) ast.WalkStatus {
|
||||
if !entering {
|
||||
return ast.WalkContinue
|
||||
}
|
||||
if ast.NodeTag == n.Type {
|
||||
tags = append(tags, n.Text())
|
||||
}
|
||||
return ast.WalkContinue
|
||||
})
|
||||
|
||||
for _, tag := range tags {
|
||||
queryStr = strings.ReplaceAll(queryStr, "#"+tag+"#", "")
|
||||
}
|
||||
parts := strings.Split(queryStr, " ")
|
||||
|
||||
for i, part := range parts {
|
||||
if "" == part {
|
||||
continue
|
||||
}
|
||||
part = strings.ReplaceAll(part, "'", "''")
|
||||
buf.WriteString("(content LIKE '%" + part + "%'")
|
||||
buf.WriteString(Conf.Search.NAMFilter(part))
|
||||
buf.WriteString(")")
|
||||
if i < len(parts)-1 {
|
||||
buf.WriteString(" AND ")
|
||||
}
|
||||
}
|
||||
|
||||
if 0 < len(tags) {
|
||||
if 0 < buf.Len() {
|
||||
buf.WriteString(" OR ")
|
||||
}
|
||||
for i, tag := range tags {
|
||||
buf.WriteString("(content LIKE '%#" + tag + "#%')")
|
||||
if i < len(tags)-1 {
|
||||
buf.WriteString(" AND ")
|
||||
}
|
||||
}
|
||||
buf.WriteString(" OR ")
|
||||
for i, tag := range tags {
|
||||
buf.WriteString("ial LIKE '%tags=\"%" + tag + "%\"%'")
|
||||
if i < len(tags)-1 {
|
||||
buf.WriteString(" AND ")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if 1 > buf.Len() {
|
||||
buf.WriteString("1=1")
|
||||
}
|
||||
ret = buf.String()
|
||||
return
|
||||
}
|
||||
|
||||
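// markSearch highlights keyword in text and returns the match position, the
// marked text and a relevance score (negated so that smaller sorts first).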
func markSearch(text string, keyword string, beforeLen int) (pos int, marked string, score float64) {
|
||||
if 0 == len(keyword) {
|
||||
marked = text
|
||||
if maxLen := 5120; maxLen < utf8.RuneCountInString(marked) {
|
||||
marked = gulu.Str.SubStr(marked, maxLen) + "..."
|
||||
}
|
||||
marked = html.EscapeString(marked)
|
||||
marked = strings.ReplaceAll(marked, "__@mark__", "<mark>")
|
||||
marked = strings.ReplaceAll(marked, "__mark@__", "</mark>")
|
||||
return
|
||||
}
|
||||
|
||||
pos, marked = search.MarkText(text, keyword, beforeLen, Conf.Search.CaseSensitive)
|
||||
if -1 < pos {
|
||||
if 0 == pos {
|
||||
score = 1
|
||||
}
|
||||
score += float64(strings.Count(marked, "<mark>"))
|
||||
winkler := smetrics.JaroWinkler(text, keyword, 0.7, 4)
|
||||
score += winkler
|
||||
}
|
||||
score = -score // smaller scores sort earlier
|
||||
return
|
||||
}
|
||||
|
||||
func fromSQLBlocks(sqlBlocks *[]*sql.Block, terms string, beforeLen int) (ret []*Block) {
|
||||
for _, sqlBlock := range *sqlBlocks {
|
||||
ret = append(ret, fromSQLBlock(sqlBlock, terms, beforeLen))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func fromSQLBlock(sqlBlock *sql.Block, terms string, beforeLen int) (block *Block) {
|
||||
if nil == sqlBlock {
|
||||
return
|
||||
}
|
||||
|
||||
id := sqlBlock.ID
|
||||
content := sqlBlock.Content
|
||||
p := sqlBlock.Path
|
||||
|
||||
_, content, _ = markSearch(content, terms, beforeLen)
|
||||
markdown := maxContent(sqlBlock.Markdown, 5120)
|
||||
content = maxContent(content, 5120)
|
||||
|
||||
block = &Block{
|
||||
Box: sqlBlock.Box,
|
||||
Path: p,
|
||||
ID: id,
|
||||
RootID: sqlBlock.RootID,
|
||||
ParentID: sqlBlock.ParentID,
|
||||
Alias: sqlBlock.Alias,
|
||||
Name: sqlBlock.Name,
|
||||
Memo: sqlBlock.Memo,
|
||||
Tag: sqlBlock.Tag,
|
||||
Content: content,
|
||||
FContent: sqlBlock.FContent,
|
||||
Markdown: markdown,
|
||||
Type: treenode.FromAbbrType(sqlBlock.Type),
|
||||
SubType: sqlBlock.SubType,
|
||||
}
|
||||
if "" != sqlBlock.IAL {
|
||||
block.IAL = map[string]string{}
|
||||
ialStr := strings.TrimPrefix(sqlBlock.IAL, "{:")
|
||||
ialStr = strings.TrimSuffix(ialStr, "}")
|
||||
ial := parse.Tokens2IAL([]byte(ialStr))
|
||||
for _, kv := range ial {
|
||||
block.IAL[kv[0]] = kv[1]
|
||||
}
|
||||
}
|
||||
|
||||
_, hPath, _ := markSearch(sqlBlock.HPath, terms, 18)
|
||||
if !strings.HasPrefix(hPath, "/") {
|
||||
hPath = "/" + hPath
|
||||
}
|
||||
block.HPath = hPath
|
||||
|
||||
if "" != block.Name {
|
||||
_, block.Name, _ = markSearch(block.Name, terms, 256)
|
||||
}
|
||||
if "" != block.Alias {
|
||||
_, block.Alias, _ = markSearch(block.Alias, terms, 256)
|
||||
}
|
||||
if "" != block.Memo {
|
||||
_, block.Memo, _ = markSearch(block.Memo, terms, 256)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func maxContent(content string, maxLen int) string {
|
||||
if maxLen < utf8.RuneCountInString(content) {
|
||||
return gulu.Str.SubStr(content, maxLen) + "..."
|
||||
}
|
||||
return content
|
||||
}
|
||||
|
||||
func columnFilter() string {
|
||||
buf := bytes.Buffer{}
|
||||
buf.WriteString("{content")
|
||||
if Conf.Search.Name {
|
||||
buf.WriteString(" name")
|
||||
}
|
||||
if Conf.Search.Alias {
|
||||
buf.WriteString(" alias")
|
||||
}
|
||||
if Conf.Search.Memo {
|
||||
buf.WriteString(" memo")
|
||||
}
|
||||
if Conf.Search.Custom {
|
||||
buf.WriteString(" ial")
|
||||
}
|
||||
buf.WriteString(" tag}")
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
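// stringQuery escapes double quotes and wraps every whitespace-separated term
// in quotes for an FTS MATCH query, e.g. `hello world` becomes `"hello" "world"`.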
func stringQuery(query string) string {
|
||||
query = strings.ReplaceAll(query, "\"", "\"\"")
|
||||
|
||||
buf := bytes.Buffer{}
|
||||
parts := strings.Split(query, " ")
|
||||
for _, part := range parts {
|
||||
part = strings.TrimSpace(part)
|
||||
part = "\"" + part + "\""
|
||||
buf.WriteString(part)
|
||||
buf.WriteString(" ")
|
||||
}
|
||||
return strings.TrimSpace(buf.String())
|
||||
}
|
||||
149 kernel/model/session.go Normal file
@@ -0,0 +1,149 @@
// SiYuan - Build Your Eternal Digital Garden
|
||||
// Copyright (c) 2020-present, b3log.org
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/88250/gulu"
|
||||
ginSessions "github.com/gin-contrib/sessions"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/siyuan-note/siyuan/kernel/util"
|
||||
)
|
||||
|
||||
func LogoutAuth(c *gin.Context) {
|
||||
ret := gulu.Ret.NewResult()
|
||||
defer c.JSON(http.StatusOK, ret)
|
||||
|
||||
if "" == Conf.AccessAuthCode {
|
||||
ret.Code = -1
|
||||
ret.Msg = Conf.Language(86)
|
||||
ret.Data = map[string]interface{}{"closeTimeout": 5000}
|
||||
return
|
||||
}
|
||||
|
||||
session := ginSessions.Default(c)
|
||||
session.Options(ginSessions.Options{
|
||||
Path: "/",
|
||||
MaxAge: -1,
|
||||
})
|
||||
session.Clear()
|
||||
if err := session.Save(); nil != err {
|
||||
util.LogErrorf("saves session failed: " + err.Error())
|
||||
ret.Code = -1
|
||||
ret.Msg = "save session failed"
|
||||
}
|
||||
}
|
||||
|
||||
func LoginAuth(c *gin.Context) {
|
||||
ret := gulu.Ret.NewResult()
|
||||
defer c.JSON(http.StatusOK, ret)
|
||||
arg, ok := util.JsonArg(c, ret)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
authCode := arg["authCode"].(string)
|
||||
if Conf.AccessAuthCode != authCode {
|
||||
ret.Code = -1
|
||||
ret.Msg = Conf.Language(83)
|
||||
return
|
||||
}
|
||||
|
||||
session := &util.SessionData{ID: gulu.Rand.Int(0, 1024), AccessAuthCode: authCode}
|
||||
if err := session.Save(c); nil != err {
|
||||
util.LogErrorf("saves session failed: " + err.Error())
|
||||
ret.Code = -1
|
||||
ret.Msg = "save session failed"
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func CheckReadonly(c *gin.Context) {
|
||||
if util.ReadOnly {
|
||||
result := util.NewResult()
|
||||
result.Code = -1
|
||||
result.Msg = Conf.Language(34)
|
||||
result.Data = map[string]interface{}{"closeTimeout": 5000}
|
||||
c.JSON(200, result)
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
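// CheckAuth is the gin middleware guarding the kernel API: appearance/stage
// assets and local asset requests pass through, then the session cookie and
// the `Authorization: Token ...` header are checked against the configured
// access auth code and API token.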
func CheckAuth(c *gin.Context) {
|
||||
//util.LogInfof("check auth for [%s]", c.Request.RequestURI)
|
||||
|
||||
// let /appearance/ requests through
|
||||
if strings.HasPrefix(c.Request.RequestURI, "/appearance/") ||
|
||||
strings.HasPrefix(c.Request.RequestURI, "/stage/build/export/") ||
|
||||
strings.HasPrefix(c.Request.RequestURI, "/stage/build/fonts/") ||
|
||||
strings.HasPrefix(c.Request.RequestURI, "/stage/protyle/") {
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
|
||||
// let asset requests from localhost through
|
||||
if strings.HasPrefix(c.Request.RemoteAddr, "127.0.0.1") &&
|
||||
(strings.HasPrefix(c.Request.RequestURI, "/assets/") || strings.HasPrefix(c.Request.RequestURI, "/history/assets/")) {
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
|
||||
// via cookie session
|
||||
session := util.GetSession(c)
|
||||
if session.AccessAuthCode == Conf.AccessAuthCode {
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
|
||||
// via API token
|
||||
if authHeader := c.GetHeader("Authorization"); "" != authHeader {
|
||||
if strings.HasPrefix(authHeader, "Token ") {
|
||||
token := strings.TrimPrefix(authHeader, "Token ")
|
||||
if Conf.Api.Token == token {
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(401, map[string]interface{}{"code": -1, "msg": "Auth failed"})
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if strings.HasSuffix(c.Request.RequestURI, "/check-auth") {
|
||||
c.Next()
|
||||
return
|
||||
}
|
||||
|
||||
if session.AccessAuthCode != Conf.AccessAuthCode {
|
||||
userAgentHeader := c.GetHeader("User-Agent")
|
||||
if strings.HasPrefix(userAgentHeader, "SiYuan/") || strings.HasPrefix(userAgentHeader, "Mozilla/") {
|
||||
c.Redirect(302, "/check-auth")
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
|
||||
c.JSON(401, map[string]interface{}{"code": -1, "msg": "Auth failed"})
|
||||
c.Abort()
|
||||
return
|
||||
}
|
||||
|
||||
c.Next()
|
||||
}
|
||||
1308 kernel/model/sync.go Normal file (diff suppressed because it is too large)
368 kernel/model/tag.go Normal file
@@ -0,0 +1,368 @@
// SiYuan - Build Your Eternal Digital Garden
|
||||
// Copyright (c) 2020-present, b3log.org
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package model
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/88250/gulu"
|
||||
"github.com/88250/lute/ast"
|
||||
"github.com/88250/lute/html"
|
||||
"github.com/emirpasic/gods/sets/hashset"
|
||||
"github.com/facette/natsort"
|
||||
"github.com/siyuan-note/siyuan/kernel/search"
|
||||
"github.com/siyuan-note/siyuan/kernel/sql"
|
||||
"github.com/siyuan-note/siyuan/kernel/treenode"
|
||||
"github.com/siyuan-note/siyuan/kernel/util"
|
||||
)
|
||||
|
||||
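// RemoveTag removes the tag label from every block span and document
// attribute that carries it, rewriting the affected trees and reloading the UI.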
func RemoveTag(label string) (err error) {
|
||||
if "" == label {
|
||||
return
|
||||
}
|
||||
|
||||
util.PushEndlessProgress(Conf.Language(116))
|
||||
util.RandomSleep(1000, 2000)
|
||||
|
||||
tags := sql.QueryTagSpansByKeyword(label, 102400)
|
||||
treeBlocks := map[string][]string{}
|
||||
for _, tag := range tags {
|
||||
if blocks, ok := treeBlocks[tag.RootID]; !ok {
|
||||
treeBlocks[tag.RootID] = []string{tag.BlockID}
|
||||
} else {
|
||||
treeBlocks[tag.RootID] = append(blocks, tag.BlockID)
|
||||
}
|
||||
}
|
||||
|
||||
for treeID, blocks := range treeBlocks {
|
||||
util.PushEndlessProgress("[" + treeID + "]")
|
||||
tree, e := loadTreeByBlockID(treeID)
|
||||
if nil != e {
|
||||
util.ClearPushProgress(100)
|
||||
return e
|
||||
}
|
||||
|
||||
var unlinks []*ast.Node
|
||||
for _, blockID := range blocks {
|
||||
node := treenode.GetNodeInTree(tree, blockID)
|
||||
if nil == node {
|
||||
continue
|
||||
}
|
||||
|
||||
if ast.NodeDocument == node.Type {
|
||||
if docTagsVal := node.IALAttr("tags"); strings.Contains(docTagsVal, label) {
|
||||
docTags := strings.Split(docTagsVal, ",")
|
||||
var tmp []string
|
||||
for _, docTag := range docTags {
|
||||
if docTag != label {
|
||||
tmp = append(tmp, docTag)
|
||||
continue
|
||||
}
|
||||
}
|
||||
node.SetIALAttr("tags", strings.Join(tmp, ","))
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
nodeTags := node.ChildrenByType(ast.NodeTag)
|
||||
for _, nodeTag := range nodeTags {
|
||||
nodeLabels := nodeTag.ChildrenByType(ast.NodeText)
|
||||
for _, nodeLabel := range nodeLabels {
|
||||
if bytes.Equal(nodeLabel.Tokens, []byte(label)) {
|
||||
unlinks = append(unlinks, nodeTag)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for _, n := range unlinks {
|
||||
n.Unlink()
|
||||
}
|
||||
util.PushEndlessProgress(fmt.Sprintf(Conf.Language(111), tree.Root.IALAttr("title")))
|
||||
if err = writeJSONQueue(tree); nil != err {
|
||||
util.ClearPushProgress(100)
|
||||
return
|
||||
}
|
||||
util.RandomSleep(50, 150)
|
||||
}
|
||||
|
||||
util.PushEndlessProgress(Conf.Language(113))
|
||||
sql.WaitForWritingDatabase()
|
||||
util.ReloadUI()
|
||||
return
|
||||
}
|
||||
|
||||
func RenameTag(oldLabel, newLabel string) (err error) {
|
||||
if treenode.ContainsMarker(newLabel) {
|
||||
return errors.New(Conf.Language(112))
|
||||
}
|
||||
|
||||
newLabel = strings.TrimSpace(newLabel)
|
||||
newLabel = strings.TrimPrefix(newLabel, "/")
|
||||
newLabel = strings.TrimSuffix(newLabel, "/")
|
||||
newLabel = strings.TrimSpace(newLabel)
|
||||
|
||||
if "" == newLabel {
|
||||
return errors.New(Conf.Language(114))
|
||||
}
|
||||
|
||||
if oldLabel == newLabel {
|
||||
return
|
||||
}
|
||||
|
||||
util.PushEndlessProgress(Conf.Language(110))
|
||||
util.RandomSleep(1000, 2000)
|
||||
|
||||
tags := sql.QueryTagSpansByKeyword(oldLabel, 102400)
|
||||
treeBlocks := map[string][]string{}
|
||||
for _, tag := range tags {
|
||||
if blocks, ok := treeBlocks[tag.RootID]; !ok {
|
||||
treeBlocks[tag.RootID] = []string{tag.BlockID}
|
||||
} else {
|
||||
treeBlocks[tag.RootID] = append(blocks, tag.BlockID)
|
||||
}
|
||||
}
|
||||
|
||||
for treeID, blocks := range treeBlocks {
|
||||
util.PushEndlessProgress("[" + treeID + "]")
|
||||
tree, e := loadTreeByBlockID(treeID)
|
||||
if nil != e {
|
||||
util.ClearPushProgress(100)
|
||||
return e
|
||||
}
|
||||
|
||||
for _, blockID := range blocks {
|
||||
node := treenode.GetNodeInTree(tree, blockID)
|
||||
if nil == node {
|
||||
continue
|
||||
}
|
||||
|
||||
if ast.NodeDocument == node.Type {
|
||||
if docTagsVal := node.IALAttr("tags"); strings.Contains(docTagsVal, oldLabel) {
|
||||
docTags := strings.Split(docTagsVal, ",")
|
||||
if gulu.Str.Contains(newLabel, docTags) {
|
||||
continue
|
||||
}
|
||||
var tmp []string
|
||||
for i, docTag := range docTags {
|
||||
if !strings.Contains(docTag, oldLabel) {
|
||||
tmp = append(tmp, docTag)
|
||||
continue
|
||||
}
|
||||
if newTag := strings.ReplaceAll(docTags[i], oldLabel, newLabel); !gulu.Str.Contains(newTag, tmp) {
|
||||
tmp = append(tmp, newTag)
|
||||
}
|
||||
}
|
||||
node.SetIALAttr("tags", strings.Join(tmp, ","))
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
nodeTags := node.ChildrenByType(ast.NodeTag)
|
||||
for _, nodeTag := range nodeTags {
|
||||
nodeLabels := nodeTag.ChildrenByType(ast.NodeText)
|
||||
for _, nodeLabel := range nodeLabels {
|
||||
if bytes.Contains(nodeLabel.Tokens, []byte(oldLabel)) {
|
||||
nodeLabel.Tokens = bytes.ReplaceAll(nodeLabel.Tokens, []byte(oldLabel), []byte(newLabel))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
util.PushEndlessProgress(fmt.Sprintf(Conf.Language(111), tree.Root.IALAttr("title")))
|
||||
if err = writeJSONQueue(tree); nil != err {
|
||||
util.ClearPushProgress(100)
|
||||
return
|
||||
}
|
||||
util.RandomSleep(50, 150)
|
||||
}
|
||||
|
||||
util.PushEndlessProgress(Conf.Language(113))
|
||||
sql.WaitForWritingDatabase()
|
||||
util.ReloadUI()
|
||||
return
|
||||
}
|
||||
|
||||
type TagBlocks []*Block
|
||||
|
||||
func (s TagBlocks) Len() int { return len(s) }
|
||||
func (s TagBlocks) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||
func (s TagBlocks) Less(i, j int) bool { return s[i].ID < s[j].ID }
|
||||
|
||||
type Tag struct {
|
||||
Name string `json:"name"`
|
||||
Label string `json:"label"`
|
||||
Children Tags `json:"children"`
|
||||
Type string `json:"type"` // "tag"
|
||||
Depth int `json:"depth"`
|
||||
Count int `json:"count"`
|
||||
|
||||
tags Tags
|
||||
}
|
||||
|
||||
type Tags []*Tag
|
||||
|
||||
func BuildTags() (ret *Tags) {
|
||||
WaitForWritingFiles()
|
||||
sql.WaitForWritingDatabase()
|
||||
|
||||
ret = &Tags{}
|
||||
labels := labelTags()
|
||||
tags := Tags{}
|
||||
for label, _ := range labels {
|
||||
tags = buildTags(tags, strings.Split(label, "/"), 0)
|
||||
}
|
||||
appendTagChildren(&tags, labels)
|
||||
sortTags(tags)
|
||||
ret = &tags
|
||||
return
|
||||
}
|
||||
|
||||
func sortTags(tags Tags) {
|
||||
switch Conf.Tag.Sort {
|
||||
case util.SortModeNameASC:
|
||||
sort.Slice(tags, func(i, j int) bool {
|
||||
return util.PinYinCompare(util.RemoveEmoji(tags[i].Name), util.RemoveEmoji(tags[j].Name))
|
||||
})
|
||||
case util.SortModeNameDESC:
|
||||
sort.Slice(tags, func(j, i int) bool {
|
||||
return util.PinYinCompare(util.RemoveEmoji(tags[i].Name), util.RemoveEmoji(tags[j].Name))
|
||||
})
|
||||
case util.SortModeAlphanumASC:
|
||||
sort.Slice(tags, func(i, j int) bool {
|
||||
return natsort.Compare(util.RemoveEmoji((tags)[i].Name), util.RemoveEmoji((tags)[j].Name))
|
||||
})
|
||||
case util.SortModeAlphanumDESC:
|
||||
sort.Slice(tags, func(i, j int) bool {
|
||||
return natsort.Compare(util.RemoveEmoji((tags)[j].Name), util.RemoveEmoji((tags)[i].Name))
|
||||
})
|
||||
case util.SortModeRefCountASC:
|
||||
sort.Slice(tags, func(i, j int) bool { return (tags)[i].Count < (tags)[j].Count })
|
||||
case util.SortModeRefCountDESC:
|
||||
sort.Slice(tags, func(i, j int) bool { return (tags)[i].Count > (tags)[j].Count })
|
||||
default:
|
||||
sort.Slice(tags, func(i, j int) bool {
|
||||
return natsort.Compare(util.RemoveEmoji((tags)[i].Name), util.RemoveEmoji((tags)[j].Name))
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func SearchTags(keyword string) (ret []string) {
|
||||
ret = []string{}
|
||||
|
||||
labels := labelBlocksByKeyword(keyword)
|
||||
for label, _ := range labels {
|
||||
_, t := search.MarkText(label, keyword, 1024, Conf.Search.CaseSensitive)
|
||||
ret = append(ret, t)
|
||||
}
|
||||
sort.Strings(ret)
|
||||
return
|
||||
}
|
||||
|
||||
func labelBlocksByKeyword(keyword string) (ret map[string]TagBlocks) {
|
||||
ret = map[string]TagBlocks{}
|
||||
|
||||
tags := sql.QueryTagSpansByKeyword(keyword, Conf.Search.Limit)
|
||||
set := hashset.New()
|
||||
for _, tag := range tags {
|
||||
set.Add(tag.BlockID)
|
||||
}
|
||||
var blockIDs []string
|
||||
for _, v := range set.Values() {
|
||||
blockIDs = append(blockIDs, v.(string))
|
||||
}
|
||||
sort.SliceStable(blockIDs, func(i, j int) bool {
|
||||
return blockIDs[i] > blockIDs[j]
|
||||
})
|
||||
|
||||
sqlBlocks := sql.GetBlocks(blockIDs)
|
||||
blockMap := map[string]*sql.Block{}
|
||||
for _, block := range sqlBlocks {
|
||||
blockMap[block.ID] = block
|
||||
}
|
||||
|
||||
for _, tag := range tags {
|
||||
label := tag.Content
|
||||
|
||||
parentSQLBlock := blockMap[tag.BlockID]
|
||||
block := fromSQLBlock(parentSQLBlock, "", 0)
|
||||
if blocks, ok := ret[label]; ok {
|
||||
blocks = append(blocks, block)
|
||||
ret[label] = blocks
|
||||
} else {
|
||||
ret[label] = []*Block{block}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func labelTags() (ret map[string]Tags) {
|
||||
ret = map[string]Tags{}
|
||||
|
||||
tagSpans := sql.QueryTagSpans("", 10240)
|
||||
for _, tagSpan := range tagSpans {
|
||||
label := tagSpan.Content
|
||||
if _, ok := ret[label]; ok {
|
||||
ret[label] = append(ret[label], &Tag{})
|
||||
} else {
|
||||
ret[label] = Tags{}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func appendTagChildren(tags *Tags, labels map[string]Tags) {
|
||||
for _, tag := range *tags {
|
||||
tag.Label = tag.Name
|
||||
tag.Count = len(labels[tag.Label]) + 1
|
||||
appendChildren0(tag, labels)
|
||||
sortTags(tag.Children)
|
||||
}
|
||||
}
|
||||
|
||||
func appendChildren0(tag *Tag, labels map[string]Tags) {
|
||||
sortTags(tag.tags)
|
||||
for _, t := range tag.tags {
|
||||
t.Label = tag.Label + "/" + t.Name
|
||||
t.Count = len(labels[t.Label]) + 1
|
||||
tag.Children = append(tag.Children, t)
|
||||
}
|
||||
for _, child := range tag.tags {
|
||||
appendChildren0(child, labels)
|
||||
}
|
||||
}
|
||||
|
||||
func buildTags(root Tags, labels []string, depth int) Tags {
|
||||
if 1 > len(labels) {
|
||||
return root
|
||||
}
|
||||
|
||||
i := 0
|
||||
for ; i < len(root); i++ {
|
||||
if (root)[i].Name == labels[0] {
|
||||
break
|
||||
}
|
||||
}
|
||||
if i == len(root) {
|
||||
root = append(root, &Tag{Name: html.EscapeHTMLStr(labels[0]), Type: "tag", Depth: depth})
|
||||
}
|
||||
depth++
|
||||
root[i].tags = buildTags(root[i].tags, labels[1:], depth)
|
||||
return root
|
||||
}
|
||||
289
kernel/model/template.go
Normal file
@@ -0,0 +1,289 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package model

import (
	"bytes"
	"errors"
	"fmt"
	"io/fs"
	"os"
	"path/filepath"
	"strings"
	"text/template"
	"time"
	"unicode/utf8"

	"github.com/88250/lute/ast"
	"github.com/88250/lute/parse"
	"github.com/88250/lute/render"
	"github.com/88250/protyle"
	"github.com/araddon/dateparse"
	"github.com/siyuan-note/siyuan/kernel/treenode"
	"github.com/siyuan-note/siyuan/kernel/util"

	"github.com/88250/gulu"
	sprig "github.com/Masterminds/sprig/v3"
	"github.com/siyuan-note/siyuan/kernel/search"
	"github.com/siyuan-note/siyuan/kernel/sql"
)

func RenderCreateDocNameTemplate(nameTemplate string) (ret string, err error) {
	tpl, err := template.New("").Funcs(sprig.TxtFuncMap()).Parse(nameTemplate)
	if nil != err {
		return "", errors.New(fmt.Sprintf(Conf.Language(44), err.Error()))
	}

	buf := &bytes.Buffer{}
	buf.Grow(4096)
	err = tpl.Execute(buf, nil)
	if nil != err {
		return "", errors.New(fmt.Sprintf(Conf.Language(44), err.Error()))
	}
	ret = buf.String()
	return
}

func SearchTemplate(keyword string) (ret []*Block) {
	templates := filepath.Join(util.DataDir, "templates")
	k := strings.ToLower(keyword)
	filepath.Walk(templates, func(path string, info fs.FileInfo, err error) error {
		name := strings.ToLower(info.Name())
		if !strings.HasSuffix(name, ".md") {
			return nil
		}

		if strings.HasPrefix(name, ".") || "readme.md" == name {
			if info.IsDir() {
				return filepath.SkipDir
			}
			return nil
		}

		if strings.Contains(name, k) {
			content := strings.TrimPrefix(path, templates)
			content = strings.ReplaceAll(content, "templates"+string(os.PathSeparator), "")
			content = strings.TrimSuffix(content, ".md")
			content = filepath.ToSlash(content)
			content = content[1:]
			_, content = search.MarkText(content, keyword, 32, Conf.Search.CaseSensitive)
			b := &Block{Path: path, Content: content}
			ret = append(ret, b)
		}
		return nil
	})
	return
}

func DocSaveAsTemplate(id string, overwrite bool) (code int, err error) {
	tree, err := loadTreeByBlockID(id)
	if nil != err {
		return
	}

	var blocks []*ast.Node
	// Add block IALs; the format rendering below needs them
	ast.Walk(tree.Root, func(n *ast.Node, entering bool) ast.WalkStatus {
		if !entering || !n.IsBlock() {
			return ast.WalkContinue
		}

		if ast.NodeBlockQueryEmbed == n.Type {
			if script := n.ChildByType(ast.NodeBlockQueryEmbedScript); nil != script {
				script.Tokens = bytes.ReplaceAll(script.Tokens, []byte("\n"), []byte(" "))
			}
		} else if ast.NodeHTMLBlock == n.Type {
			n.Tokens = bytes.TrimSpace(n.Tokens)
			// Wrap with <div>, otherwise it is recognized as inline HTML when the template is parsed later https://github.com/siyuan-note/siyuan/issues/4244
			if !bytes.HasPrefix(n.Tokens, []byte("<div>")) {
				n.Tokens = append([]byte("<div>\n"), n.Tokens...)
			}
			if !bytes.HasSuffix(n.Tokens, []byte("</div>")) {
				n.Tokens = append(n.Tokens, []byte("\n</div>")...)
			}
		}

		n.RemoveIALAttr("updated")
		if 0 < len(n.KramdownIAL) {
			blocks = append(blocks, n)
		}
		return ast.WalkContinue
	})
	for _, block := range blocks {
		block.InsertAfter(&ast.Node{Type: ast.NodeKramdownBlockIAL, Tokens: parse.IAL2Tokens(block.KramdownIAL)})
	}

	luteEngine := NewLute()
	formatRenderer := render.NewFormatRenderer(tree, luteEngine.RenderOptions)
	md := formatRenderer.Render()
	title := tree.Root.IALAttr("title")
	title = util.FilterFileName(title)
	title += ".md"
	savePath := filepath.Join(util.DataDir, "templates", title)
	if gulu.File.IsExist(savePath) {
		if !overwrite {
			code = 1
			return
		}
	}

	err = os.WriteFile(savePath, md, 0644)
	return
}

func RenderTemplate(p, id string) (string, error) {
	return renderTemplate(p, id)
}

func renderTemplate(p, id string) (string, error) {
	tree, err := loadTreeByBlockID(id)
	if nil != err {
		return "", err
	}

	node := treenode.GetNodeInTree(tree, id)
	if nil == node {
		return "", ErrBlockNotFound
	}
	block := sql.BuildBlockFromNode(node, tree)
	md, err := os.ReadFile(p)
	if nil != err {
		return "", err
	}

	dataModel := map[string]string{}
	var titleVar string
	if nil != block {
		titleVar = block.Name
		if "d" == block.Type {
			titleVar = block.Content
		}
		dataModel["title"] = titleVar
		dataModel["id"] = block.ID
		dataModel["name"] = block.Name
		dataModel["alias"] = block.Alias
	}

	funcMap := sprig.TxtFuncMap()
	funcMap["queryBlocks"] = func(stmt string, args ...string) (ret []*sql.Block) {
		for _, arg := range args {
			stmt = strings.Replace(stmt, "?", arg, 1)
		}
		ret = sql.SelectBlocksRawStmt(stmt, Conf.Search.Limit)
		return
	}
	funcMap["querySpans"] = func(stmt string, args ...string) (ret []*sql.Span) {
		for _, arg := range args {
			stmt = strings.Replace(stmt, "?", arg, 1)
		}
		ret = sql.SelectSpansRawStmt(stmt, Conf.Search.Limit)
		return
	}
	funcMap["parseTime"] = func(dateStr string) time.Time {
		now := time.Now()
		ret, err := dateparse.ParseIn(dateStr, now.Location())
		if nil != err {
			util.LogWarnf("parse date [%s] failed [%s], return current time instead", dateStr, err)
			return now
		}
		return ret
	}

	goTpl := template.New("").Delims(".action{", "}")
	tpl, err := goTpl.Funcs(funcMap).Parse(gulu.Str.FromBytes(md))
	if nil != err {
		return "", errors.New(fmt.Sprintf(Conf.Language(44), err.Error()))
	}

	buf := &bytes.Buffer{}
	buf.Grow(4096)
	if err = tpl.Execute(buf, dataModel); nil != err {
		return "", errors.New(fmt.Sprintf(Conf.Language(44), err.Error()))
	}
	md = buf.Bytes()
	tree = parseKTree(md)
	if nil == tree {
		msg := fmt.Sprintf("parse tree [%s] failed", p)
		util.LogErrorf(msg)
		return "", errors.New(msg)
	}

	var nodesNeedAppendChild []*ast.Node
	ast.Walk(tree.Root, func(n *ast.Node, entering bool) ast.WalkStatus {
		if !entering {
			return ast.WalkContinue
		}

		if "" != n.ID {
			// Regenerate the ID
			n.ID = ast.NewNodeID()
			n.SetIALAttr("id", n.ID)
		}

		if (ast.NodeListItem == n.Type && (nil == n.FirstChild ||
			(3 == n.ListData.Typ && (nil == n.FirstChild.Next || ast.NodeKramdownBlockIAL == n.FirstChild.Next.Type)))) ||
			(ast.NodeBlockquote == n.Type && nil != n.FirstChild && nil != n.FirstChild.Next && ast.NodeKramdownBlockIAL == n.FirstChild.Next.Type) {
			nodesNeedAppendChild = append(nodesNeedAppendChild, n)
		}

		appendRefTextRenderResultForBlockRef(n)
		return ast.WalkContinue
	})
	for _, n := range nodesNeedAppendChild {
		n.AppendChild(protyle.NewParagraph())
	}

	// Folded headings exported as a template produce duplicated content when the template is used https://github.com/siyuan-note/siyuan/issues/4488
	ast.Walk(tree.Root, func(n *ast.Node, entering bool) ast.WalkStatus {
		if !entering {
			return ast.WalkContinue
		}

		if "1" == n.IALAttr("heading-fold") { // mark blocks under a folded heading; the frontend removes them uniformly after rendering
			n.SetIALAttr("status", "temp")
		}
		return ast.WalkContinue
	})

	luteEngine := NewLute()
	dom := luteEngine.Tree2BlockDOM(tree, luteEngine.RenderOptions)
	return dom, nil
}

func appendRefTextRenderResultForBlockRef(blockRef *ast.Node) {
	if ast.NodeBlockRef != blockRef.Type {
		return
	}

	refText := blockRef.ChildByType(ast.NodeBlockRefText)
	if nil != refText {
		return
	}
	refText = blockRef.ChildByType(ast.NodeBlockRefDynamicText)
	if nil != refText {
		return
	}

	// Dynamically resolve and render the anchor text of ((id)) references
	// The current version no longer produces this syntax; this is kept for migrating historical data
	refID := blockRef.ChildByType(ast.NodeBlockRefID)
	text := sql.GetRefText(refID.TokensStr())
	if Conf.Editor.BlockRefDynamicAnchorTextMaxLen < utf8.RuneCountInString(text) {
		text = gulu.Str.SubStr(text, Conf.Editor.BlockRefDynamicAnchorTextMaxLen) + "..."
	}
	blockRef.AppendChild(&ast.Node{Type: ast.NodeBlockRefDynamicText, Tokens: gulu.Str.ToBytes(text)})
}
1254
kernel/model/transaction.go
Normal file
File diff suppressed because it is too large
88
kernel/model/tree.go
Normal file
@@ -0,0 +1,88 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package model

import (
	"errors"
	"io/fs"
	"path/filepath"
	"strings"

	"github.com/88250/lute/parse"
	"github.com/88250/protyle"
	"github.com/siyuan-note/siyuan/kernel/filesys"
	"github.com/siyuan-note/siyuan/kernel/treenode"
	"github.com/siyuan-note/siyuan/kernel/util"
)

func loadTrees(localPath string) (ret []*parse.Tree) {
	luteEngine := NewLute()
	filepath.Walk(localPath, func(path string, info fs.FileInfo, err error) error {
		if info.IsDir() && strings.HasPrefix(info.Name(), ".") {
			return filepath.SkipDir
		}

		if !strings.HasSuffix(info.Name(), ".sy") {
			return nil
		}

		data, err := filesys.NoLockFileRead(path)
		if nil != err {
			util.LogErrorf("get data [path=%s] failed: %s", path, err)
			return nil
		}

		tree, err := protyle.ParseJSONWithoutFix(luteEngine, data)
		if nil != err {
			util.LogErrorf("parse json to tree [%s] failed: %s", path, err)
			return nil
		}
		ret = append(ret, tree)
		return nil
	})
	return
}

var ErrBoxNotFound = errors.New("notebook not found")
var ErrBlockNotFound = errors.New("block not found")
var ErrTreeNotFound = errors.New("tree not found")

func loadTreeByBlockID(id string) (ret *parse.Tree, err error) {
	if "" == id {
		return nil, ErrTreeNotFound
	}

	bt := treenode.GetBlockTree(id)
	if nil == bt {
		return nil, ErrBlockNotFound
	}
	ret, err = LoadTree(bt.BoxID, bt.Path)
	if nil != err {
		return
	}
	return
}

func LoadTree(boxID, p string) (*parse.Tree, error) {
	luteEngine := NewLute()
	tree, err := filesys.LoadTree(boxID, p, luteEngine)
	if nil != err {
		util.LogErrorf("load tree [%s] failed: %s", boxID+p, err)
		return nil, err
	}
	return tree, nil
}
65
kernel/model/updater.go
Normal file
@@ -0,0 +1,65 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package model

import (
	"fmt"
	"sync"

	"github.com/siyuan-note/siyuan/kernel/util"
)

var (
	checkUpdateLock = &sync.Mutex{}
)

func CheckUpdate(showMsg bool) {
	if !showMsg {
		return
	}

	if "ios" == util.Container {
		if showMsg {
			util.PushMsg(Conf.Language(36), 5000)
		}
		return
	}

	checkUpdateLock.Lock()
	defer checkUpdateLock.Unlock()

	result, err := util.GetRhyResult(showMsg, Conf.System.NetworkProxy.String())
	if nil != err {
		return
	}

	ver := result["ver"].(string)
	release := result["release"].(string)
	var msg string
	var timeout int
	if ver == util.Ver {
		msg = Conf.Language(10)
		timeout = 3000
	} else {
		msg = fmt.Sprintf(Conf.Language(9), "<a href=\""+release+"\">"+release+"</a>")
		showMsg = true
		timeout = 15000
	}
	if showMsg {
		util.PushMsg(msg, timeout)
	}
}
197
kernel/model/upload.go
Normal file
@@ -0,0 +1,197 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package model

import (
	"crypto/sha256"
	"errors"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"strings"

	"github.com/88250/gulu"
	"github.com/88250/lute/ast"
	"github.com/gin-gonic/gin"
	"github.com/siyuan-note/siyuan/kernel/sql"
	"github.com/siyuan-note/siyuan/kernel/treenode"
	"github.com/siyuan-note/siyuan/kernel/util"
)

func InsertLocalAssets(id string, assetPaths []string) (succMap map[string]interface{}, err error) {
	succMap = map[string]interface{}{}

	bt := treenode.GetBlockTree(id)
	if nil == bt {
		err = errors.New(Conf.Language(71))
		return
	}

	docDirLocalPath := filepath.Join(util.DataDir, bt.BoxID, path.Dir(bt.Path))
	assets := getAssetsDir(filepath.Join(util.DataDir, bt.BoxID), docDirLocalPath)
	for _, p := range assetPaths {
		fName := filepath.Base(p)
		fName = util.FilterUploadFileName(fName)
		ext := filepath.Ext(fName)
		fName = strings.TrimSuffix(fName, ext)
		ext = strings.ToLower(ext)
		fName += ext
		baseName := fName
		if gulu.File.IsDir(p) {
			succMap[baseName] = "file://" + p
			continue
		}

		var f *os.File
		f, err = os.Open(p)
		if nil != err {
			return
		}

		var data []byte
		data, err = io.ReadAll(f)
		f.Close()
		if nil != err {
			return
		}

		hash := fmt.Sprintf("%x", sha256.Sum256(data))
		if existAsset := sql.QueryAssetByHash(hash); nil != existAsset {
			// An asset with identical data already exists; do not save it again
			succMap[baseName] = existAsset.Path
		} else {
			ext := path.Ext(fName)
			fName = fName[0 : len(fName)-len(ext)]
			fName = fName + "-" + ast.NewNodeID() + ext
			writePath := filepath.Join(assets, fName)
			if err = gulu.File.WriteFileSafer(writePath, data, 0644); nil != err {
				return
			}
			succMap[baseName] = "assets/" + fName
		}
	}
	IncWorkspaceDataVer()
	return
}

func Upload(c *gin.Context) {
	ret := gulu.Ret.NewResult()
	defer c.JSON(200, ret)

	form, err := c.MultipartForm()
	if nil != err {
		util.LogErrorf("insert asset failed: %s", err)
		ret.Code = -1
		ret.Msg = err.Error()
		return
	}
	assetsDirPath := filepath.Join(util.DataDir, "assets")
	if nil != form.Value["id"] {
		id := form.Value["id"][0]
		bt := treenode.GetBlockTree(id)
		if nil == bt {
			ret.Code = -1
			ret.Msg = Conf.Language(71)
			return
		}
		docDirLocalPath := filepath.Join(util.DataDir, bt.BoxID, path.Dir(bt.Path))
		assetsDirPath = getAssetsDir(filepath.Join(util.DataDir, bt.BoxID), docDirLocalPath)
	}
	if nil != form.Value["assetsDirPath"] {
		assetsDirPath = form.Value["assetsDirPath"][0]
		assetsDirPath = filepath.Join(util.DataDir, assetsDirPath)
		if err := os.MkdirAll(assetsDirPath, 0755); nil != err {
			ret.Code = -1
			ret.Msg = err.Error()
			return
		}
	}

	var errFiles []string
	succMap := map[string]interface{}{}
	files := form.File["file[]"]
	for _, file := range files {
		fName := file.Filename
		fName = util.FilterUploadFileName(fName)
		ext := filepath.Ext(fName)
		fName = strings.TrimSuffix(fName, ext)
		ext = strings.ToLower(ext)
		fName += ext
		baseName := fName
		f, err := file.Open()
		if nil != err {
			errFiles = append(errFiles, fName)
			ret.Msg = err.Error()
			break
		}

		data, err := io.ReadAll(f)
		if nil != err {
			errFiles = append(errFiles, fName)
			ret.Msg = err.Error()
			break
		}
		f.Close()

		hash := fmt.Sprintf("%x", sha256.Sum256(data))
		if existAsset := sql.QueryAssetByHash(hash); nil != existAsset {
			// An asset with identical data already exists; do not save it again
			succMap[baseName] = existAsset.Path
		} else {
			_, id := util.LastID(fName)
			ext := path.Ext(fName)
			fName = fName[0 : len(fName)-len(ext)]
			if !util.IsIDPattern(id) {
				id = ast.NewNodeID()
				fName = fName + "-" + id + ext
			} else {
				if !util.IsIDPattern(fName) {
					fName = fName[:len(fName)-len(id)-1] + "-" + id + ext
				} else {
					fName = fName + ext
				}
			}
			writePath := filepath.Join(assetsDirPath, fName)
			if err = gulu.File.WriteFileSafer(writePath, data, 0644); nil != err {
				errFiles = append(errFiles, fName)
				ret.Msg = err.Error()
				break
			}
			succMap[baseName] = "assets/" + fName
		}
	}

	ret.Data = map[string]interface{}{
		"errFiles": errFiles,
		"succMap":  succMap,
	}

	IncWorkspaceDataVer()
}

func getAssetsDir(boxLocalPath, docDirLocalPath string) (assets string) {
	assets = filepath.Join(docDirLocalPath, "assets")
	if !gulu.File.IsExist(assets) {
		assets = filepath.Join(boxLocalPath, "assets")
		if !gulu.File.IsExist(assets) {
			assets = filepath.Join(util.DataDir, "assets")
		}
	}
	return
}
55
kernel/model/widget.go
Normal file
@@ -0,0 +1,55 @@
// SiYuan - Build Your Eternal Digital Garden
// Copyright (c) 2020-present, b3log.org
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <https://www.gnu.org/licenses/>.

package model

import (
	"os"
	"path/filepath"
	"strings"

	"github.com/siyuan-note/siyuan/kernel/util"

	"github.com/siyuan-note/siyuan/kernel/search"
)

func SearchWidget(keyword string) (ret []*Block) {
	ret = []*Block{}
	widgets := filepath.Join(util.DataDir, "widgets")
	dirs, err := os.ReadDir(widgets)
	if nil != err {
		util.LogErrorf("read dir [%s] failed: %s", widgets, err)
		return
	}

	k := strings.ToLower(keyword)
	for _, dir := range dirs {
		name := strings.ToLower(dir.Name())
		if strings.HasPrefix(name, ".") {
			continue
		}

		if strings.Contains(name, k) {
			name = dir.Name()
			if "" != keyword {
				_, name = search.MarkText(dir.Name(), keyword, 32, Conf.Search.CaseSensitive)
			}
			b := &Block{Content: name}
			ret = append(ret, b)
		}
	}
	return
}