#4809 V20231018

Merged
ychao_1983 merged 273 commits from V20231018 into develop 6 months ago
  1. +5 -5 custom/conf/app.ini.sample
  2. +1 -0 entity/ai_task.go
  3. +29 -0 models/cloudbrain.go
  4. +20 -0 models/file_chunk.go
  5. +94 -0 models/llm_chat.go
  6. +97 -0 models/llm_chat_visit.go
  7. +2 -0 models/models.go
  8. +7 -5 models/resource_queue.go
  9. +32 -0 modules/baiduai/baiduai.go
  10. +72 -0 modules/baiduai/resty.go
  11. +12 -0 modules/cron/tasks_basic.go
  12. +56 -0 modules/llm_chat/llm_chat.go
  13. +578 -0 modules/llm_chat/resty.go
  14. +13 -1 modules/modelappservice/modelsevice.go
  15. +54 -0 modules/setting/setting.go
  16. +82 -0 modules/structs/llm_chat.go
  17. +11 -2 modules/templates/helper.go
  18. +5 -1 modules/urfs_client/objectstorage/mocks/objectstorage_mock.go
  19. +11 -1 options/locale/locale_en-US.ini
  20. +10 -1 options/locale/locale_zh-CN.ini
  21. +32 -5 routers/api/v1/api.go
  22. +327 -0 routers/api/v1/llm_chat/llm_chat.go
  23. +32 -1 routers/api/v1/repo/attachments.go
  24. +51 -2 routers/modelapp/wenxin.go
  25. +30 -22 routers/repo/attachment_model.go
  26. +99 -1 routers/repo/flow_control.go
  27. +1 -1 routers/repo/grampus.go
  28. +1 -0 routers/response/response_list.go
  29. +5 -3 routers/routes/routes.go
  30. +4 -2 services/ai_task_service/cluster/c2net.go
  31. +8 -2 services/ai_task_service/cluster/cloudbrain_two.go
  32. +1 -0 services/ai_task_service/task/cloudbrain_one_notebook_task.go
  33. +1 -0 services/ai_task_service/task/cloudbrain_two_notebook_task.go
  34. +1 -0 services/ai_task_service/task/grampus_notebook_task.go
  35. +18 -0 services/ai_task_service/task/opt_handler.go
  36. +45 -0 services/ai_task_service/task/task_service.go
  37. +564 -0 services/llm_chat/llm_chat.go
  38. +5 -1 templates/admin/cloudbrain/imagecommit.tmpl
  39. +10 -12 templates/base/head_navbar.tmpl
  40. +10 -12 templates/base/head_navbar_fluid.tmpl
  41. +10 -12 templates/base/head_navbar_home.tmpl
  42. +10 -12 templates/base/head_navbar_pro.tmpl
  43. +8 -0 templates/model/llmchat/create/index.tmpl
  44. +12 -0 templates/model/llmchat/index.tmpl
  45. +5 -1 templates/repo/cloudbrain/image/edit.tmpl
  46. +6 -1 templates/repo/cloudbrain/image/submit.tmpl
  47. +1 -1 templates/user/dashboard/dashboard.tmpl
  48. +1 -1 templates/wenxin_privacy.tmpl
  49. +2 -2 web_src/js/index.js
  50. +30 -0 web_src/less/_model.less
  51. +1 -0 web_src/vuepages/apis/modules/common.js
  52. +132 -0 web_src/vuepages/apis/modules/llmchat.js
  53. +3 -1 web_src/vuepages/apis/modules/modelbase.js
  54. +19 -4 web_src/vuepages/components/cloudbrain/details/ConfigInfo.vue
  55. +68 -1 web_src/vuepages/langs/config/en-US.js
  56. +69 -2 web_src/vuepages/langs/config/zh-CN.js
  57. +92 -92 web_src/vuepages/pages/cloudbrain/configs.js
  58. +13 -5 web_src/vuepages/pages/cloudbrain/create/index.vue
  59. +246 -0 web_src/vuepages/pages/model/llms/App.vue
  60. +96 -0 web_src/vuepages/pages/model/llms/componenes/SessionWindow.vue
  61. +118 -0 web_src/vuepages/pages/model/llms/componenes/WindowAssistant.vue
  62. +39 -0 web_src/vuepages/pages/model/llms/componenes/WindowUser.vue
  63. +119 -0 web_src/vuepages/pages/model/llms/componenes/createKbDialog.vue
  64. +389 -0 web_src/vuepages/pages/model/llms/componenes/dialogLeft.vue
  65. +673 -0 web_src/vuepages/pages/model/llms/componenes/dialogRight.vue
  66. +126 -0 web_src/vuepages/pages/model/llms/componenes/headerModel.vue
  67. +117 -0 web_src/vuepages/pages/model/llms/create/index.vue
  68. +22 -0 web_src/vuepages/pages/model/llms/create/vp-llms-create.js
  69. +22 -0 web_src/vuepages/pages/model/llms/vp-model-llms.js
  70. +3 -3 web_src/vuepages/pages/model/wenxin/index.vue
  71. +2 -1 web_src/vuepages/pages/modelbase/home/index.vue
  72. +1 -0 web_src/vuepages/pages/modelbase/model/index.vue
  73. +27 -33 web_src/vuepages/pages/modelmanage/local/index.vue
  74. +10 -3 web_src/vuepages/pages/modelmanage/settings/index.vue
  75. +1 -1 web_src/vuepages/pages/resources/scene/index.vue
  76. +1 -1 web_src/vuepages/pages/resources/specification/index.vue

+5 -5 custom/conf/app.ini.sample

@@ -394,12 +394,12 @@ MAX_OPEN_CONNS = 0
DB_TYPE = postgres
HOST = 127.0.0.1:5432
NAME = statistic
- USER =
- PASSWD =
+ USER =
+ PASSWD =
SCHEMA =
SSL_MODE = disable
CHARSET = utf8
- PATH =
+ PATH =

[indexer]
; Issue indexer type, currently support: bleve, db or elasticsearch, default is bleve
@@ -1150,5 +1150,5 @@ growth_comments=0.2

[grampus]
USERNAME =
- PASSWORD =
- SERVER_HOST =
+ PASSWORD =
+ SERVER_HOST =

+1 -0 entity/ai_task.go

@@ -143,6 +143,7 @@ type AITaskDetailInfo struct {
FailedReason string `json:"failed_reason"`
UserId int64 `json:"-"`
AppName string `json:"app_name"`
HasInternet int `json:"has_internet"`
}

func (a *AITaskDetailInfo) Tr(language string) {


+29 -0 models/cloudbrain.go

@@ -310,6 +310,7 @@ type Cloudbrain struct {
Spec *Specification `xorm:"-"`
Config *CloudbrainConfig `xorm:"-"`
AppName string // application type of an HPC (supercomputing) task
HasInternet int
}

type CloudbrainShow struct {
@@ -3452,6 +3453,34 @@ func GetCloudbrainByIDs(ids []int64) ([]*Cloudbrain, error) {
Find(&cloudbrains)
}

type CountPerUserID struct {
Count int64
UserID int64
}

func GetNotebookCountGreaterThanN(n int) ([]CountPerUserID, error) {
cpuis := []CountPerUserID{}
err := x.
Table("cloudbrain").
GroupBy("user_id").Having("count(*)>"+strconv.Itoa(n)).
Select("user_id, count(*) AS count").
Where("job_type=? and (deleted_at=? or deleted_at is NULL)", "DEBUG", "0001-01-01 00:00:00").OrderBy("count(*) desc").
Find(&cpuis)
return cpuis, err
}

func GetNotebooksByUser(uid int64, offset int) ([]int64, error) {
var ints []int64
err := x.Table("cloudbrain").Cols("id").Where("job_type=? and user_id=? and (deleted_at=? or deleted_at is NULL)", "DEBUG", uid, "0001-01-01 00:00:00").Desc("id").Limit(1000, offset).Find(&ints)
return ints, err
}

func GetNotebooksCountByUser(uid int64) (int64, error) {
cloudbrain := new(Cloudbrain)
return x.Where("user_id=? and job_type=?", uid, "DEBUG").Count(cloudbrain)
}

func GetCloudbrainWithDeletedByIDs(ids []int64) ([]*Cloudbrain, error) {
cloudbrains := make([]*Cloudbrain, 0)
return cloudbrains, x.

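The three helpers above feed the new notebook cleanup cron job registered later in this diff (modules/cron/tasks_basic.go). A minimal sketch of how a cleanup pass might combine them, assuming the batch size and per-user cap come from setting.NotebookStrategy; the actual task.ClearNotebook lives in services/ai_task_service/task and is not shown here, and clearNotebookSketch and deleteNotebook are illustrative names:

// Illustrative sketch only, not the PR's implementation.
func clearNotebookSketch() {
	counts, err := models.GetNotebookCountGreaterThanN(setting.NotebookStrategy.MaxNumberPerUser)
	if err != nil {
		return
	}
	cleared := 0
	for _, c := range counts {
		// Keep the newest MaxNumberPerUser notebooks; older ones are candidates.
		ids, err := models.GetNotebooksByUser(c.UserID, setting.NotebookStrategy.MaxNumberPerUser)
		if err != nil {
			continue
		}
		for _, id := range ids {
			deleteNotebook(id) // hypothetical deletion helper
			cleared++
			if cleared >= setting.NotebookStrategy.ClearBatchSize {
				return
			}
		}
	}
}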

+20 -0 models/file_chunk.go

@@ -145,6 +145,26 @@ func getModelFileChunkByUUID(e Engine, uuid string) (*ModelFileChunk, error) {
return fileChunk, nil
}

func GetModelFileChunksByUserId(userId int64, lastTime int64, isUploadFinished bool) ([]*ModelFileChunk, error) {
return getModelFileChunksByUserId(x, userId, lastTime, isUploadFinished)
}

func getModelFileChunksByUserId(e Engine, userId int64, lastTime int64, isUploadFinished bool) ([]*ModelFileChunk, error) {
fileChunks := make([]*ModelFileChunk, 0)
cond := builder.NewCond()
cond = cond.And(builder.Eq{"user_id": userId})
if lastTime > 0 {
cond = cond.And(builder.Gte{"created_unix": lastTime})
}
if !isUploadFinished {
cond = cond.And(builder.Eq{"is_uploaded": 0})
}
if err := e.Where(cond).Find(&fileChunks); err != nil {
return nil, err
}
return fileChunks, nil
}

// InsertFileChunk insert a record into file_chunk.
func InsertFileChunk(fileChunk *FileChunk) (_ *FileChunk, err error) {
if _, err := x.Insert(fileChunk); err != nil {


+94 -0 models/llm_chat.go

@@ -0,0 +1,94 @@
package models

import (
"fmt"

"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/timeutil"
)

type LlmChat struct {
ID string `xorm:"pk"`
UserId int64 `xorm:"INDEX"`
Count int
Prompt string `xorm:"text"`
Answer string `xorm:"text"`
InvalidCount int
InvalidType string
InvalidTool string
InvalidDetail string `xorm:"text"`
ChatStatus int
ModelName string
ChatType string
ChatId string
KnowledgeBaseName string
VectorStoreType string
EmbeddingModel string
Endpoint string
CreatedUnix timeutil.TimeStamp `xorm:"created"`
UpdatedUnix timeutil.TimeStamp `xorm:"updated"`
}

func SaveChat(llmChat *LlmChat) error {
sess := xStatistic.NewSession()
defer sess.Close()
re, err := sess.Insert(llmChat)
if err != nil {
log.Error("insert llmChat error %s", err.Error())
return err
}
log.Info("success to save llmChat db.re=%s", fmt.Sprint(re))
return nil
}

func QueryChatCount(userId int64, modelName string) int64 {
sess := xStatistic.NewSession()
defer sess.Close()
query := "SELECT SUM(count) AS count FROM public.llm_chat WHERE chat_status = 1 AND user_id = ? AND model_name = ?"
sumList, err := sess.QueryInterface(query, userId, modelName)
if err == nil {
if len(sumList) == 1 {
return convertInterfaceToInt64(sumList[0]["count"])
}
}
return 0
}

func QueryInvalidPromptCount(userId int64) int64 {
sess := xStatistic.NewSession()
defer sess.Close()
query := "SELECT SUM(invalid_count) AS count FROM public.llm_chat WHERE invalid_type='prompt' and user_id = ?"
sumList, err := sess.QueryInterface(query, userId)
if err == nil {
if len(sumList) == 1 {
return convertInterfaceToInt64(sumList[0]["count"])
}
}
return 0
}

func QueryChatStatistics() ([]map[string]interface{}, error) {
sess := xStatistic.NewSession()
defer sess.Close()
query := `
SELECT
COALESCE(model_name, 'total') as model_name,
COUNT(DISTINCT id) as chat,
COUNT(DISTINCT user_id) as chat_user,
COUNT(DISTINCT id) / COUNT(DISTINCT user_id) as chat_per_user_avg,
COUNT(DISTINCT CASE WHEN chat_type = 'llm' THEN id END) as chat_llm,
COUNT(DISTINCT CASE WHEN chat_type = 'llm' THEN user_id END) as chat_llm_user,
COUNT(DISTINCT CASE WHEN chat_type = 'kb' THEN id END) as chat_kb,
COUNT(DISTINCT CASE WHEN chat_type = 'kb' THEN user_id END) as chat_kb_user,
COALESCE(SUM(invalid_count),0) as chat_illegal,
COALESCE(SUM(CASE WHEN invalid_type = 'prompt' THEN 1 END),0) as chat_illegal_prompt,
COALESCE(SUM(CASE WHEN invalid_type = 'answer' THEN 1 END),0) as chat_illegal_answer
FROM
llm_chat
GROUP BY
ROLLUP(model_name)`

results, err := sess.SQL(query).QueryInterface()
if err != nil {
return nil, err
}

return results, nil
}
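A hedged sketch of how a chat round might be persisted and the per-user quota consulted with these helpers; chatUUID, user, and visit are illustrative placeholders, and the real call sites are in services/llm_chat/llm_chat.go, also part of this PR:

// Illustrative only: record one successful round, then re-check the quota.
chat := &models.LlmChat{
	ID:         chatUUID, // assumed to be generated by the caller
	UserId:     user.ID,
	Count:      1,
	Prompt:     "hello",
	Answer:     "hi there",
	ChatStatus: 1, // QueryChatCount only sums rows with chat_status = 1
	ModelName:  "chatglm2-6b",
	ChatType:   "llm", // "llm" or "kb", per QueryChatStatistics
	ChatId:     visit.ChatId,
}
if err := models.SaveChat(chat); err != nil {
	log.Error("save chat failed: %v", err)
}
used := models.QueryChatCount(user.ID, "chatglm2-6b") // compared against MAX_FREE_TRIES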

+97 -0 models/llm_chat_visit.go

@@ -0,0 +1,97 @@
package models

import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/timeutil"
"fmt"
)

type LlmChatVisit struct {
ID int64 `xorm:"pk autoincr"`
UserId int64 `xorm:"INDEX"`
ChatId string
ModelName string
Agreement int
ExpiredTime string
ExpiredUnix int64
CreatedUnix timeutil.TimeStamp `xorm:"created"`
}

func SaveVisit(llmChatVisit *LlmChatVisit) error {
sess := xStatistic.NewSession()
defer sess.Close()
re, err := sess.Insert(llmChatVisit)
if err != nil {
log.Error("insert llmChatVisit error %s", err.Error())
return err
}
log.Info("success to save llmChatVisit db.re=%s", fmt.Sprint(re))
return nil
}

func QueryFirstVisit(userId int64) int64 {
sess := xStatistic.NewSession()
defer sess.Close()
query := "SELECT SUM(agreement) AS count FROM public.llm_chat_visit WHERE user_id = ?"
sumList, err := sess.QueryInterface(query, userId)
if err == nil {
if len(sumList) == 1 {
val := convertInterfaceToInt64(sumList[0]["count"])
return val
}
}
return 0
}

func QueryRunningChat(userId int64, modelName string, currentTime int64) (*LlmChatVisit, error) {
sess := xStatistic.NewSession()
defer sess.Close()
re := new(LlmChatVisit)
isExist, err := sess.Table(new(LlmChatVisit)).Where("user_id = ? AND model_name = ? AND ? > created_unix AND ? < expired_unix", userId, modelName, currentTime, currentTime).Get(re)
if err == nil && isExist {
return re, nil
}
return nil, err
}

func QueryByChatId(chatId string) (*LlmChatVisit, error) {
sess := xStatistic.NewSession()
defer sess.Close()
re := new(LlmChatVisit)
isExist, err := sess.Table(new(LlmChatVisit)).Where("chat_id = ?", chatId).Get(re)
if err == nil && isExist {
return re, nil
}
return nil, err
}

func UpdateChat(llmChatVisit *LlmChatVisit) error {
sess := xStatistic.ID(llmChatVisit.ID)
defer sess.Close()
re, err := sess.Cols("agreement").Update(llmChatVisit)
if err != nil {
return err
}
log.Info("update llmChatVisit db.re=" + fmt.Sprint(re))
return nil
}

func QueryChatVisitStatistics() ([]map[string]interface{}, error) {
sess := xStatistic.NewSession()
defer sess.Close()
query := `
SELECT
COALESCE(model_name, 'total') as model_name,
COUNT(DISTINCT chat_id) AS visit,
COUNT(DISTINCT user_id) AS visit_user
FROM
llm_chat_visit
GROUP BY
ROLLUP(model_name)`

results, err := sess.SQL(query).QueryInterface()
if err != nil {
return nil, err
}

return results, nil
}

+2 -0 models/models.go

@@ -198,6 +198,8 @@ func init() {
new(CloudbrainDurationStatistic),
new(UserSummaryCurrentYear),
new(ModelApp),
new(LlmChat),
new(LlmChatVisit),
)

gonicNames := []string{"SSL", "UID"}


+7 -5 models/resource_queue.go

@@ -100,11 +100,13 @@ type ResourceQueueListRes struct {
}

type ResourceQueueCodesRes struct {
ID int64
QueueCode string
Cluster string
AiCenterCode string
AiCenterName string
+ ComputeResource string
+ AccCardType string
}

func (ResourceQueueCodesRes) TableName() string {


+32 -0 modules/baiduai/baiduai.go

@@ -0,0 +1,32 @@
package baiduai

type LegalTextResponse struct {
Conclusion string `json:"conclusion"`
LogId string `json:"log_id"`
IsHitMd5 bool `json:"isHitMd5"`
ConclusionType int `json:"conclusionType"`
Data []Data `json:"data"`
}

type Data struct {
Msg string `json:"msg"`
Conclusion string `json:"conclusion"`
SubType int `json:"subType"`
ConclusionType int `json:"conclusionType"`
Type int `json:"type"`
Hits []Hit `json:"hits"`
}

type Hit struct {
Probability int `json:"probability"`
DatasetName string `json:"datasetName"`
Words []string `json:"words"`
ModelHitPositions [][]float64 `json:"modelHitPositions"`
WordHitPositions []WordHitPosition `json:"wordHitPositions"`
}

type WordHitPosition struct {
Positions [][]int `json:"positions"`
Label string `json:"label"`
Keyword string `json:"keyword"`
}

+72 -0 modules/baiduai/resty.go

@@ -0,0 +1,72 @@
package baiduai

import (
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"strings"
)

/**
 * Generate an authentication signature (Access Token) from the configured AK/SK pair.
 * @return string access token
 */
func getAccessToken() string {
postData := fmt.Sprintf("grant_type=client_credentials&client_id=%s&client_secret=%s", setting.BAIDU_AI.API_KEY, setting.BAIDU_AI.SECRET_KEY)
resp, err := http.Post(setting.BAIDU_AI.URL, "application/x-www-form-urlencoded", strings.NewReader(postData))
if err != nil {
log.Error("getAccessToken request failed: %s", err.Error())
return ""
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
log.Error("getAccessToken read body failed: %s", err.Error())
return ""
}
accessTokenObj := map[string]string{}
json.Unmarshal(body, &accessTokenObj)
return accessTokenObj["access_token"]
}

/**
 * Baidu AI text content compliance check.
 */
func CheckLegalText(query string) (*LegalTextResponse, error) {
var result LegalTextResponse

url := setting.BAIDU_AI.LEGAL_TEXT_URL + getAccessToken()
payload := strings.NewReader("text=" + query)
log.Info("resty CheckLegalText() payload %+v", payload)
client := &http.Client{}

req, err := http.NewRequest("POST", url, payload)
if err != nil {
log.Error("resty CheckLegalText() Request error: %s", err.Error())
return &result, fmt.Errorf("resty CheckLegalText(): %s", err)
}
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
req.Header.Add("Accept", "application/json")

res, err := client.Do(req)
if err != nil {
log.Error("resty CheckLegalText() Response error: %s", err.Error())
return &result, fmt.Errorf("resty CheckLegalText(): %s", err)
}
defer res.Body.Close()
log.Error("resty CheckLegalText() Response status: %s\n", res.Status)

body, err := ioutil.ReadAll(res.Body)
if err != nil {
log.Error("resty CheckLegalText() Response body error: %s", err.Error())
return &result, fmt.Errorf("resty CheckLegalText(): %s", err)
}

json.Unmarshal(body, &result)
log.Info("resty CheckLegalText() results: %+v", result)
return &result, nil
}
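A hedged usage sketch for the checker. The conclusionType values come from Baidu's public text-censor documentation, not from this diff: 1 compliant, 2 non-compliant, 3 suspect, 4 review failure.

// Illustrative only: gate a prompt on Baidu's verdict.
res, err := baiduai.CheckLegalText(prompt)
if err != nil {
	return err // network or decode failure; the caller decides whether to fail open
}
if res.ConclusionType != 1 {
	// Flagged text; the handlers in this PR count such hits via invalid_count.
}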

+12 -0 modules/cron/tasks_basic.go

@@ -209,6 +209,17 @@ func registerHandleClearCloudbrainResult() {
})
}

func registerHandleClearNotebook() {
RegisterTaskFatal("handle_notebook_clear", &BaseConfig{
Enabled: true,
RunAtStart: setting.NotebookStrategy.RunAtStart,
Schedule: setting.NotebookStrategy.Cron,
}, func(ctx context.Context, _ *models.User, _ Config) error {
task.ClearNotebook()
return nil
})
}

func registerHandleSummaryStatistic() {
RegisterTaskFatal("handle_summary_statistic", &BaseConfig{
Enabled: true,
@@ -379,6 +390,7 @@ func initBasicTasks() {
registerHandleRepoAndUserStatistic()
registerHandleSummaryStatistic()
registerHandleClearCloudbrainResult()
registerHandleClearNotebook()

registerSyncCloudbrainStatus()
registerHandleOrgStatistic()


+56 -0 modules/llm_chat/llm_chat.go

@@ -0,0 +1,56 @@
package llm_chat

type LLMChatResponse struct {
Answer string `json:"answer"`
}

type KBChatResponse struct {
Answer string `json:"answer"`
Docs []string `json:"docs"`
}

type SearchDocResponse struct {
Results []SearchDocResult `json:"results"`
}

type SearchDocResult struct {
PageContent string `json:"page_content"`
Metadata struct {
} `json:"metadata"`
Score float64 `json:"score"`
}

type RecreateVectorStoreResponse struct {
Code int `json:"code"`
Msg string `json:"msg"`
Total int `json:"total"`
Finished int `json:"finished"`
Doc string `json:"doc"`
}

type LLMBasicMsgWithData struct {
Code int `json:"code"`
Msg string `json:"msg"`
Data []string `json:"data"`
}

type LLMDeleteDocMsg struct {
Code int `json:"code"`
Msg string `json:"msg"`
Data struct {
FailedFiles map[string]string `json:"failed_files"`
} `json:"data"`
}

type LLMBasicMsg struct {
Code int `json:"code"`
Msg string `json:"msg"`
}

type LLMErrorMsg struct {
Detail []struct {
Loc []interface{} `json:"loc"`
Msg string `json:"msg"`
Type string `json:"type"`
} `json:"detail"`
}

+578 -0 modules/llm_chat/resty.go

@@ -0,0 +1,578 @@
package llm_chat

import (
"bufio"
"bytes"
"code.gitea.io/gitea/modules/log"
constants "code.gitea.io/gitea/modules/setting"
api "code.gitea.io/gitea/modules/structs"
"crypto/tls"
"encoding/json"
"fmt"
"github.com/go-resty/resty/v2"
"io"
"mime/multipart"
"net/http"
"unicode/utf8"
)

var (
restyClient *resty.Client
)

const (
urlLLMChat = "/chat/chat"
urlKnowledgeBaseChat = "/chat/knowledge_base_chat"
urlKnowledgeBaseList = "/knowledge_base/list_knowledge_bases"
urlKnowledgeBaseCreate = "/knowledge_base/create_knowledge_base"
urlKnowledgeBaseDelete = "/knowledge_base/delete_knowledge_base"
urlKnowledgeBaseListFiles = "/knowledge_base/list_files"
urlKnowledgeBaseSearchDoc = "/knowledge_base/search_docs"
urlKnowledgeBaseUploadDoc = "/knowledge_base/upload_docs"
urlKnowledgeBaseDeleteDoc = "/knowledge_base/delete_docs"
urlKnowledgeBaseUpdateDoc = "/knowledge_base/update_docs"
urlKnowledgeBaseDownload = "/knowledge_base/download_doc"
urlKnowledgeBaseRecreate = "/knowledge_base/recreate_vector_store"
)

func GetEndpoint(modelName string) string {
var endpoint string
switch modelName {
case "chatglm2-6b":
endpoint = constants.LLM_CHAT_API.CHATGLM2_HOST
case "llama2-7b-chat-hf":
endpoint = constants.LLM_CHAT_API.LLAMA2_HOST
default:
endpoint = constants.LLM_CHAT_API.CHATGLM2_HOST
}

return endpoint
}

func getRestyClient() *resty.Client {
if restyClient == nil {
restyClient = resty.New()
restyClient.SetTLSClientConfig(&tls.Config{InsecureSkipVerify: true})
}
return restyClient
}

// Custom function to split by character
func scanByCharacter(data []byte, atEOF bool) (int, []byte, error) {
if len(data) == 0 {
return 0, nil, nil
}
return 1, data[:1], nil
}

func StreamLLMChat(params api.LLMChatMessage, resultChan chan string, errChan chan error, done chan struct{}) {
client := http.Client{}
endpoint := GetEndpoint(params.ModelName)
requestBody, _ := json.Marshal(params)
log.Info("Request body: %s\n", requestBody)
request, err := http.NewRequest("POST", endpoint+urlLLMChat, bytes.NewBuffer(requestBody))
if err != nil {
log.Error("Error creating request: %v", err)
errChan <- err
return
}
request.Header.Set("Content-Type", "application/json")
resp, err := client.Do(request)
if err != nil {
log.Error("Error sending request: %v", err)
errChan <- err
return
}
defer resp.Body.Close()
log.Info("Response status: %s\n", resp.Status)

scanner := bufio.NewScanner(resp.Body)
scanner.Split(scanByCharacter)
var invalidCharBuffer string
for scanner.Scan() {
char := scanner.Text()
if len(invalidCharBuffer) > 0 {
char = invalidCharBuffer + char
invalidCharBuffer = ""
}
if utf8.ValidString(char) {
//runes := []rune(char)
//log.Info("%s -> %U \n", char, runes[0])
resultChan <- char
} else {
invalidCharBuffer += char
}
}
if len(invalidCharBuffer) > 0 {
log.Info("Unprocessed invalid UTF-8 characters: %s\n", invalidCharBuffer)
}
close(done)

if scanner.Err() != nil {
errChan <- scanner.Err()
}
}

func StreamKBChat(params api.KBChatMessage, resultChan chan string, errChan chan error, done chan struct{}) {
client := http.Client{}
endpoint := GetEndpoint(params.ModelName)
requestBody, _ := json.Marshal(params)
log.Info("Request body: %s\n", requestBody)
request, err := http.NewRequest("POST", endpoint+urlKnowledgeBaseChat, bytes.NewBuffer(requestBody))
if err != nil {
log.Error("Error creating request: %v", err)
errChan <- err
return
}
request.Header.Set("Content-Type", "application/json")
resp, err := client.Do(request)
if err != nil {
log.Error("Error sending request: %v", err)
errChan <- err
return
}
defer resp.Body.Close()
log.Info("Response status: %s\n", resp.Status)

// Create a buffer to read the response in 4096-byte blocks
buffer := make([]byte, 4096)
for {
// Read a 4096-byte block from the response body
n, err := resp.Body.Read(buffer)
if err != nil {
if err != io.EOF {
errChan <- err
}
break
}
resultChan <- string(buffer[:n])
}
close(done)
}

func SendLLMChat(params api.LLMChatMessage) (*LLMChatResponse, error) {
client := getRestyClient()
retry := 0
endpoint := GetEndpoint(params.ModelName)

request, _ := json.Marshal(params)
log.Info("resty request body: %s", request)

sendjob:
res, err := client.R().
SetHeader("Content-Type", "application/json").
SetBody(params).
Post(endpoint + urlLLMChat)

log.Info("resty status: %+v, route: %+v", res.StatusCode(), res.Request.URL)

result := LLMChatResponse{
Answer: res.String(),
}
log.Info("resty response: %+v", result)

if err != nil {
return &result, fmt.Errorf("resty SendLLMChat(): %s", err)
}

if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
retry++
goto sendjob
}
return &result, nil
}

func SendKBChat(params api.KBChatMessage) (*KBChatResponse, error) {
client := getRestyClient()
retry := 0
endpoint := GetEndpoint(params.ModelName)
var result KBChatResponse

request, _ := json.Marshal(params)
log.Info("resty request body: %s", request)

sendjob:
res, err := client.R().
SetHeader("Content-Type", "application/json").
SetBody(params).
Post(endpoint + urlKnowledgeBaseChat)

log.Info("resty status: %+v, route: %+v", res.StatusCode(), res.Request.URL)

response := res.String()
json.Unmarshal([]byte(response), &result)
log.Info("resty response: %+v", result)

if err != nil {
return &result, fmt.Errorf("resty SendLLMChat(): %s", err)
}

if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
retry++
goto sendjob
}
return &result, nil
}

func ListKnowledgeBase() (*LLMBasicMsgWithData, error) {
client := getRestyClient()
retry := 0
endpoint := GetEndpoint("")
var result LLMBasicMsgWithData

sendjob:
res, err := client.R().
SetHeader("Content-Type", "application/json").
SetResult(&result).
Get(endpoint + urlKnowledgeBaseList)

log.Info("resty status: %+v, route: %+v", res.StatusCode(), res.Request.URL)

response, _ := json.Marshal(result)
log.Info("resty response: %s", response)

if err != nil {
return &result, fmt.Errorf("resty ListKnowledgeBase(): %s", err)
}

if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
retry++
goto sendjob
}
return &result, nil
}

func CreateKnowledgeBase(params api.CreateKnowledgeBaseParams) (*LLMBasicMsg, error) {
client := getRestyClient()
retry := 0
endpoint := GetEndpoint("")
var result LLMBasicMsg

request, _ := json.Marshal(params)
log.Info("resty request body: %s", request)

sendjob:
res, err := client.R().
SetHeader("Content-Type", "application/json").
SetBody(params).
SetResult(&result).
Post(endpoint + urlKnowledgeBaseCreate)

log.Info("resty status: %+v, route: %+v", res.StatusCode(), res.Request.URL)

response, _ := json.Marshal(result)
log.Info("resty response: %s", response)

if err != nil {
return &result, fmt.Errorf("resty CreateKnowledgeBase(): %s", err)
}

if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
retry++
goto sendjob
}
return &result, nil
}

func DeleteKnowledgeBase(knowledgeBaseName string) (*LLMBasicMsgWithData, error) {
client := getRestyClient()
retry := 0
endpoint := GetEndpoint("")
var result LLMBasicMsgWithData

log.Info("resty request body: %s", knowledgeBaseName)

sendjob:
res, err := client.R().
SetHeader("Content-Type", "application/text").
SetBody(knowledgeBaseName).
SetResult(&result).
Post(endpoint + urlKnowledgeBaseDelete)

log.Info("resty status: %+v, route: %+v", res.StatusCode(), res.Request.URL)

response, _ := json.Marshal(result)
log.Info("resty response: %s", response)

if err != nil {
log.Error("resty DeleteKnowledgeBase(): %s", err)
return &result, fmt.Errorf("resty DeleteKnowledgeBase(): %s", err)
}
if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
retry++
goto sendjob
}
return &result, nil
}

func KBListFiles(knowledgeBaseName string) (*LLMBasicMsgWithData, error) {
client := getRestyClient()
retry := 0
endpoint := GetEndpoint("")
var result LLMBasicMsgWithData

log.Info("resty request body: %s", knowledgeBaseName)

sendjob:
res, err := client.R().
SetQueryParams(map[string]string{
"knowledge_base_name": knowledgeBaseName,
}).
SetHeader("Content-Type", "application/text").
SetBody(knowledgeBaseName).
SetResult(&result).
Get(endpoint + urlKnowledgeBaseListFiles)

log.Info("resty status: %+v, route: %+v", res.StatusCode(), res.Request.URL)

response, _ := json.Marshal(result)
log.Info("resty response: %s", response)

if err != nil {
log.Error("resty KBListFiles(): %s", err)
return &result, fmt.Errorf("resty KBListFiles(): %s", err)
}
if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
retry++
goto sendjob
}
return &result, nil
}

func KBSearchDoc(params api.SearchDocParams) (*SearchDocResponse, error) {
client := getRestyClient()
retry := 0
endpoint := GetEndpoint("")
var result []SearchDocResult

log.Info("resty request body: %+v", params)

sendjob:
res, err := client.R().
SetHeader("Content-Type", "application/json").
SetBody(params).
SetResult(&result).
Post(endpoint + urlKnowledgeBaseSearchDoc)

log.Info("resty status: %+v, route: %+v", res.StatusCode(), res.Request.URL)

resultAPI := SearchDocResponse{
Results: result,
}
response, _ := json.Marshal(resultAPI)
log.Info("resty response: %s", response)

if err != nil {
log.Error("resty KBListFiles(): %s", err)
return &resultAPI, fmt.Errorf("resty KBListFiles(): %s", err)
}
if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
retry++
goto sendjob
}
return &resultAPI, nil
}

func KBDeleteDoc(params api.DeleteDocParams) (interface{}, error) { // *LLMDeleteDocMsg, error) {
client := getRestyClient()
retry := 0
endpoint := GetEndpoint("")
var result LLMDeleteDocMsg

request, _ := json.Marshal(params)
log.Info("resty request body: %s", request)

sendjob:
res, err := client.R().
SetHeader("Content-Type", "application/json").
SetBody(params).
SetResult(&result).
Post(endpoint + urlKnowledgeBaseDeleteDoc)

log.Info("resty status: %+v, route: %+v", res.StatusCode(), res.Request.URL)
if err != nil {
var errResult LLMErrorMsg
json.Unmarshal([]byte(res.String()), &errResult)
return &errResult, fmt.Errorf("resty KBDeleteDoc(): %s", err)
}

response, _ := json.Marshal(result)
log.Info("resty response: %s", response)

if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
retry++
goto sendjob
}
return &result, nil
}

func KBUpdateDoc(params api.UpdateDocParams) (*LLMBasicMsg, error) {
client := getRestyClient()
retry := 0
endpoint := GetEndpoint("")
var result LLMBasicMsg

request, _ := json.Marshal(params)
log.Info("resty request body: %s", request)

sendjob:
res, err := client.R().
SetHeader("Content-Type", "application/json").
SetBody(params).
SetResult(&result).
Post(endpoint + urlKnowledgeBaseUpdateDoc)

log.Info("resty status: %+v, route: %+v", res.StatusCode(), res.Request.URL)

response, _ := json.Marshal(result)
log.Info("resty response: %s", response)

if err != nil {
return &result, fmt.Errorf("resty KBDeleteDoc(): %s", err)
}

if res.StatusCode() == http.StatusUnauthorized && retry < 1 {
retry++
goto sendjob
}
return &result, nil
}

func KBRecreateVectorStore(params api.RecreateVectorStoreParams, resultChan chan string, errChan chan error, done chan struct{}) {
client := http.Client{}
endpoint := GetEndpoint("")
requestBody, _ := json.Marshal(params)
log.Info("Request body: %s\n", requestBody)
request, err := http.NewRequest("POST", endpoint+urlKnowledgeBaseRecreate, bytes.NewBuffer(requestBody))
if err != nil {
log.Error("Error creating request: %v", err)
errChan <- err
return
}

request.Header.Set("Content-Type", "application/json")
resp, err := client.Do(request)
if err != nil {
log.Error("Error sending request: %v", err)
errChan <- err
return
}
defer resp.Body.Close()
log.Info("Response status: %s\n", resp.Status)

// Create a buffer to read the response in 4096-byte blocks
buffer := make([]byte, 4096)
for {
// Read a 4096-byte block from the response body
n, err := resp.Body.Read(buffer)
if err != nil {
if err != io.EOF {
errChan <- err
}
break
}
resultChan <- string(buffer[:n])
}
close(done)
}

func GetUploadDocUrl() (string, error) {
endpoint := GetEndpoint("") + urlKnowledgeBaseUploadDoc
log.Info("resty GetUploadDocUrl: %s", endpoint)
return endpoint, nil
}

func writeDocs(fileHeader *multipart.FileHeader, writer *multipart.Writer) error {
filename := fileHeader.Filename
file, err := fileHeader.Open()
if err != nil {
log.Error(err.Error())
return err
}
defer file.Close()
part, err := writer.CreateFormFile("files", filename)
if err != nil {
log.Error("Error creating form file:", err)
return err
}
_, err = io.Copy(part, file)
return nil
}

func writeBody(requestBody *bytes.Buffer, form api.LLMChatUploadForm) (string, error) {
writer := multipart.NewWriter(requestBody)
defer writer.Close()
err := writer.WriteField("knowledge_base_name", form.KnowledgeBaseName)
if err != nil {
log.Error("failed to create upload_doc() writer")
return "", err
}
for _, fileHeader := range form.Files {
err = writeDocs(fileHeader, writer)
if err != nil {
log.Error("Error getting doc content: %s", err)
return "", err
}
}
return writer.FormDataContentType(), nil
}

func UploadDocs(modelName string, form api.LLMChatUploadForm) (*map[string]interface{}, error) {
log.Info("######### received by resty\n")

var requestBody bytes.Buffer
headerValue, err := writeBody(&requestBody, form)
if err != nil {
log.Error("upload docs write body failed.")
return nil, err
}

endpoint := GetEndpoint(modelName)
req, err := http.NewRequest("POST", endpoint+urlKnowledgeBaseUploadDoc, &requestBody)
if err != nil {
log.Info("Error creating request:", err)
return nil, err
}

req.Header.Set("Content-Type", headerValue)

client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
log.Info("Error making request:", err)
return nil, err
}
defer resp.Body.Close()
log.Info("############## Response Status:", resp.Status)

var errResult map[string]interface{}
//if resp.StatusCode == http.StatusUnprocessableEntity {
if resp.StatusCode != http.StatusOK {
bodyBytes, err := io.ReadAll(resp.Body)
if err != nil {
log.Info("Error reading response body:", err)
return nil, err
}
err = json.Unmarshal(bodyBytes, &errResult)
log.Error("##############upload_docs() errResult: %+v\n", errResult)
return &errResult, nil
}
log.Info("############## Response Body: %+v\n", resp.Body)

// Parse the response
var result map[string]interface{}
err = json.NewDecoder(resp.Body).Decode(&result)
if err != nil {
log.Info("Error decoding response:", err)
return nil, err
}
return &result, nil
}

func GetDownloadDocUrl(knowledgeBaseName string, fileName string) (string, error) {
endpoint := GetEndpoint("") + urlKnowledgeBaseDownload
params := "?knowledge_base_name=" + url.QueryEscape(knowledgeBaseName) + "&file_name=" + url.QueryEscape(fileName)
log.Info("resty GetDownloadDocUrl: %s", endpoint+params)
return endpoint + params, nil
}
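The three streaming functions above (StreamLLMChat, StreamKBChat, KBRecreateVectorStore) share a channel contract: chunks arrive on resultChan, a failure is reported on errChan, and done is closed when the response body is exhausted. A minimal consumer sketch under that contract, where msg and the response writer w are illustrative; the real consumers live in services/llm_chat:

resultChan := make(chan string)
errChan := make(chan error, 1) // buffered so a late error cannot block the producer
done := make(chan struct{})
go llm_chat.StreamLLMChat(msg, resultChan, errChan, done)
for {
	select {
	case chunk := <-resultChan:
		w.Write([]byte(chunk)) // forward to the HTTP client and flush
	case err := <-errChan:
		log.Error("stream failed: %v", err)
		return
	case <-done:
		return
	}
}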

+13 -1 modules/modelappservice/modelsevice.go

@@ -14,9 +14,15 @@ import (
)

var wenxinChannel = make(chan *models.ModelApp, 10000)
var isCD bool

func Init() {
urls := setting.BaiduWenXin.ModelArtsWenXinURL
if strings.Index(urls, "cdzs.cn") > 0 {
isCD = true
} else {
isCD = false
}
urlarray := strings.Split(urls, ",")
urlNums := len(urlarray)
log.Info("url nums=" + fmt.Sprint(urlNums))
@@ -45,7 +51,13 @@ func consumerOrder(in <-chan *models.ModelApp, url string) {
continue
}
log.Info("goroutine id=" + fmt.Sprint(goroutine_id) + " wenxin text=" + modelApp.Desc)
- result, err := modelarts.CreateWenXinJobToCD(modelApp, url)
+ var result *modelarts.WenXinResult
+ var err error
+ if isCD {
+ result, err = modelarts.CreateWenXinJobToCD(modelApp, url)
+ } else {
+ result, err = modelarts.CreateWenXinJob(modelApp, url)
+ }
if err == nil {
if !modelarts.SendPictureReivew(result.Result) {
modelApp.Status = -1


+54 -0 modules/setting/setting.go

@@ -652,6 +652,13 @@ var (
Cron string
RunAtStart bool
}{}
NotebookStrategy = struct {
ClearEnabled bool
ClearBatchSize int
MaxNumberPerUser int
Cron string
RunAtStart bool
}{}

C2NetInfos *C2NetSqInfos
CenterInfos *AiCenterInfos
@@ -856,6 +863,25 @@ var (
ATTACHEMENT_SIZE_A_USER int64 //G
ALL_ATTACHEMENT_NUM_SDK int
}{}

LLM_CHAT_API = struct {
CHATGLM2_HOST string
CHATGLM2_MAX_LENGTH int
LLAMA2_HOST string
LLAMA2_MAX_LENGTH int
COMMON_KB string
MAX_FREE_TRIES int64
LEGAL_CHECK bool
LEGAL_MAX_COUNT int64
CHAT_EXPIRED_MINUTES int64
}{}

BAIDU_AI = struct {
API_KEY string
SECRET_KEY string
URL string
LEGAL_TEXT_URL string
}{}
)

// DateLang transforms standard language locale name to corresponding value in datetime plugin.
@@ -1735,12 +1761,30 @@ func NewContext() {
BaiduWenXin.RUN_WORKERS = sec.Key("RUN_WORKERS").MustInt(1)
BaiduWenXin.MODEL_SERVERS = sec.Key("MODEL_SERVERS").MustInt(1)

sec = Cfg.Section("llm_chat_api")
LLM_CHAT_API.CHATGLM2_HOST = sec.Key("CHATGLM2_HOST").MustString("")
LLM_CHAT_API.CHATGLM2_MAX_LENGTH = sec.Key("CHATGLM2_MAX_LENGTH").MustInt(8192)
LLM_CHAT_API.LLAMA2_HOST = sec.Key("LLAMA2_HOST").MustString("")
LLM_CHAT_API.LLAMA2_MAX_LENGTH = sec.Key("LLAMA2_MAX_LENGTH").MustInt(4096)
LLM_CHAT_API.COMMON_KB = sec.Key("COMMON_KNOWLEDGE_BASE").MustString("")
LLM_CHAT_API.MAX_FREE_TRIES = sec.Key("MAX_FREE_TRIES").MustInt64(200)
LLM_CHAT_API.LEGAL_CHECK = sec.Key("LEGAL_CHECK").MustBool(false)
LLM_CHAT_API.LEGAL_MAX_COUNT = sec.Key("LEGAL_MAX_COUNT").MustInt64(5)
LLM_CHAT_API.CHAT_EXPIRED_MINUTES = sec.Key("CHAT_EXPIRED_MINUTES").MustInt64(30)

sec = Cfg.Section("baidu_ai")
BAIDU_AI.API_KEY = sec.Key("API_KEY").MustString("")
BAIDU_AI.SECRET_KEY = sec.Key("SECRET_KEY").MustString("")
BAIDU_AI.URL = sec.Key("URL").MustString("https://aip.baidubce.com/oauth/2.0/token")
BAIDU_AI.LEGAL_TEXT_URL = sec.Key("LEGAL_TEXT_URL").MustString("https://aip.baidubce.com/rest/2.0/solution/v1/text_censor/v2/user_defined?access_token=")

GetGrampusConfig()
GetModelartsCDConfig()
getModelConvertConfig()
getModelSafetyConfig()
getModelAppConfig()
getClearStrategy()
getNotebookStrategy()
NewScreenMapConfig()
}

@@ -1850,6 +1894,16 @@ func getClearStrategy() {
ClearStrategy.RunAtStart = sec.Key("RUN_AT_START").MustBool(false)
}

func getNotebookStrategy() {
sec := Cfg.Section("notebook_strategy")
NotebookStrategy.ClearEnabled = sec.Key("CLEAR_ENABLED").MustBool(false)
NotebookStrategy.ClearBatchSize = sec.Key("CLEAR_BATCH_SIZE").MustInt(300)
NotebookStrategy.MaxNumberPerUser = sec.Key("MAX_NUMBER").MustInt(5)
NotebookStrategy.Cron = sec.Key("CRON").MustString("* 0,0 2-8 * * ?")
NotebookStrategy.RunAtStart = sec.Key("RUN_AT_START").MustBool(false)
}

func GetGrampusConfig() {
sec := Cfg.Section("grampus")

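Read together with app.ini.sample, the new keys above imply configuration sections along these lines; the key names and defaults come straight from the code, while the host values are illustrative placeholders:

[notebook_strategy]
CLEAR_ENABLED = false
CLEAR_BATCH_SIZE = 300
MAX_NUMBER = 5
CRON = * 0,0 2-8 * * ?
RUN_AT_START = false

[llm_chat_api]
CHATGLM2_HOST = http://chatglm2.example.com:8000
CHATGLM2_MAX_LENGTH = 8192
LLAMA2_HOST = http://llama2.example.com:8000
LLAMA2_MAX_LENGTH = 4096
COMMON_KNOWLEDGE_BASE =
MAX_FREE_TRIES = 200
LEGAL_CHECK = false
LEGAL_MAX_COUNT = 5
CHAT_EXPIRED_MINUTES = 30

[baidu_ai]
API_KEY =
SECRET_KEY =
URL = https://aip.baidubce.com/oauth/2.0/token
LEGAL_TEXT_URL = https://aip.baidubce.com/rest/2.0/solution/v1/text_censor/v2/user_defined?access_token=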


+82 -0 modules/structs/llm_chat.go

@@ -0,0 +1,82 @@
package structs

import "mime/multipart"

type LLMChatHistory struct {
Role string `json:"role"`
Content string `json:"content"`
}

type LLMChatMessage struct {
ModelName string `json:"model_name" binding:"Required"`
Query string `json:"query" binding:"Required"`
History []LLMChatHistory `json:"history"`
Stream bool `json:"stream" `
}

type KBChatMessage struct {
ModelName string `json:"model_name" binding:"Required"`
Query string `json:"query" binding:"Required"`
KnowledgeBaseName string `json:"knowledge_base_name" binding:"Required"`
History []LLMChatHistory `json:"history"`
Stream bool `json:"stream"`
TopK int `json:"top_k"`
ScoreThreshold float64 `json:"score_threshold"`
}

type CreateKnowledgeBaseParams struct {
KnowledgeBaseName string `json:"knowledge_base_name"`
VectorStoreType string `json:"vector_store_type"`
EmbedModel string `json:"embed_model"`
}

type SearchDocParams struct {
Query string `json:"query"`
KnowledgeBaseName string `json:"knowledge_base_name"`
TopK int `json:"top_k"`
ScoreThreshold float64 `json:"score_threshold"`
}

type DeleteDocParams struct {
KnowledgeBaseName string `json:"knowledge_base_name" binding:"Required"`
FileNames []string `json:"file_names" binding:"Required"`
DeleteContent bool `json:"delete_content"`
NotRefreshVsCache bool `json:"not_refresh_vs_cache"`
}

type UpdateDocParams struct {
KnowledgeBaseName string `json:"knowledge_base_name"`
FileNames string `json:"file_names"`
NotRefreshVsCache bool `json:"not_refresh_vs_cache"`
}

type RecreateVectorStoreParams struct {
KnowledgeBaseName string `json:"knowledge_base_name"`
AllowEmptyKb bool `json:"allow_empty_kb"`
VsType string `json:"vs_type"`
EmbedModel string `json:"embed_model"`
}

type LLMChatCountsResults struct {
MaxTries int64 `json:"max_tries"`
Counts int64 `json:"counts"`
CanChat bool `json:"can_chat"`
//FirstVisit bool `json:"first_visit"`
}

type KBChatAnswer struct {
Answer string `json:"answer"`
}

type KBChatDocs struct {
Docs []string `json:"docs"`
}
type LegalTextParams struct {
Text string `json:"text"`
}

type LLMChatUploadForm struct {
KnowledgeBaseName string `form:"knowledge_base_name"`
Files []*multipart.FileHeader `form:"files"`
Override bool `form:"override"`
}
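For reference, a knowledge-base chat request built against these structs serializes as sketched below (illustrative content; the JSON keys follow the struct tags above):

msg := structs.KBChatMessage{
	ModelName:         "chatglm2-6b",
	Query:             "What does this repo do?",
	KnowledgeBaseName: "my_kb",
	History: []structs.LLMChatHistory{
		{Role: "user", Content: "hi"},
		{Role: "assistant", Content: "hello"},
	},
	Stream:         true,
	TopK:           3,
	ScoreThreshold: 0.5,
}
payload, _ := json.Marshal(msg)
// => {"model_name":"chatglm2-6b","query":"What does this repo do?","knowledge_base_name":"my_kb",
//     "history":[{"role":"user","content":"hi"},{"role":"assistant","content":"hello"}],
//     "stream":true,"top_k":3,"score_threshold":0.5}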

+11 -2 modules/templates/helper.go

@@ -107,6 +107,15 @@ func NewFuncMap() []template.FuncMap {
"DebugAttachSize": func() int {
return setting.DebugAttachSize * 1000 * 1000 * 1000
},
"LlmCommonKB": func() string {
return setting.LLM_CHAT_API.COMMON_KB
},
"LlmMaxCounts": func() string {
return strconv.FormatInt(setting.LLM_CHAT_API.MAX_FREE_TRIES, 10)
},
"LlmExpireMinutes": func() string {
return strconv.FormatInt(setting.LLM_CHAT_API.CHAT_EXPIRED_MINUTES, 10)
},
"AvatarLink": models.AvatarLink,
"Safe": Safe,
"SafeJS": SafeJS,
@@ -260,7 +269,7 @@ func NewFuncMap() []template.FuncMap {
return dict, nil
},
"Printf": fmt.Sprintf,
"ToLower": strings.ToLower,
"ToLower": strings.ToLower,
"Escape": Escape,
"Sec2Time": models.SecToTime,
"ParseDeadline": func(deadline string) []string {
@@ -423,7 +432,7 @@ func NewTextFuncMap() []texttmpl.FuncMap {
return dict, nil
},
"Printf": fmt.Sprintf,
"ToLower": strings.ToLower,
"ToLower": strings.ToLower,
"Escape": Escape,
"Sec2Time": models.SecToTime,
"ParseDeadline": func(deadline string) []string {


+5 -1 modules/urfs_client/objectstorage/mocks/objectstorage_mock.go

@@ -1,5 +1,9 @@
// Code generated by MockGen. DO NOT EDIT.
// Source: objectstorage.go

// Package mocks is a generated GoMock package.
package mocks

import (
gomock "github.com/golang/mock/gomock"
reflect "reflect"
)

+11 -1 options/locale/locale_en-US.ini

@@ -354,7 +354,7 @@ reset_password_mail_sent_prompt = A confirmation email has been sent to <b>%s</b
active_your_account = Activate Your Account
account_activated = Account has been activated
prohibit_login = Sign In Prohibited
- prohibit_login_desc = Your account is prohibited to sign in, please contact your site administrator.
+ prohibit_login_desc = Your account is prohibited to sign in, please contact your site administrator: secretariat@openi.org.cn
resent_limit_prompt = You have already requested an activation email recently. Please wait 3 minutes and try again.
has_unconfirmed_mail = Hi %s, you have an unconfirmed email address (<b>%s</b>).
has_unconfirmed_mail_resend = If you did not receive the activation email, or need to resend it, please click the "Resend your activation email" button below.
@@ -1125,6 +1125,7 @@ images.name_placerholder = Please enter the image name
images.descr_placerholder = The description should not exceed 1000 characters
image.label_tooltips = Example Python 3.7, Tensorflow 2.0, cuda 10, pytorch 1.6
images.public_tooltips = After the image is set to public, it can be seen by other users.
images.submit_tooltips = The code directory /code, dataset directory /dataset will not be submitted with the image, and other directories will be packaged into the image.
images.name_format_err=The format of image tag is wrong.
images.name_rule50 = Please enter letters, numbers, _ and - up to 50 characters and starts with a letter.
images.name_rule100 = Please enter letters, numbers, _ and - up to 100 characters and cannot end with a dash (-).
@@ -3466,6 +3467,7 @@ branch_not_exists = The branch does not exist. Please refresh and select again.
dataset_number_over_limit = The dataset count exceed the limit
result_cleared=The files of the task have been cleared, can not restart or retrain any more, please create a new task instead
model_not_exist=The model in the task does not exist or has been deleted
too_many_notebook=A user can have up to 5 debug tasks, please try again after deleting some debug tasks.

[common_error]
system_error = System error.Please try again later
@@ -3482,3 +3484,11 @@ builidng_fail = Failed to build AI Model, please try again later
deletion_notice_repo = There is a deploying or running service related to this repository, please stop the service before deletion.
deletion_notice_trainjob = There is a deploying or running service related to this task, please stop the service before deletion.
stop_service_failed = Failed to stop deploy service


[llm_chat]
chat_expired = Chat session expired, please create a new chat.
max_free_exceed = You have reached the maximum number of free chats.
query_empty = Empty prompt is not allowed.
query_too_long = Your prompt is too long.
server_error = Server busy, please try again later.

+10 -1 options/locale/locale_zh-CN.ini

@@ -357,7 +357,7 @@ reset_password_mail_sent_prompt=确认电子邮件已被发送到 <b>%s</b>。
active_your_account=激活您的帐户
account_activated=帐户已激活
prohibit_login=禁止登录
- prohibit_login_desc=您的帐户被禁止登录,请与网站管理员联系
+ prohibit_login_desc=您的帐户被禁止登录,请与网站管理员联系:secretariat@openi.org.cn
resent_limit_prompt=您请求发送激活邮件过于频繁,请等待 3 分钟后再试!
has_unconfirmed_mail=%s 您好,系统检测到您有一封发送至 <b>%s</b> 但未被确认的邮件。
has_unconfirmed_mail_resend=如果您未收到激活邮件,或需要重新发送,请单击下方的 "重新发送确认邮件 " 按钮。
@@ -1125,6 +1125,7 @@ images.name_placerholder = 请输入镜像Tag
images.descr_placerholder = 描述字数不超过1000个字符
image.label_tooltips = 如Python 3.7, Tensorflow 2.0, cuda 10, pytorch 1.6
images.public_tooltips = 镜像设置为公开后,可被其他用户看到。
images.submit_tooltips = 代码目录/code,数据集目录/dataset不会随镜像提交,其他目录都会打包到镜像中。
images.name_format_err=镜像Tag格式错误。
images.name_rule50 = 请输入字母、数字、_和-,最长50个字符,且以字母开头。
images.name_rule100 = 请输入字母、数字、_和-,最长100个字符,且不能以中划线(-)结尾。
@@ -3489,6 +3490,7 @@ branch_not_exists = 代码分支不存在,请刷新后重试
dataset_number_over_limit = 选择的数据集文件数量超出限制
result_cleared=源任务的文件已被清理,无法再次调试或复用训练结果,请新建任务。
model_not_exist=选择的预训练模型不存在或者已被删除
too_many_notebook=每个用户最多只能创建5个调试任务,请删除历史任务再新建。


[common_error]
@@ -3506,3 +3508,10 @@ builidng_fail = AI应用创建失败
deletion_notice_repo = 此项目有正在部署或正在体验的服务,请先停止服务,然后再删除。
deletion_notice_trainjob = 此任务有正在部署或正在体验的服务,请先停止服务,然后再删除。
stop_service_failed = 停止部署服务失败

[llm_chat]
chat_expired = 对话已过期,请重新创建对话
max_free_exceed = 您的对话次数已达上限
query_empty = 您发送的指令不能为空
query_too_long = 您发送的指令长度超过限制
server_error = 服务器繁忙,请稍后再试

+32 -5 routers/api/v1/api.go

@@ -88,6 +88,7 @@ import (
"code.gitea.io/gitea/modules/setting"
api "code.gitea.io/gitea/modules/structs"
"code.gitea.io/gitea/routers/api/v1/admin"
"code.gitea.io/gitea/routers/api/v1/llm_chat"
"code.gitea.io/gitea/routers/api/v1/misc"
"code.gitea.io/gitea/routers/api/v1/notify"
"code.gitea.io/gitea/routers/api/v1/org"
@@ -651,8 +652,8 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Group("/:username/:reponame", func() {
m.Group("/ai_task", func() {
m.Post("/create", reqWeChatStandard(), reqRepoWriter(models.UnitTypeCloudBrain), bind(entity.CreateReq{}), ai_task.CreateAITask)
m.Post("/stop", reqWeChatStandard(), reqRepoWriter(models.UnitTypeCloudBrain), reqAITaskInRepo(), reqAdminOrOwnerAITaskCreator(), ai_task.StopAITask)
m.Post("/del", reqWeChatStandard(), reqRepoWriter(models.UnitTypeCloudBrain), reqAITaskInRepo(), reqAdminOrOwnerAITaskCreator(), ai_task.DelAITask)
m.Post("/stop", reqRepoWriter(models.UnitTypeCloudBrain), reqAITaskInRepo(), reqAdminOrOwnerAITaskCreator(), ai_task.StopAITask)
m.Post("/del", reqRepoWriter(models.UnitTypeCloudBrain), reqAITaskInRepo(), reqAdminOrOwnerAITaskCreator(), ai_task.DelAITask)
m.Post("/restart", reqWeChatStandard(), reqRepoWriter(models.UnitTypeCloudBrain), reqAITaskInRepo(), reqAdminOrAITaskCreator(), ai_task.RestartAITask)
m.Get("/debug_url", reqWeChatStandard(), reqRepoWriter(models.UnitTypeCloudBrain), reqAITaskInRepo(), ai_task.GetNotebookUrl)
m.Get("/creation/required", reqWeChatStandard(), reqRepoWriter(models.UnitTypeCloudBrain), ai_task.GetCreationRequiredInfo)
@@ -723,6 +724,32 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/spec", finetune.GetSpec)
}, reqToken())

// llm_chat
m.Group("/llm", func() {
m.Get("/stats", reqAdmin(), llm_chat.GetChatStats)
m.Group("/chat", func() {
m.Get("/counts", llm_chat.GetFreeTries)
m.Post("/visit", llm_chat.NewVisit)
m.Post("/agree", llm_chat.SaveAgreement)
m.Post("/legaltext", bind(api.LegalTextParams{}), llm_chat.LegalText)
m.Post("/chat", bind(api.LLMChatMessage{}), llm_chat.LLMChat)
m.Post("/knowledge_base_chat", bind(api.KBChatMessage{}), llm_chat.KBChat)
})
m.Group("/knowledge_base", func() {
m.Get("/list", llm_chat.ListKnowledgeBase)
m.Post("/create", bind(api.CreateKnowledgeBaseParams{}), llm_chat.CreateKnowledgeBase)
m.Post("/delete", llm_chat.DeleteKnowledgeBase)
m.Get("/list_files", llm_chat.ListFiles)
m.Post("/search_docs", bind(api.SearchDocParams{}), llm_chat.SearchDoc)
m.Post("/delete_doc", bind(api.DeleteDocParams{}), llm_chat.DeleteDoc)
m.Post("/update_doc", llm_chat.UpdateDoc)
m.Post("/recreate_vector_store", llm_chat.RecreateVectorStore)
m.Get("/upload_doc_url", llm_chat.UploadDocUrl)
m.Post("/upload_doc", binding.MultipartForm(api.LLMChatUploadForm{}), llm_chat.UploadDoc)
m.Get("/download_doc_url", llm_chat.DownloadDoc)
})
}, reqToken(), reqWeChatStandard())

m.Group("/reward_point", func() {
m.Get("/is_admin", user.IsRewardPointAdmin)
m.Group("/list", func() {
@@ -849,10 +876,10 @@ func RegisterRoutes(m *macaron.Macaron) {

// Users
m.Group("/users", func() {
m.Get("/search", user.Search)
m.Get("/search", reqToken(), user.Search)

m.Group("/:username", func() {
m.Get("", user.GetInfo)
m.Get("", reqToken(), user.GetInfo)
m.Get("/heatmap", mustEnableUserHeatmap, user.GetUserHeatmapData)

m.Get("/repos", user.ListUserRepos)
@@ -966,7 +993,7 @@ func RegisterRoutes(m *macaron.Macaron) {
})

m.Group("/repos", func() {
m.Get("/search", repo.Search)
m.Get("/search", reqToken(), repo.Search)

m.Get("/issues/search", repo.SearchIssues)



+327 -0 routers/api/v1/llm_chat/llm_chat.go

@@ -0,0 +1,327 @@
package llm_chat

import (
"code.gitea.io/gitea/models"
baiduAPI "code.gitea.io/gitea/modules/baiduai"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
api "code.gitea.io/gitea/modules/structs"
llmService "code.gitea.io/gitea/services/llm_chat"
"net/http"
"strings"
"time"
)

const (
Chatglm = "chatglm2-6b"
Llama = "llama2-7b-chat-hf"
FlagExpired = "<expired>"
)

func chatPreCheck(ctx *context.APIContext, isPrompt bool) *models.LlmChatVisit {
modelName := ctx.Query("model_name")
if modelName == "" {
ctx.Error(http.StatusBadRequest, "model_name can't be empty", "model_name can't be empty")
return nil
}
userID := ctx.User.ID
currentTime := time.Now()
hasChat, _ := models.QueryRunningChat(userID, modelName, currentTime.Unix())

if hasChat == nil {
if isPrompt {
ctx.Resp.Header().Set("Content-Type", "application/octet-stream; charset=utf-8")
ctx.Resp.Write([]byte(FlagExpired))
ctx.Resp.Flush()
return nil
} else {
ctx.JSON(http.StatusOK, map[string]string{
"code": "-1",
"msg": ctx.Tr("llm_chat.chat_expired"),
})
log.Error("userID %d : no running chat session for model %s.", ctx.User.ID, modelName)
return nil
}
}

counts := models.QueryChatCount(ctx.User.ID, modelName)
maxTries := setting.LLM_CHAT_API.MAX_FREE_TRIES
if counts >= maxTries {
ctx.JSON(http.StatusOK, map[string]string{
"code": "-1",
"msg": ctx.Tr("llm_chat.max_free_exceed"),
})
log.Error("userID %d : max free times exceed %d.", ctx.User.ID, maxTries)
return nil
}

return hasChat
}

func promptPreCheck(ctx *context.APIContext, prompt string, modelName string) bool {
queryLen := len(strings.TrimSpace(prompt))
log.Info("query length check: %d tokens\n", queryLen)
if queryLen == 0 {
ctx.JSON(http.StatusOK, map[string]string{
"code": "-1",
"msg": ctx.Tr("llm_chat.query_empty"),
})
log.Error("userID %d : query can't be empty.", ctx.User.ID)
return false
}

lenFlag := false
if modelName == Chatglm {
lenFlag = queryLen > setting.LLM_CHAT_API.CHATGLM2_MAX_LENGTH
} else if modelName == Llama {
lenFlag = queryLen > setting.LLM_CHAT_API.LLAMA2_MAX_LENGTH
}
if lenFlag {
ctx.JSON(http.StatusOK, map[string]string{
"code": "-1",
"msg": ctx.Tr("llm_chat.query_too_long"),
})
log.Error("userID %d : query length too long.", ctx.User.ID)
return false
}

return true
}

func LLMChat(ctx *context.APIContext, data api.LLMChatMessage) {
log.Info("LLM chat by api.")
hasChat := chatPreCheck(ctx, true)
promptFlag := promptPreCheck(ctx, data.Query, data.ModelName)
if !promptFlag || hasChat == nil {
log.Error("userID %d : chat prompt pre-check failed.", ctx.User.ID)
return
}
if data.Stream {
llmService.StreamLLMChatService(ctx.Context, data, hasChat)
} else {
ctx.JSON(http.StatusInternalServerError, "currently not supported")
//llmService.LLMChatService(ctx.Context, data, hasChat)
}
}

func KBChat(ctx *context.APIContext, data api.KBChatMessage) {
log.Info("LLM KnowledgeBase chat by api.")
hasChat := chatPreCheck(ctx, true)
promptFlag := promptPreCheck(ctx, data.Query, data.ModelName)
if !promptFlag || hasChat == nil {
log.Error("userID %d : chat prompt pre-check failed.", ctx.User.ID)
return
}
if data.Stream {
llmService.StreamKBChatService(ctx.Context, data, hasChat)
} else {
ctx.JSON(http.StatusInternalServerError, "currently not supported")
//llmService.KBChatService(ctx.Context, data, hasChat)
}
}

func ListKnowledgeBase(ctx *context.APIContext) {
log.Info("LLM list KnowledgeBase by api.")
hasChat := chatPreCheck(ctx, false)
if hasChat == nil {
return
}
llmService.ListKnowledgeBaseService(ctx.Context)
}

func CreateKnowledgeBase(ctx *context.APIContext, data api.CreateKnowledgeBaseParams) {
log.Info("LLM create KnowledgeBase by api.")
hasChat := chatPreCheck(ctx, false)
if hasChat == nil {
return
}
llmService.CreateKnowledgeBaseService(ctx.Context, data)
}

func DeleteKnowledgeBase(ctx *context.APIContext) {
log.Info("LLM delete KnowledgeBase by api.")
hasChat := chatPreCheck(ctx, false)
if hasChat == nil {
return
}
llmService.DeleteKnowledgeBaseService(ctx.Context)
}

func ListFiles(ctx *context.APIContext) {
log.Info("LLM list files by api.")
hasChat := chatPreCheck(ctx, false)
if hasChat == nil {
return
}
llmService.ListFilesService(ctx.Context)
}

func SearchDoc(ctx *context.APIContext, data api.SearchDocParams) {
log.Info("LLM search doc by api.")
hasChat := chatPreCheck(ctx, false)
if hasChat == nil {
return
}
llmService.SearchDocService(ctx.Context, data)
}

func DeleteDoc(ctx *context.APIContext, data api.DeleteDocParams) {
log.Info("LLM delete doc by api.")
hasChat := chatPreCheck(ctx, false)
if hasChat == nil {
return
}
llmService.DeleteDocService(ctx.Context, data)
}

func UpdateDoc(ctx *context.APIContext) {
log.Info("LLM update doc by api.")
hasChat := chatPreCheck(ctx, false)
if hasChat == nil {
return
}
llmService.UpdateDocService(ctx.Context)
}

func RecreateVectorStore(ctx *context.APIContext) {
log.Info("LLM recreate vector store by api.")
hasChat := chatPreCheck(ctx, false)
if hasChat == nil {
return
}
llmService.RecreateVectorStoreService(ctx.Context)
}

func UploadDocUrl(ctx *context.APIContext) {
log.Info("LLM upload doc by api.")
hasChat := chatPreCheck(ctx, false)
if hasChat == nil {
return
}
llmService.UploadDocUrlService(ctx.Context)
}

func UploadDoc(ctx *context.APIContext, form api.LLMChatUploadForm) {
log.Info("LLM upload doc by api.")
hasChat := chatPreCheck(ctx, false)
if hasChat == nil {
return
}
llmService.UploadDocService(ctx.Context, form)
}

func DownloadDoc(ctx *context.APIContext) {
log.Info("LLM download doc by api.")
hasChat := chatPreCheck(ctx, false)
if hasChat == nil {
return
}
llmService.DownloadDocService(ctx.Context)
}

func GetFreeTries(ctx *context.APIContext) {
log.Info("LLM get free tries by api.")
llmService.GetFreeTriesService(ctx.Context)
}

func LegalText(ctx *context.APIContext, data api.LegalTextParams) {
log.Info("LLM get chat counts by api.")
res, err := baiduAPI.CheckLegalText(data.Text)
if err != nil {
log.Error("CheckLegalText failed: %s", err)
ctx.Error(http.StatusInternalServerError, "CheckLegalText failed", err.Error())
return
}
ctx.JSON(http.StatusOK, res)
}

func NewVisit(ctx *context.APIContext) {
log.Info("LLM new visit by api.")
currentTime := time.Now()
modelName := ctx.Query("model_name")

hasChat, _ := models.QueryRunningChat(ctx.User.ID, modelName, currentTime.Unix())
if hasChat != nil {
ctx.JSON(http.StatusOK, map[string]string{
"chat_id": hasChat.ChatId,
"model_name": hasChat.ModelName,
"expired_at": hasChat.ExpiredTime,
})
log.Info("hasChat %s, expired at %s\n", hasChat.ChatId, hasChat.ExpiredTime)
return
}

chatID := ctx.User.Name + currentTime.Format("20060102150405")
duration := time.Duration(setting.LLM_CHAT_API.CHAT_EXPIRED_MINUTES)
endTime := currentTime.Add(time.Minute * duration)
endTimeStr := endTime.Format("2006-01-02 15:04:05")
llmChatVisit := &models.LlmChatVisit{
UserId: ctx.User.ID,
ChatId: chatID,
ModelName: modelName,
ExpiredUnix: endTime.Unix(),
ExpiredTime: endTimeStr,
Agreement: 0,
}
models.SaveVisit(llmChatVisit)
log.Info("new chat %s, expired at %s", chatID, endTimeStr)
ctx.JSON(http.StatusOK, map[string]string{
"chat_id": llmChatVisit.ChatId,
"model_name": llmChatVisit.ModelName,
"expired_at": llmChatVisit.ExpiredTime,
})
}

func SaveAgreement(ctx *context.APIContext) {
hasChat := chatPreCheck(ctx, false)
if hasChat == nil {
return
}
if hasChat.Agreement == 1 {
ctx.JSON(http.StatusOK, map[string]string{
"code": "-1",
"msg": "already saved Agreement",
})
return
}

hasChat.Agreement = 1
models.UpdateChat(hasChat)
ctx.JSON(http.StatusOK, map[string]string{
"code": "1",
"msg": "successfully saved Agreement status",
})
}

func GetChatStats(ctx *context.APIContext) {
log.Info("LLM chat stats by api.")

resChat, err := models.QueryChatStatistics()
if err != nil {
log.Error("QueryChatStatistics failed: %s", err)
ctx.JSON(http.StatusInternalServerError, "QueryChatStatistics failed")
return
}

resVisit, err := models.QueryChatVisitStatistics()
if err != nil {
log.Error("QueryChatVisitStatistics failed: %s", err)
ctx.JSON(http.StatusInternalServerError, "QueryChatVisitStatistics failed")
return
}

res := make(map[string]interface{})
for _, chat := range resChat {
for _, visit := range resVisit {
if chat["model_name"] == visit["model_name"] {
chat["visit"] = visit["visit"]
chat["visit_user"] = visit["visit_user"]
}
}
res[chat["model_name"].(string)] = chat
}

ctx.JSON(http.StatusOK, res)
}
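
For reference, the merge above yields a map keyed by model name with the visit columns folded into each chat row. A runnable sketch over hypothetical rows; every column name other than model_name, visit and visit_user is an assumption about what the two queries return:

package main

import "fmt"

func main() {
	// Stand-ins for QueryChatStatistics / QueryChatVisitStatistics results.
	resChat := []map[string]interface{}{{"model_name": "chatglm2-6b", "chat": 120}}
	resVisit := []map[string]interface{}{{"model_name": "chatglm2-6b", "visit": 300, "visit_user": 85}}

	res := make(map[string]interface{})
	for _, chat := range resChat {
		for _, visit := range resVisit {
			if chat["model_name"] == visit["model_name"] {
				chat["visit"] = visit["visit"]
				chat["visit_user"] = visit["visit_user"]
			}
		}
		res[chat["model_name"].(string)] = chat
	}
	fmt.Println(res) // map[chatglm2-6b:map[chat:120 model_name:chatglm2-6b visit:300 visit_user:85]]
}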

+ 32
- 1
routers/api/v1/repo/attachments.go View File

@@ -2,6 +2,7 @@ package repo

import (
"net/http"
"strings"
"sync"

"code.gitea.io/gitea/modules/log"
@@ -12,6 +13,7 @@ import (
)

var mutex *sync.Mutex = new(sync.Mutex)
var modelMutex *sync.Mutex = new(sync.Mutex)

func GetSuccessChunks(ctx *context.APIContext) {
if errStr := checkDatasetPermission(ctx); errStr != "" {
@@ -146,7 +148,27 @@ func NewModelMultipart(ctx *context.APIContext) {
return
}

routeRepo.NewModelMultipart(ctx.Context)
if err := routeRepo.CheckFlowForModelSDK(); err != nil {
ctx.JSON(200, map[string]string{
"result_code": "-1",
"msg": err.Error(),
})
return
}
modelMutex.Lock()
defer modelMutex.Unlock()
fileName := ctx.Query("file_name")
re, err := routeRepo.NewModelMultipartForApi(ctx.Context, true)
if err != nil {
ctx.JSON(200, map[string]string{
"result_code": "-1",
"msg": err.Error(),
})
} else {
routeRepo.AddModelFileNameToCache(modeluuid, fileName, ctx.User.ID)
re["result_code"] = "0"
ctx.JSON(200, re)
}
}

func checkModelPermission(ctx *context.APIContext, model *models.AiModelManage) string {
@@ -178,5 +200,14 @@ func GetModelMultipartUploadUrl(ctx *context.APIContext) {

func CompleteModelMultipart(ctx *context.APIContext) {
log.Info("CompleteModelMultipart by api.")
modeluuid := ctx.Query("modeluuid")
//fileName := ctx.Query("file_name")
uuid := ctx.Query("uuid")
fileChunk, err := models.GetModelFileChunkByUUID(uuid)
if err == nil {
log.Info("fileChunk.ObjectName=" + fileChunk.ObjectName)
objectNames := strings.Split(fileChunk.ObjectName, "/")
routeRepo.RemoveModelFileFromCache(modeluuid, objectNames[len(objectNames)-1], ctx.User.ID)
}
routeRepo.CompleteModelMultipart(ctx.Context)
}
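
Taken together, the two handlers give model uploads the same SDK flow control that dataset uploads already had: NewModelMultipart rejects the request once the global cap from CheckFlowForModelSDK is reached and records the in-flight file via AddModelFileNameToCache, while CompleteModelMultipart removes the entry again. A self-contained sketch of that lifecycle, with an in-memory map standing in for the Redis-backed cache:

package main

import (
	"errors"
	"fmt"
)

// inflight stands in for the Redis cache maintained in flow_control.go.
var inflight = map[string]bool{}

// checkFlow mimics CheckFlowForModelSDK; limit stands in for
// setting.FLOW_CONTROL.ALL_ATTACHEMENT_NUM_SDK.
func checkFlow(limit int) error {
	if len(inflight) >= limit {
		return errors.New("too many concurrent SDK uploads")
	}
	return nil
}

func main() {
	key := "uuid123_model.bin_42" // modelId_fileName_userId, as built in flow_control.go
	if err := checkFlow(2); err != nil {
		fmt.Println(`{"result_code":"-1","msg":"` + err.Error() + `"}`)
		return
	}
	inflight[key] = true        // NewModelMultipart: AddModelFileNameToCache
	defer delete(inflight, key) // CompleteModelMultipart: RemoveModelFileFromCache
	fmt.Println("upload proceeds for", key)
}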

+ 51
- 2
routers/modelapp/wenxin.go View File

@@ -1,17 +1,21 @@
package modelapp

import (
"fmt"

"code.gitea.io/gitea/models"
"code.gitea.io/gitea/modules/base"
"code.gitea.io/gitea/modules/context"
"code.gitea.io/gitea/modules/modelappservice"
"code.gitea.io/gitea/modules/setting"
"fmt"
uuid "github.com/satori/go.uuid"
"net/http"
"time"
)

var modelMainTpl base.TplName = "model/index"
var modelWenXinTpl base.TplName = "model/wenxin/index"
var modelLLMChatTpl base.TplName = "model/llmchat/index"
var modelLLMChatCreateTpl base.TplName = "model/llmchat/create/index"

const WAIT_TIME int = 7

@@ -23,6 +27,51 @@ func WenXinPage(ctx *context.Context) {
ctx.HTML(200, modelWenXinTpl)
}

func LLMChatCreate(ctx *context.Context) {
currentTime := time.Now()
modelName := ctx.Query("model_name")
hasChat, _ := models.QueryRunningChat(ctx.User.ID, modelName, currentTime.Unix())
if hasChat != nil {
ctx.Data["chatID"] = hasChat.ChatId
} else {
ctx.Data["chatID"] = "-1"
}
ctx.HTML(200, modelLLMChatCreateTpl)
return
}

func LLMChatPage(ctx *context.Context) {
chatID := ctx.Params("chatID")
modelName := ctx.Query("model_name")

currentTime := time.Now().Unix()
hasChat, _ := models.QueryByChatId(chatID)
if hasChat != nil {
if ctx.User.ID != hasChat.UserId || hasChat.ModelName != modelName {
ctx.Data["Title"] = "Page Not Found"
ctx.HTML(http.StatusNotFound, base.TplName("status/404"))
return
}
if currentTime > hasChat.ExpiredUnix {
ctx.Redirect(setting.AppSubURL + "/extension/llm_chat/create?model_name=" + modelName) //HTML(200, modelLLMChatCreateTpl)
return
}
ctx.Data["expiredUnix"] = hasChat.ExpiredUnix
ctx.Data["landingUnix"] = currentTime
}

firstVisit := models.QueryFirstVisit(ctx.User.ID)
ctx.Data["firstVisit"] = firstVisit == int64(0)

counts := models.QueryChatCount(ctx.User.ID, modelName)
maxTries := setting.LLM_CHAT_API.MAX_FREE_TRIES
ctx.Data["can_chat"] = counts < maxTries
ctx.Data["counts"] = counts
ctx.Data["max_tries"] = maxTries

ctx.HTML(200, modelLLMChatTpl)
}

func WenXinPaintNew(ctx *context.Context) {
textDesc := ctx.Query("textDesc")
uuid := uuid.NewV4()


+ 30
- 22
routers/repo/attachment_model.go View File

@@ -1,6 +1,7 @@
package repo

import (
"errors"
"fmt"
"path"
"strconv"
@@ -142,37 +143,48 @@ func getObjectName(filename string, modeluuid string) string {
}

func NewModelMultipart(ctx *context.Context) {
if !setting.Attachment.Enabled {
ctx.Error(404, "attachment is not enabled")
re, err := NewModelMultipartForApi(ctx, false)
if err != nil {
ctx.ServerError("NewMultipart failed", err)
return
}
ctx.JSON(200, re)
}

func NewModelMultipartForApi(ctx *context.Context, isFlowControl bool) (map[string]string, error) {
if !setting.Attachment.Enabled {
return nil, errors.New("attachment is not enabled")
}
fileName := ctx.Query("file_name")
modeluuid := ctx.Query("modeluuid")

err := upload.VerifyFileType(ctx.Query("fileType"), strings.Split(setting.Attachment.AllowedTypes, ","))
if err != nil {
ctx.Error(400, err.Error())
return
return nil, err
}
if isFlowControl {
err = CheckFlowForModel(ctx)
if err != nil {
log.Info("check error," + err.Error())
return nil, err
}
}

typeCloudBrain := ctx.QueryInt("type")
err = checkTypeCloudBrain(typeCloudBrain)
if err != nil {
ctx.ServerError("checkTypeCloudBrain failed", err)
return
return nil, err
}

if setting.Attachment.StoreType == storage.MinioStorageType {
totalChunkCounts := ctx.QueryInt("totalChunkCounts")
if totalChunkCounts > minio_ext.MaxPartsCount {
ctx.Error(400, fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
return
return nil, errors.New(fmt.Sprintf("chunk counts(%d) is too much", totalChunkCounts))
}

fileSize := ctx.QueryInt64("size")
if fileSize > minio_ext.MaxMultipartPutObjectSize {
ctx.Error(400, fmt.Sprintf("file size(%d) is too big", fileSize))
return
return nil, errors.New(fmt.Sprintf("file size(%d) is too big", fileSize))
}

uuid := gouuid.NewV4().String()
@@ -182,16 +194,14 @@ func NewModelMultipart(ctx *context.Context) {
objectName = strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, fileName)), "/")
uploadID, err = storage.NewMultiPartUpload(objectName)
if err != nil {
ctx.ServerError("NewMultipart", err)
return
return nil, err
}
} else {

objectName = strings.TrimPrefix(path.Join(Model_prefix, path.Join(modeluuid[0:1], modeluuid[1:2], modeluuid, fileName)), "/")
uploadID, err = storage.NewObsMultiPartUpload(objectName)
if err != nil {
ctx.ServerError("NewObsMultiPartUpload", err)
return
return nil, err
}
}

@@ -208,17 +218,15 @@ func NewModelMultipart(ctx *context.Context) {
})

if err != nil {
ctx.Error(500, fmt.Sprintf("InsertFileChunk: %v", err))
return
}
return nil, err

ctx.JSON(200, map[string]string{
}
return map[string]string{
"uuid": uuid,
"uploadID": uploadID,
})
}, nil
} else {
ctx.Error(404, "storage type is not enabled")
return
return nil, errors.New("storage type is not enabled")
}
}



+ 99
- 1
routers/repo/flow_control.go View File

@@ -16,10 +16,12 @@ import (
)

const (
REDIS_FLOW_ATTACHMENT_KEY = "flow_attachment_key"
REDIS_FLOW_ATTACHMENT_KEY = "flow_attachment_key"
REDIS_FLOW_MODEL_ATTACHMENT_KEY = "flow_model_attachment_key"
)

var mutex *sync.RWMutex = new(sync.RWMutex)
var modelMutex *sync.Mutex = new(sync.Mutex)

func CheckFlowForDataset(ctx *context.Context) error {
if ctx.User == nil {
@@ -86,6 +88,31 @@ func AddFileNameToCache(datasetId int64, fileName string, userId int64) {
setSDKUploadFileCache(REDIS_FLOW_ATTACHMENT_KEY, cacheMap)
}

func AddModelFileNameToCache(modelId string, fileName string, userId int64) {
modelMutex.Lock()
defer modelMutex.Unlock()
cacheMap := getSDKUploadFileMap(REDIS_FLOW_MODEL_ATTACHMENT_KEY)
expireTimeKeys := make([]string, 0)
currentTime := time.Now().Unix()
for tmpKey, tmpValue := range cacheMap {
time, err := strconv.ParseInt(tmpValue, 10, 64)
if err == nil {
if currentTime-time > 24*3600 {
expireTimeKeys = append(expireTimeKeys, tmpKey)
continue
}
}
}
for _, delKey := range expireTimeKeys {
delete(cacheMap, delKey)
}
key := modelId + "_" + fileName + "_" + fmt.Sprint(userId)
value := fmt.Sprint(time.Now().Unix())
cacheMap[key] = value
log.Info("set key=" + key + " value=" + value + " to cache.")
setSDKUploadFileCache(REDIS_FLOW_MODEL_ATTACHMENT_KEY, cacheMap)
}

func RemoveFileFromCache(datasetId int64, fileName string, userId int64) {
mutex.Lock()
defer mutex.Unlock()
@@ -96,6 +123,16 @@ func RemoveFileFromCache(datasetId int64, fileName string, userId int64) {
setSDKUploadFileCache(REDIS_FLOW_ATTACHMENT_KEY, cacheMap)
}

func RemoveModelFileFromCache(modelId string, fileName string, userId int64) {
modelMutex.Lock()
defer modelMutex.Unlock()
key := modelId + "_" + fileName + "_" + fmt.Sprint(userId)
cacheMap := getSDKUploadFileMap(REDIS_FLOW_MODEL_ATTACHMENT_KEY)
delete(cacheMap, key)
log.Info("remove key=" + key + " from cache.")
setSDKUploadFileCache(REDIS_FLOW_MODEL_ATTACHMENT_KEY, cacheMap)
}

func getSDKUploadFileMap(msgKey string) map[string]string {
valueStr, err := redis_client.Get(msgKey)
msgMap := make(map[string]string, 0)
@@ -144,3 +181,64 @@ func CheckFlowForDatasetSDK() error {
}
return nil
}

func CheckFlowForModelSDK() error {
cacheMap := getSDKUploadFileMap(REDIS_FLOW_MODEL_ATTACHMENT_KEY)
currentTime := time.Now().Unix()
count := 0
for _, tmpValue := range cacheMap {
time, err := strconv.ParseInt(tmpValue, 10, 64)
if err == nil {
if currentTime-time > 24*3600 {
continue
}
}
count += 1
}
log.Info("total find " + fmt.Sprint(count) + " uploading files.")
if count >= setting.FLOW_CONTROL.ALL_ATTACHEMENT_NUM_SDK {
log.Info("The number of model files uploaded using the SDK simultaneously cannot exceed " + fmt.Sprint(setting.FLOW_CONTROL.ALL_ATTACHEMENT_NUM_SDK))
return errors.New("The number of model files uploaded using the SDK simultaneously cannot exceed " + fmt.Sprint(setting.FLOW_CONTROL.ALL_ATTACHEMENT_NUM_SDK))
}
return nil
}

func CheckFlowForModel(ctx *context.Context) error {
if ctx.User == nil {
return errors.New("User not login.")
}
log.Info("start to check flow for upload model file.")
fileName := ctx.Query("file_name")
currentTimeNow := time.Now()
currentLongTime := currentTimeNow.Unix()
last24Hour := currentTimeNow.AddDate(0, 0, -1).Unix()
filechunks, err := models.GetModelFileChunksByUserId(ctx.User.ID, last24Hour, true)
if err == nil {
if len(filechunks) >= setting.FLOW_CONTROL.ATTACHEMENT_NUM_A_USER_LAST24HOUR {
log.Info("A single user cannot upload more than " + fmt.Sprint(setting.FLOW_CONTROL.ATTACHEMENT_NUM_A_USER_LAST24HOUR) + " files within the last 24 hours. so " + fileName + " is rejected. user id=" + fmt.Sprint(ctx.User.ID))
return errors.New("A single user cannot upload more than " + fmt.Sprint(setting.FLOW_CONTROL.ATTACHEMENT_NUM_A_USER_LAST24HOUR) + " files within the last 24 hours.")
}
var totalSize int64
totalSize += ctx.QueryInt64("size")
concurrentUpload := 0
for _, file := range filechunks {
totalSize += file.Size
if (currentLongTime - int64(file.CreatedUnix)) < 10*60 {
log.Info("the file " + file.Md5 + " in 10min upload." + file.CreatedUnix.Format("2006-01-02 15:04:05"))
concurrentUpload += 1
} else {
log.Info("the file " + file.Md5 + " not in 10min upload." + file.CreatedUnix.Format("2006-01-02 15:04:05"))
}
}
log.Info("The concurrentUpload is " + fmt.Sprint(concurrentUpload) + " to checked " + fileName + ". user id=" + fmt.Sprint(ctx.User.ID))
if concurrentUpload >= setting.FLOW_CONTROL.ATTACHEMENT_NUM_A_USER_LAST10M {
log.Info("A single user cannot upload more than " + fmt.Sprint(setting.FLOW_CONTROL.ATTACHEMENT_NUM_A_USER_LAST10M) + " files within the past 10 minutes. so " + fileName + " is rejected. user id=" + fmt.Sprint(ctx.User.ID))
return errors.New("A single user cannot upload more than " + fmt.Sprint(setting.FLOW_CONTROL.ATTACHEMENT_NUM_A_USER_LAST10M) + " files within the past 10 minutes.")
}
if totalSize >= setting.FLOW_CONTROL.ATTACHEMENT_SIZE_A_USER*1024*1024*1024 {
log.Info("The total file size uploaded by a single user within the past 24 hours cannot exceed " + fmt.Sprint(setting.FLOW_CONTROL.ATTACHEMENT_SIZE_A_USER) + "G. so " + fileName + " is rejected. user id=" + fmt.Sprint(ctx.User.ID))
return errors.New("The total file size uploaded by a single user within the past 24 hours cannot exceed " + fmt.Sprint(setting.FLOW_CONTROL.ATTACHEMENT_SIZE_A_USER) + "G.")
}
}
return nil
}
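
In short, CheckFlowForModel enforces three per-user limits from FLOW_CONTROL: a file-count cap over the trailing 24 hours, a concurrency cap over the trailing 10 minutes, and a total-size cap in GB. The same decision logic condensed into a sketch; the numeric limits below are placeholders, not the shipped defaults:

package main

import "fmt"

// Placeholder limits; the real values come from setting.FLOW_CONTROL.
const (
	maxFilesPer24h   = 100 // ATTACHEMENT_NUM_A_USER_LAST24HOUR
	maxFilesPer10min = 10  // ATTACHEMENT_NUM_A_USER_LAST10M
	maxSizeGB        = 200 // ATTACHEMENT_SIZE_A_USER
)

func allowed(files24h, files10min int, totalBytes int64) error {
	switch {
	case files24h >= maxFilesPer24h:
		return fmt.Errorf("more than %d files uploaded in the last 24 hours", maxFilesPer24h)
	case files10min >= maxFilesPer10min:
		return fmt.Errorf("more than %d files uploaded in the last 10 minutes", maxFilesPer10min)
	case totalBytes >= maxSizeGB*1024*1024*1024:
		return fmt.Errorf("more than %dG uploaded in the last 24 hours", maxSizeGB)
	}
	return nil
}

func main() {
	fmt.Println(allowed(5, 2, 1<<30)) // <nil>: under all three limits
}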

+ 1
- 1
routers/repo/grampus.go View File

@@ -2017,7 +2017,7 @@ func GrampusCommitImage(ctx *context.Context, form auth.CommitImageGrampusForm)

if err != nil {
log.Error("CommitImage(%s) failed:%v", ctx.Cloudbrain.JobName, err.Error(), ctx.Data["msgID"])
if models.IsErrImageTagExist(err) {
if models.IsErrImageTagExist(err) || strings.Contains(err.Error(), "Image already exists") || strings.Contains(err.Error(), "image exists") {
ctx.JSON(200, models.BaseErrorMessage(ctx.Tr("repo.image_exist")))

} else if models.IsErrorImageCommitting(err) {


+ 1
- 0
routers/response/response_list.go View File

@@ -36,3 +36,4 @@ var LOAD_CODE_FAILED = &BizError{Code: 2019, DefaultMsg: "Fail to load code, ple
var BRANCH_NOT_EXISTS = &BizError{Code: 2020, DefaultMsg: "The branch does not exist", TrCode: "ai_task.branch_not_exists"}
var MODEL_NUM_OVER_LIMIT = &BizError{Code: 2021, DefaultMsg: "The number of models exceeds the limit of 30", TrCode: "repo.debug.manage.model_num_over_limit"}
var DATASET_NUMBER_OVER_LIMIT = &BizError{Code: 2022, DefaultMsg: "The dataset count exceed the limit", TrCode: "ai_task.dataset_number_over_limit"}
var NOTEBOOK_EXCEED_MAX_NUM = &BizError{Code: 2023, DefaultMsg: "You can have up to 5 Debug Tasks, please try again after delete some tasks. ", TrCode: "ai_task.too_many_notebook"}

+ 5
- 3
routers/routes/routes.go View File

@@ -364,7 +364,7 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Post("/user/login/kanban", user.SignInPostAPI)
m.Get("/home/term", routers.HomeTerm)
m.Get("/home/annual_privacy", routers.HomeAnnual)
m.Get("/home/wenxin_privacy", routers.HomeWenxin)
m.Get("/home/model_privacy", routers.HomeWenxin)
m.Get("/home/notice", routers.HomeNoticeTmpl)
m.Get("/home/privacy", routers.HomePrivacy)

@@ -381,6 +381,10 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/tuomin/upload", modelapp.ProcessImageUI)
m.Post("/tuomin/upload", reqSignIn, modelapp.ProcessImage)
m.Get("/wenxin", modelapp.WenXinPage)
m.Group("/llm_chat", func() {
m.Get("/create", reqSignIn, reqWechatBind, modelapp.LLMChatCreate)
m.Get("/:chatID", reqSignIn, reqWechatBind, modelapp.LLMChatPage)
})
m.Get("/wenxin/paint_new", reqSignIn, modelapp.WenXinPaintNew)
m.Get("/wenxin/query_paint_result", reqSignIn, modelapp.QueryWenXinPaintResult)
m.Get("/wenxin/query_paint_image", reqSignIn, modelapp.QueryWenXinPaintById)
@@ -391,9 +395,7 @@ func RegisterRoutes(m *macaron.Macaron) {
m.Get("/create", reqSignIn, reqWechatBind, modelapp.PanguFinetuneCreateUI)
m.Get("/inference", reqSignIn, modelapp.PanguInferenceUI)
})

})

})

m.Group("/explore", func() {


+ 4
- 2
services/ai_task_service/cluster/c2net.go View File

@@ -271,7 +271,8 @@ func (c C2NetClusterAdapter) DeleteNoteBook(opts entity.JobIdAndVersionId) error
_, err := grampus.DeleteJob(opts.JobID, string(models.JobTypeDebug))
if err != nil {
log.Error("DeleteNoteBook(%s) failed:%v", opts, err)
return err
log.Info("error=" + err.Error())
return nil
}
return nil
}
@@ -598,7 +599,8 @@ func (c C2NetClusterAdapter) DeleteTrainJob(opts entity.JobIdAndVersionId) error
_, err := grampus.DeleteJob(opts.JobID)
if err != nil {
log.Error("Delete train job(%s) failed:%v", opts, err)
return err
log.Info("error=" + err.Error())
return nil
}
return nil
}


+ 8
- 2
services/ai_task_service/cluster/cloudbrain_two.go View File

@@ -186,7 +186,8 @@ func (c CloudbrainTwoClusterAdapter) DeleteNoteBook(opts entity.JobIdAndVersionI
}
if err != nil {
log.Error("DeleteNoteBook err.jobID=%s err=%v", opts, err)
return err
log.Info("error=" + err.Error())
return nil
}
return nil
}
@@ -552,7 +553,12 @@ func getCloudbrainTwoModelUrl(datasets []entity.ContainerData) string {

func (c CloudbrainTwoClusterAdapter) DeleteTrainJob(opts entity.JobIdAndVersionId) error {
_, err := modelarts.DelTrainJobVersion(opts.JobID, strconv.FormatInt(opts.VersionID, 10))
return err
if err != nil {
log.Error("DeleteTrainJob err.jobID=%s err=%v", opts, err)
log.Info("error=" + err.Error())
return nil
}
return nil
}

func (c CloudbrainTwoClusterAdapter) StopTrainJob(opts entity.JobIdAndVersionId) error {


+ 1
- 0
services/ai_task_service/task/cloudbrain_one_notebook_task.go View File

@@ -83,6 +83,7 @@ func (t CloudbrainOneNotebookTaskTemplate) Create(ctx *context.CreationContext)
c := &CreateOperator{}
err := c.Next(t.CheckParamFormat).
Next(t.CheckMultiRequest).
Next(t.CheckNotebookCount).
Next(t.CheckDisplayJobName).
Next(t.LoadSpec).
Next(t.CheckPointBalance).


+ 1
- 0
services/ai_task_service/task/cloudbrain_two_notebook_task.go View File

@@ -68,6 +68,7 @@ func (t CloudbrainTwoNotebookTaskTemplate) Create(ctx *context.CreationContext)
err := c.Next(t.CheckParamFormat).
Next(t.CheckMultiRequest).
Next(t.CheckDisplayJobName).
Next(t.CheckNotebookCount).
Next(t.LoadSpec).
Next(t.CheckPointBalance).
Next(t.CheckDatasets).


+ 1
- 0
services/ai_task_service/task/grampus_notebook_task.go View File

@@ -149,6 +149,7 @@ func (t GrampusNoteBookTaskTemplate) Create(ctx *context.CreationContext) (*enti
err := c.Next(t.CheckParamFormat).
Next(t.CheckMultiRequest).
Next(t.CheckDisplayJobName).
Next(t.CheckNotebookCount).
Next(t.LoadSpec).
Next(t.CheckPointBalance).
Next(t.CheckDatasets).


+ 18
- 0
services/ai_task_service/task/opt_handler.go View File

@@ -42,6 +42,7 @@ type CreationHandler interface {
CallRestartAPI(ctx *context.CreationContext) *response.BizError
NotifyCreation(ctx *context.CreationContext) *response.BizError
HandleErr4Async(ctx *context.CreationContext) *response.BizError
CheckNotebookCount(ctx *context.CreationContext) *response.BizError
}

//DefaultCreationHandler CreationHandler的默认实现,公共逻辑可以在此结构体中实现
@@ -106,6 +107,7 @@ func (DefaultCreationHandler) BuildRequest4Restart(ctx *context.CreationContext)
IsFileNoteBookRequest: task.BootFile != "",
IsRestartRequest: true,
DatasetNames: task.DatasetName,
HasInternet: models.SpecInternetQuery(task.HasInternet),
}
log.Info("BuildRequest4Restart success.displayJobName=%s jobType=%s cluster=%s", ctx.Request.DisplayJobName, ctx.Request.JobType, ctx.Request.Cluster)
return nil
@@ -421,6 +423,7 @@ func (DefaultCreationHandler) InsertCloudbrainRecord4Async(ctx *context.Creation
UpdatedUnix: timeutil.TimeStampNow(),
GpuQueue: ctx.Spec.QueueCode,
AppName: req.AppName,
HasInternet: int(req.HasInternet),
}

err := models.CreateCloudbrain(c)
@@ -594,6 +597,7 @@ func (DefaultCreationHandler) CreateCloudbrainRecord4Restart(ctx *context.Creati
SubTaskName: models.SubTaskName,
ModelId: req.PretrainModelId,
GpuQueue: ctx.Spec.QueueCode,
HasInternet: int(req.HasInternet),
}
err := models.RestartCloudbrain(ctx.SourceCloudbrain, c)

@@ -685,3 +689,17 @@ func (DefaultCreationHandler) HandleErr4Async(ctx *context.CreationContext) *res
func (g DefaultCreationHandler) NotifyCreation(ctx *context.CreationContext) *response.BizError {
return nil
}

func (DefaultCreationHandler) CheckNotebookCount(ctx *context.CreationContext) *response.BizError {

if setting.NotebookStrategy.ClearEnabled && ctx.Request.JobType == models.JobTypeDebug {
count, err := models.GetNotebooksCountByUser(ctx.User.ID)
if err != nil {
log.Warn("can not get user notebook count", err)
}
if count >= int64(setting.NotebookStrategy.MaxNumberPerUser) {
return response.NOTEBOOK_EXCEED_MAX_NUM
}
}
return nil
}

+ 45
- 0
services/ai_task_service/task/task_service.go View File

@@ -138,6 +138,7 @@ func buildAITaskInfo(task *models.Cloudbrain, creator *models.User, config *enti
EngineName: task.EngineName,
UserId: task.UserID,
AppName: task.AppName,
HasInternet: task.HasInternet,
}, nil
}

@@ -805,3 +806,47 @@ func HandleNewAITaskDelete(cloudbrainId int64) (isHandled bool, err error) {
}
return true, nil
}

func ClearNotebook() {
defer func() {
if err := recover(); err != nil {
log.Error("panic occurred:", err)
}
}()

if !setting.NotebookStrategy.ClearEnabled {
return
}

userCountInfo, err := models.GetNotebookCountGreaterThanN(setting.NotebookStrategy.MaxNumberPerUser)
if err != nil {
log.Error("can not get Notebook user count info", err)
return
}
deleteCount := 0
for _, userCount := range userCountInfo {
ids, err := models.GetNotebooksByUser(userCount.UserID, setting.NotebookStrategy.MaxNumberPerUser)
if err != nil {
log.Error("can not get Notebook by user id", err)
continue
}
for _, id := range ids {
t, _ := GetAITaskTemplateByCloudbrainId(id)
if t == nil {
log.Error("can not get task template")
continue
}
err := t.Delete(id)
if err != nil {
log.Error("Delete error.%v", err)
continue
}
log.Info("Clear Notebook id is " + strconv.FormatInt(id, 10))
deleteCount += 1
if deleteCount >= setting.NotebookStrategy.ClearBatchSize {
return
}
}
}

}
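
ClearNotebook is a no-op unless NotebookStrategy.ClearEnabled is set, and each pass deletes at most ClearBatchSize debug tasks across the users above MaxNumberPerUser. Since this PR also touches modules/cron/tasks_basic.go, the function is presumably invoked on a schedule; the sketch below is a hedged illustration of such wiring, and both the interval and the standalone shape are assumptions rather than the PR's actual cron registration:

package main

import (
	"fmt"
	"time"
)

// clearNotebook stands in for task.ClearNotebook above.
func clearNotebook() { fmt.Println("clearing over-quota notebooks") }

func main() {
	ticker := time.NewTicker(10 * time.Minute) // interval is a placeholder
	defer ticker.Stop()
	for range ticker.C {
		clearNotebook()
	}
}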

+ 564
- 0
services/llm_chat/llm_chat.go View File

@@ -0,0 +1,564 @@
package llm_chat

import (
"code.gitea.io/gitea/models"
baiduAPI "code.gitea.io/gitea/modules/baiduai"
"code.gitea.io/gitea/modules/context"
llmChatAPI "code.gitea.io/gitea/modules/llm_chat"
"code.gitea.io/gitea/modules/log"
"code.gitea.io/gitea/modules/setting"
api "code.gitea.io/gitea/modules/structs"
"encoding/json"
uuid "github.com/satori/go.uuid"
"net/http"
"strconv"
"strings"
"time"
)

const (
VectorStoreType = "faiss"
EmbeddingModel = "m3e-base"
TopK = 5
ScoreThreshold = 0.5
DeleteContent = true
NotRefreshVsCache = false
FlagTextInvalid = "<illegal>"
FlagAccountBanned = "<banned>"
FlagTextDoc = "<docs>"
ValidationTool = "baidu_api"
)

func getKnowledgeBaseName(ctx *context.Context) string {
kbName := ctx.Query("knowledge_base_name")
userID := strconv.FormatInt(ctx.User.ID, 10)
if kbName != setting.LLM_CHAT_API.COMMON_KB {
return userID + "_" + kbName
}
return kbName
}
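
Knowledge bases are namespaced per user by prefixing the numeric user ID; only the shared COMMON_KB passes through unprefixed. So user 42 asking for "papers" actually operates on "42_papers". A tiny sketch, with the common-KB name standing in for the configured value:

package main

import "fmt"

const commonKB = "openi_common" // stand-in for setting.LLM_CHAT_API.COMMON_KB

func realKBName(userID int64, kbName string) string {
	if kbName != commonKB {
		return fmt.Sprintf("%d_%s", userID, kbName)
	}
	return kbName
}

func main() {
	fmt.Println(realKBName(42, "papers")) // 42_papers
	fmt.Println(realKBName(42, commonKB)) // openi_common
}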

func LLMChatService(ctx *context.Context, data api.LLMChatMessage, chat *models.LlmChatVisit) {
log.Info("received by api %+v", data)
res, err := llmChatAPI.SendLLMChat(data)
if err != nil {
log.Error("LLMChatService failed: %s", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(err.Error()))
return
}
ctx.JSON(http.StatusOK, res)
}

func KBChatService(ctx *context.Context, data api.KBChatMessage, chat *models.LlmChatVisit) {
if data.TopK == 0 || data.ScoreThreshold == 0 {
data.TopK = TopK
data.ScoreThreshold = ScoreThreshold
}
log.Info("received by api %+v", data)
res, err := llmChatAPI.SendKBChat(data)
if err != nil {
log.Error("KnowledgeBaseChatService failed: %s", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(err.Error()))
return
}
log.Info("received by resty %+v", res)
ctx.JSON(http.StatusOK, res)
}

func isInvalidQuery(ctx *context.Context, chat *models.LlmChat, queryType string) bool {
var query string
if queryType == "prompt" {
query = chat.Prompt
} else {
query = chat.Answer
}

if query == "" {
return false
}

chat.InvalidCount = 0
chat.InvalidTool = ValidationTool
res, err := baiduAPI.CheckLegalText(query)
if err != nil {
log.Error("isInvalidQuery() failed: %s", err)
return false
}
if res.ConclusionType != 1 {
chat.InvalidCount = 1
chat.InvalidType = queryType
jsonRes, _ := json.Marshal(res)
chat.InvalidDetail = string(jsonRes)
err := models.SaveChat(chat)
if err != nil {
log.Error("isInvalidQuery() SaveChat failed: %s", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(err.Error()))
return true
}

invalidTotal := models.QueryInvalidPromptCount(ctx.User.ID)
if invalidTotal >= setting.LLM_CHAT_API.LEGAL_MAX_COUNT {
log.Info("isInvalidQuery() invalid total reach max: %d\n", invalidTotal)
ctx.User.ProhibitLogin = true
models.UpdateUserCols(ctx.User, "prohibit_login")
ctx.Resp.Write([]byte(FlagAccountBanned))
ctx.Resp.Flush()
} else {
ctx.Resp.Write([]byte(FlagTextInvalid))
ctx.Resp.Flush()
}
return true
}
return false
}

func StreamLLMChatService(ctx *context.Context, data api.LLMChatMessage, chat *models.LlmChatVisit) {
uuid := uuid.NewV4()
id := uuid.String()
llmChat := &models.LlmChat{
ID: id,
UserId: ctx.User.ID,
ChatId: chat.ChatId,
Prompt: data.Query,
ModelName: data.ModelName,
Endpoint: llmChatAPI.GetEndpoint(data.ModelName),
ChatType: "llm",
ChatStatus: 1,
Count: 1,
}

var answer string
ctx.Resp.Header().Set("Content-Type", "application/octet-stream; charset=utf-8")
ctx.Resp.Header().Set("X-Accel-Buffering", "no")

//call baiduai api to check legality of query
if setting.LLM_CHAT_API.LEGAL_CHECK {
invalidPrompt := isInvalidQuery(ctx, llmChat, "prompt")
if invalidPrompt {
log.Info("StreamLLMChatService() invalid prompt: %s\n", llmChat.Prompt)
return
}
}

resultChan := make(chan string)
errChan := make(chan error)
done := make(chan struct{})
go llmChatAPI.StreamLLMChat(data, resultChan, errChan, done)

for {
select {
case data := <-resultChan:
answer += data
ctx.Resp.Write([]byte(data))
ctx.Resp.Flush()
case err := <-errChan:
response := ctx.Tr("llm_chat.server_error")
for _, v := range response {
ctx.Resp.Write([]byte(string(v)))
ctx.Resp.Flush()
time.Sleep(50 * time.Millisecond)
}
log.Error("StreamLLMChatService() failed: %s", err)
log.Info("StreamLLMChatService() chat server api error, save to db")
llmChat.ChatStatus = 0
err = models.SaveChat(llmChat)
if err != nil {
log.Error("StreamLLMChatService() SaveChat failed: %s", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(err.Error()))
}
close(resultChan)
close(errChan)
close(done)
return
case <-done:
llmChat.Answer = answer
if llmChat.Answer == "" {
llmChat.ChatStatus = 0
}
if setting.LLM_CHAT_API.LEGAL_CHECK {
invalidAnswer := isInvalidQuery(ctx, llmChat, "answer")
if invalidAnswer {
log.Info("StreamLLMChatService() invalid answer: %s\n", llmChat.Answer)
close(resultChan)
close(errChan)
return
}
}
log.Info("StreamLLMChatService() nothing invalid, save to db")
err := models.SaveChat(llmChat)
if err != nil {
log.Error("StreamLLMChatService() SaveChat failed: %s", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(err.Error()))
}
close(resultChan)
close(errChan)
return
}
}
}
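
On the wire this handler is a chunked octet-stream: answer tokens are flushed as they arrive, and the moderation sentinels <illegal> (FlagTextInvalid) and <banned> (FlagAccountBanned) are written in-band. A minimal client-side sketch of consuming such a stream; the endpoint URL and request payload are placeholders, since the actual route registration lives in routers/api/v1/api.go:

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// Placeholder endpoint and payload.
	resp, err := http.Post("http://localhost:3000/api/v1/llm/chat",
		"application/json", strings.NewReader(`{"query":"hello"}`))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	buf := make([]byte, 4096)
	for {
		n, readErr := resp.Body.Read(buf)
		chunk := string(buf[:n])
		switch {
		case strings.Contains(chunk, "<banned>"):
			fmt.Println("account banned by moderation")
		case strings.Contains(chunk, "<illegal>"):
			fmt.Println("query rejected by moderation")
		default:
			fmt.Print(chunk) // print tokens as they stream in
		}
		if readErr == io.EOF {
			break
		}
		if readErr != nil {
			panic(readErr)
		}
	}
}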

func StreamKBChatService(ctx *context.Context, data api.KBChatMessage, chat *models.LlmChatVisit) {
userID := strconv.FormatInt(ctx.User.ID, 10)
if data.KnowledgeBaseName != setting.LLM_CHAT_API.COMMON_KB {
data.KnowledgeBaseName = userID + "_" + data.KnowledgeBaseName
}

uuid := uuid.NewV4()
id := uuid.String()
llmChat := &models.LlmChat{
ID: id,
UserId: ctx.User.ID,
ChatId: chat.ChatId,
Prompt: data.Query,
ModelName: data.ModelName,
Endpoint: llmChatAPI.GetEndpoint(data.ModelName),
KnowledgeBaseName: data.KnowledgeBaseName,
VectorStoreType: VectorStoreType,
EmbeddingModel: EmbeddingModel,
ChatType: "kb",
ChatStatus: 1,
Count: 1,
}

var answer string
var docs string
ctx.Resp.Header().Set("Content-Type", "application/octet-stream; charset=utf-8")
ctx.Resp.Header().Set("X-Accel-Buffering", "no")

//call baiduai api to check legality of query
if setting.LLM_CHAT_API.LEGAL_CHECK {
invalidPrompt := isInvalidQuery(ctx, llmChat, "prompt")
if invalidPrompt {
log.Info("StreamKBChatService() invalid prompt: %s\n", llmChat.Prompt)
return
}
}

resultChan := make(chan string)
errChan := make(chan error)
done := make(chan struct{})
go llmChatAPI.StreamKBChat(data, resultChan, errChan, done)

for {
select {
case data := <-resultChan:
if strings.Contains(data, "answer") {
var result api.KBChatAnswer
json.Unmarshal([]byte(data), &result)
//ctx.JSON(http.StatusOK, result)
ctx.Resp.Write([]byte(result.Answer))
ctx.Resp.Flush()
answer += result.Answer
}
if strings.Contains(data, "docs") {
docs += data
}
case err := <-errChan:
response := ctx.Tr("llm_chat.server_error")
for _, v := range response {
ctx.Resp.Write([]byte(string(v)))
ctx.Resp.Flush()
time.Sleep(50 * time.Millisecond)
}
log.Error("StreamKBChatService() failed: %s", err)
log.Info("StreamKBChatService() chat server api error, save to db")
llmChat.ChatStatus = 0
err = models.SaveChat(llmChat)
if err != nil {
log.Error("SaveChat failed: %s", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(err.Error()))
}
close(resultChan)
close(errChan)
close(done)
return
case <-done:
if docs != "" {
ctx.Resp.Write([]byte(FlagTextDoc + docs))
ctx.Resp.Flush()
}
llmChat.Answer = answer
if llmChat.Answer == "" {
llmChat.ChatStatus = 0
}
//call baiduai api to check legality of query
if setting.LLM_CHAT_API.LEGAL_CHECK {
invalidAnswer := isInvalidQuery(ctx, llmChat, "answer")
if invalidAnswer {
log.Info("StreamKBChatService() invalid answer: %s\n", llmChat.Answer)
close(resultChan)
close(errChan)
return
}
}
log.Info("StreamKBChatService() nothing invalid, save to db")
err := models.SaveChat(llmChat)
if err != nil {
log.Error("StreamKBChatService() SaveChat failed: %s", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(err.Error()))
}
close(resultChan)
close(errChan)
return
}
}
}
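
The KB variant differs from the plain LLM stream in one respect: once the done channel fires, the retrieved source documents are appended in-band behind the <docs> flag (FlagTextDoc), so a client should split on that marker to separate the answer from its references. A sketch:

package main

import (
	"fmt"
	"strings"
)

// splitKBStream separates the streamed answer from the trailing source
// documents that StreamKBChatService appends behind the "<docs>" flag.
func splitKBStream(stream string) (answer, docs string) {
	if i := strings.Index(stream, "<docs>"); i >= 0 {
		return stream[:i], stream[i+len("<docs>"):]
	}
	return stream, ""
}

func main() {
	answer, docs := splitKBStream(`PyTorch is a deep learning framework.<docs>{"docs":["intro.md"]}`)
	fmt.Printf("answer=%q docs=%q\n", answer, docs)
}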

func ListKnowledgeBaseService(ctx *context.Context) {
userID := strconv.FormatInt(ctx.User.ID, 10)
res, err := llmChatAPI.ListKnowledgeBase()
if err != nil {
log.Error("LLMChatService failed: %s", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(err.Error()))
return
}
log.Info("received by resty %+v", res)
log.Info("######## received by resty %+v\n", res)
var realKB []string
for i := len(res.Data) - 1; i >= 0; i-- {
v := res.Data[i]
if strings.Contains(v, userID) {
substr := strings.Replace(v, userID+"_", "", -1)
if strings.TrimSpace(substr) != "" {
realKB = append(realKB, substr)
}
}
}

if setting.LLM_CHAT_API.COMMON_KB != "" {
realKB = append(realKB, setting.LLM_CHAT_API.COMMON_KB)
}

realData := llmChatAPI.LLMBasicMsgWithData{
Code: res.Code,
Msg: res.Msg,
Data: realKB,
}
log.Info("######## sent %+v\n", realData)
ctx.JSON(http.StatusOK, realData)
}

func CreateKnowledgeBaseService(ctx *context.Context, data api.CreateKnowledgeBaseParams) {
userID := strconv.FormatInt(ctx.User.ID, 10)
realKB := userID + "_" + data.KnowledgeBaseName
params := api.CreateKnowledgeBaseParams{
KnowledgeBaseName: realKB,
VectorStoreType: VectorStoreType,
EmbedModel: EmbeddingModel,
}
log.Info("received by api %+v\n", params)
res, err := llmChatAPI.CreateKnowledgeBase(params)
if err != nil {
log.Error("KnowledgeBaseChatService failed: %s", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(err.Error()))
return
}
log.Info("##### received by resty %+v\n", res)
if strings.Contains(res.Msg, userID) {
substr := strings.Replace(res.Msg, userID+"_", "", -1)
if strings.TrimSpace(substr) != "" {
res.Msg = substr
}
}
log.Info("##### sent to client %+v\n", res)
ctx.JSON(http.StatusOK, res)
}

func DeleteKnowledgeBaseService(ctx *context.Context) {
userID := strconv.FormatInt(ctx.User.ID, 10)
data := getKnowledgeBaseName(ctx)
log.Info("### received by api %+v\n", data)
if data == setting.LLM_CHAT_API.COMMON_KB {
ctx.Error(http.StatusForbidden, "You can't operate %s", data)
return
}
res, err := llmChatAPI.DeleteKnowledgeBase(data)
if err != nil {
log.Error("DeleteKnowledgeBaseService failed: %s", err)
ctx.JSON(http.StatusOK, res)
return
}
log.Info("######## received by resty %+v\n", res)
if strings.Contains(res.Msg, userID) {
substr := strings.Replace(res.Msg, userID+"_", "", -1)
if strings.TrimSpace(substr) != "" {
res.Msg = substr
}
}
log.Info("######## sent to client %+v\n", res)
ctx.JSON(http.StatusOK, res)
}

func ListFilesService(ctx *context.Context) {
data := getKnowledgeBaseName(ctx)
log.Info("received by api %+v", data)
res, err := llmChatAPI.KBListFiles(data)
if err != nil {
log.Error("ListFiles failed: %s", err)
ctx.JSON(http.StatusOK, res)
return
}
log.Info("received by resty %+v", res)
ctx.JSON(http.StatusOK, res)
}

func SearchDocService(ctx *context.Context, data api.SearchDocParams) {
if data.TopK == 0 || data.ScoreThreshold == 0 {
data.TopK = TopK
data.ScoreThreshold = ScoreThreshold
}
realKB := getKnowledgeBaseName(ctx)
data.KnowledgeBaseName = realKB
log.Info("received by api %+v", data)
res, err := llmChatAPI.KBSearchDoc(data)
if err != nil {
log.Error("SearchDocService failed: %s", err)
ctx.JSON(http.StatusOK, res)
return
}
log.Info("received by resty %+v", res)
ctx.JSON(http.StatusOK, res)
}

func DeleteDocService(ctx *context.Context, data api.DeleteDocParams) {
data.DeleteContent = DeleteContent
data.NotRefreshVsCache = NotRefreshVsCache
userID := strconv.FormatInt(ctx.User.ID, 10)
realKB := userID + "_" + data.KnowledgeBaseName
data.KnowledgeBaseName = realKB
log.Info("received by api %+v", data)
if data.KnowledgeBaseName == setting.LLM_CHAT_API.COMMON_KB {
ctx.Error(http.StatusForbidden, "You can't operate %s", data.KnowledgeBaseName)
return
}
res, err := llmChatAPI.KBDeleteDoc(data)
if err != nil {
log.Error("LLMChatService failed: %s", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(err.Error()))
return
}
ctx.JSON(http.StatusOK, res)
}

func UpdateDocService(ctx *context.Context) {
data := api.UpdateDocParams{
KnowledgeBaseName: getKnowledgeBaseName(ctx),
FileNames: ctx.Query("file_name"),
NotRefreshVsCache: NotRefreshVsCache,
}
log.Info("received by api %+v", data)
if data.KnowledgeBaseName == setting.LLM_CHAT_API.COMMON_KB {
ctx.Error(http.StatusForbidden, "You can't operate %s", data.KnowledgeBaseName)
return
}
res, err := llmChatAPI.KBUpdateDoc(data)
if err != nil {
log.Error("LLMChatService failed: %s", err)
ctx.JSON(http.StatusOK, models.BaseErrorMessageApi(err.Error()))
return
}
ctx.JSON(http.StatusOK, res)
}

func RecreateVectorStoreService(ctx *context.Context) {
data := api.RecreateVectorStoreParams{
KnowledgeBaseName: getKnowledgeBaseName(ctx),
AllowEmptyKb: true,
VsType: VectorStoreType,
EmbedModel: EmbeddingModel,
}
log.Info("received by api %+v", data)
if data.KnowledgeBaseName == setting.LLM_CHAT_API.COMMON_KB {
ctx.Error(http.StatusForbidden, "You can't operate %s", data.KnowledgeBaseName)
return
}

resultChan := make(chan string)
errChan := make(chan error)
done := make(chan struct{})

go llmChatAPI.KBRecreateVectorStore(data, resultChan, errChan, done)
ctx.Resp.Header().Set("Content-Type", "application/octet-stream; charset=utf-8")
ctx.Resp.Header().Set("X-Accel-Buffering", "no")

for {
select {
case data := <-resultChan:
_, err := ctx.Resp.Write([]byte(data))
if err != nil {
log.Error("Error writing response: %s", err)
ctx.JSON(http.StatusInternalServerError, models.BaseErrorMessageApi(err.Error()))
return
}
log.Info("%s\n", []byte(data))
ctx.Resp.Flush() // Flush the response to send it immediately
case err := <-errChan:
log.Error("Error writing response: %s", err)
ctx.JSON(http.StatusInternalServerError, models.BaseErrorMessageApi(err.Error()))
return
case <-done:
close(resultChan)
close(errChan)
return
}
}
}

func UploadDocUrlService(ctx *context.Context) {
data := getKnowledgeBaseName(ctx)
if data == setting.LLM_CHAT_API.COMMON_KB {
ctx.Error(http.StatusForbidden, "You can't operate %s", data)
return
}
url, _ := llmChatAPI.GetUploadDocUrl()
log.Info("received by api %+v", url)

ctx.JSON(http.StatusOK, url)
}

func UploadDocService(ctx *context.Context, form api.LLMChatUploadForm) {
log.Info("######### received request %+v\n", ctx.Req.Request)
log.Info("######### form api.LLMChatUploadForm %+v\n", form)
modelName := ctx.Query("model_name")
userID := strconv.FormatInt(ctx.User.ID, 10)
if form.KnowledgeBaseName != setting.LLM_CHAT_API.COMMON_KB {
form.KnowledgeBaseName = userID + "_" + form.KnowledgeBaseName
}
res, err := llmChatAPI.UploadDocs(modelName, form)
log.Info("######### received by resty %+v\n", res)

if err != nil {
log.Error("UploadDocService failed: %s", err)
ctx.JSON(http.StatusOK, err.Error())
return
}

ctx.JSON(http.StatusOK, res)
}

func DownloadDocService(ctx *context.Context) {
data := getKnowledgeBaseName(ctx)
if data == setting.LLM_CHAT_API.COMMON_KB {
ctx.Error(http.StatusForbidden, "You can't operate %s", data)
return
}
fileName := ctx.Query("file_name")
log.Info("received by api knowledgeBaseName:%s, fileName: %s", data, fileName)
url, _ := llmChatAPI.GetDownloadDocUrl(data, fileName)
log.Info("received by api %+v", url)
http.Redirect(ctx.Resp, ctx.Req.Request, url, http.StatusMovedPermanently)
}

func GetFreeTriesService(ctx *context.Context) {
modelName := ctx.Query("model_name")
maxTries := setting.LLM_CHAT_API.MAX_FREE_TRIES
counts := models.QueryChatCount(ctx.User.ID, modelName)
//firstVisit := models.QueryFirstVisit(ctx.User.ID)

data := api.LLMChatCountsResults{
MaxTries: maxTries,
Counts: counts,
CanChat: counts < maxTries,
//FirstVisit: firstVisit == 0,
}
log.Info("user %+v, GetFreeTriesService() data= %+v", ctx, data)
ctx.JSON(http.StatusOK, data)
}

+ 5
- 1
templates/admin/cloudbrain/imagecommit.tmpl View File

@@ -119,7 +119,11 @@
</div>
</div>
-->
<div class="inline required field" style="padding-top: 2rem;">
<div class="inline required field">
<label class="label_color" for="" style="visibility: hidden;"></label>
<span style="color: rgb(255, 94, 0);display: inline-flex;"><i class="ri-error-warning-line" style="margin-right: 0.3rem;"></i>{{.i18n.Tr "repo.images.submit_tooltips"}}</span>
</div>
<div class="inline required field">
<label class="label_color" for="" style="visibility: hidden;"></label>
<button class="ui create_image green button" type="button">
{{.i18n.Tr "repo.cloudbrain.commit_image"}}


+ 10
- 12
templates/base/head_navbar.tmpl View File

@@ -39,16 +39,15 @@
<a class="item" href="{{AppSubUrl}}/explore/repos/square">{{.i18n.Tr "custom.head.project"}}</a>
<a class="item" href="{{AppSubUrl}}/explore/datasets">{{.i18n.Tr "custom.head.dataset"}}</a>
<div class="ui simple dropdown item" >
{{.i18n.Tr "repo.model_manager"}}
<span class="menu-new-dot">{{.i18n.Tr "repo.model_manager"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<a class="item" href="{{AppSubUrl}}/modelsquare/main">{{.i18n.Tr "repo.model_square"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/modelbase">{{.i18n.Tr "repo.model_base"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/wenxin">{{.i18n.Tr "repo.model_experience"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/modelbase"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_base"}}</span></a>
</div>
</div>
<div class="ui simple dropdown item" id='dropdown_explore'>
<span class="menu-new-dot">{{.i18n.Tr "explore"}}</span>
<span>{{.i18n.Tr "explore"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<!--<a class="item" href="{{AppSubUrl}}/explore/users">{{.i18n.Tr "explore.users"}}</a>-->
@@ -58,13 +57,13 @@
{{/* <a class="item" href="{{AppSubUrl}}/explore/data_analysis">{{.i18n.Tr "explore.data_analysis"}}</a> */}}
<a class="item" href="{{AppSubUrl}}/kanban/index.html" target="_blank" rel="opener">{{.i18n.Tr "explore.data_analysis"}}</a>
{{end}}
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=9c23803d-b190-4b33-a59a-01a65f439bce&redirect_uri=https://course.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span class="menu-new">{{.i18n.Tr "custom.head.course"}}</span></a>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=9c23803d-b190-4b33-a59a-01a65f439bce&redirect_uri=https://course.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span>{{.i18n.Tr "custom.head.course"}}</span></a>
<a class="item" href="{{AppSubUrl}}/OpenI">{{.i18n.Tr "custom.head.openi.repo"}}</a>
<a class="item" href="{{AppSubUrl}}/tech/tech_view">科技2030项目</a>
</div>
</div>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=08ed2763-e77b-4326-b06d-fab35338fe05&redirect_uri=https://bbs.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span class="menu-new">{{.i18n.Tr "custom.head.forum"}}</span></a>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=08ed2763-e77b-4326-b06d-fab35338fe05&redirect_uri=https://bbs.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span>{{.i18n.Tr "custom.head.forum"}}</span></a>
{{else if .IsLandingPageHome}}
<div class="item edge">
<div class="dropdown-menu">
@@ -86,17 +85,16 @@
<a class="item" href="{{AppSubUrl}}/explore/repos/square">{{.i18n.Tr "custom.head.project"}}</a>
<a class="item" href="{{AppSubUrl}}/explore/datasets">{{.i18n.Tr "custom.head.dataset"}}</a>
<div class="ui simple dropdown item" >
{{.i18n.Tr "repo.model_manager"}}
<span class="menu-new-dot">{{.i18n.Tr "repo.model_manager"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<a class="item" href="{{AppSubUrl}}/modelsquare/main">{{.i18n.Tr "repo.model_square"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/modelbase">{{.i18n.Tr "repo.model_base"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/wenxin">{{.i18n.Tr "repo.model_experience"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/modelbase"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_base"}}</span></a>
</div>
</div>
<div class="ui simple dropdown item" id='dropdown_PageHome'>
<span class="menu-new-dot">{{.i18n.Tr "explore"}}</span>
<span>{{.i18n.Tr "explore"}}</span>
<i class="dropdown icon"></i>
<div class="menu" >
<!--<a class="item" href="{{AppSubUrl}}/explore/users">{{.i18n.Tr "explore.users"}}</a>-->
@@ -106,13 +104,13 @@
{{/* <a class="item" href="{{AppSubUrl}}/explore/data_analysis">{{.i18n.Tr "explore.data_analysis"}}</a> */}}
<a class="item" href="{{AppSubUrl}}/kanban/index.html" target="_blank" rel="opener">{{.i18n.Tr "explore.data_analysis"}}</a>
{{end}}
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=9c23803d-b190-4b33-a59a-01a65f439bce&redirect_uri=https://course.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span class="menu-new">{{.i18n.Tr "custom.head.course"}}</span></a>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=9c23803d-b190-4b33-a59a-01a65f439bce&redirect_uri=https://course.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span>{{.i18n.Tr "custom.head.course"}}</span></a>
<a class="item" href="{{AppSubUrl}}/OpenI">{{.i18n.Tr "custom.head.openi.repo"}}</a>
<a class="item" href="{{AppSubUrl}}/tech/tech_view">科技2030项目</a>
</div>
</div>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=08ed2763-e77b-4326-b06d-fab35338fe05&redirect_uri=https://bbs.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span class="menu-new">{{.i18n.Tr "custom.head.forum"}}</span></a>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=08ed2763-e77b-4326-b06d-fab35338fe05&redirect_uri=https://bbs.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span>{{.i18n.Tr "custom.head.forum"}}</span></a>
{{else if .IsLandingPageExplore}}
<a class="item {{if .PageIsExplore}}active{{end}}" href="{{AppSubUrl}}/explore/repos/square">{{.i18n.Tr "home"}}</a>
{{else if .IsLandingPageOrganizations}}


+ 10
- 12
templates/base/head_navbar_fluid.tmpl View File

@@ -36,16 +36,15 @@
<a class="item" href="{{AppSubUrl}}/explore/repos/square">{{.i18n.Tr "custom.head.project"}}</a>
<a class="item" href="{{AppSubUrl}}/explore/datasets">{{.i18n.Tr "custom.head.dataset"}}</a>
<div class="ui simple dropdown item" >
{{.i18n.Tr "repo.model_manager"}}
<span class="menu-new-dot">{{.i18n.Tr "repo.model_manager"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<a class="item" href="{{AppSubUrl}}/modelsquare/main">{{.i18n.Tr "repo.model_square"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/modelbase">{{.i18n.Tr "repo.model_base"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/wenxin">{{.i18n.Tr "repo.model_experience"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/modelbase"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_base"}}</span></a>
</div>
</div>
<div class="ui dropdown item" id='dropdown_explore'>
<span class="menu-new-dot">{{.i18n.Tr "explore"}}</span>
<span>{{.i18n.Tr "explore"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<!--<a class="item" href="{{AppSubUrl}}/explore/users">{{.i18n.Tr "explore.users"}}</a>-->
@@ -55,12 +54,12 @@
{{/* <a class="item" href="{{AppSubUrl}}/explore/data_analysis">{{.i18n.Tr "explore.data_analysis"}}</a> */}}
<a class="item" href="{{AppSubUrl}}/kanban/index.html" target="_blank" rel="opener">{{.i18n.Tr "explore.data_analysis"}}</a>
{{end}}
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=9c23803d-b190-4b33-a59a-01a65f439bce&redirect_uri=https://course.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span class="menu-new">{{.i18n.Tr "custom.head.course"}}</span></a>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=9c23803d-b190-4b33-a59a-01a65f439bce&redirect_uri=https://course.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span>{{.i18n.Tr "custom.head.course"}}</span></a>
<a class="item" href="{{AppSubUrl}}/OpenI">{{.i18n.Tr "custom.head.openi.repo"}}</a>
<a class="item" href="{{AppSubUrl}}/tech/tech_view">科技2030项目</a>
</div>
</div>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=08ed2763-e77b-4326-b06d-fab35338fe05&redirect_uri=https://bbs.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span class="menu-new">{{.i18n.Tr "custom.head.forum"}}</span></a>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=08ed2763-e77b-4326-b06d-fab35338fe05&redirect_uri=https://bbs.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span>{{.i18n.Tr "custom.head.forum"}}</span></a>
{{else if .IsLandingPageHome}}
<div class="item edge" >
<div class="dropdown-menu">
@@ -81,16 +80,15 @@
<a class="item" href="{{AppSubUrl}}/explore/repos/square">{{.i18n.Tr "custom.head.project"}}</a>
<a class="item" href="{{AppSubUrl}}/explore/datasets">{{.i18n.Tr "custom.head.dataset"}}</a>
<div class="ui simple dropdown item" >
{{.i18n.Tr "repo.model_manager"}}
<span class="menu-new-dot">{{.i18n.Tr "repo.model_manager"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<a class="item" href="{{AppSubUrl}}/modelsquare/main">{{.i18n.Tr "repo.model_square"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/modelbase">{{.i18n.Tr "repo.model_base"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/wenxin">{{.i18n.Tr "repo.model_experience"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/modelbase"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_base"}}</span></a>
</div>
</div>
<div class="ui dropdown item" id='dropdown_PageHome'>
<span class="menu-new-dot">{{.i18n.Tr "explore"}}</span>
<span>{{.i18n.Tr "explore"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<!--<a class="item" href="{{AppSubUrl}}/explore/users">{{.i18n.Tr "explore.users"}}</a>-->
@@ -100,13 +98,13 @@
{{/* <a class="item" href="{{AppSubUrl}}/explore/data_analysis">{{.i18n.Tr "explore.data_analysis"}}</a> */}}
<a class="item" href="{{AppSubUrl}}/kanban/index.html" target="_blank" rel="opener">{{.i18n.Tr "explore.data_analysis"}}</a>
{{end}}
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=9c23803d-b190-4b33-a59a-01a65f439bce&redirect_uri=https://course.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span class="menu-new">{{.i18n.Tr "custom.head.course"}}</span></a>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=9c23803d-b190-4b33-a59a-01a65f439bce&redirect_uri=https://course.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span>{{.i18n.Tr "custom.head.course"}}</span></a>
<a class="item" href="{{AppSubUrl}}/OpenI">{{.i18n.Tr "custom.head.openi.repo"}}</a>
<a class="item" href="{{AppSubUrl}}/tech/tech_view">科技2030项目</a>
</div>
</div>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=08ed2763-e77b-4326-b06d-fab35338fe05&redirect_uri=https://bbs.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span class="menu-new">{{.i18n.Tr "custom.head.forum"}}</span></a>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=08ed2763-e77b-4326-b06d-fab35338fe05&redirect_uri=https://bbs.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span>{{.i18n.Tr "custom.head.forum"}}</span></a>
{{else if .IsLandingPageExplore}}
<a class="item {{if .PageIsExplore}}active{{end}}" href="{{AppSubUrl}}/explore/repos/square">{{.i18n.Tr "home"}}</a>
{{else if .IsLandingPageOrganizations}}


+ 10
- 12
templates/base/head_navbar_home.tmpl View File

@@ -28,16 +28,15 @@
<a class="item" href="{{AppSubUrl}}/explore/repos/square">{{.i18n.Tr "custom.head.project"}}</a>
<a class="item" href="{{AppSubUrl}}/explore/datasets">{{.i18n.Tr "custom.head.dataset"}}</a>
<div class="ui simple dropdown item" >
{{.i18n.Tr "repo.model_manager"}}
<span class="menu-new-dot">{{.i18n.Tr "repo.model_manager"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<a class="item" href="{{AppSubUrl}}/modelsquare/main">{{.i18n.Tr "repo.model_square"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/modelbase">{{.i18n.Tr "repo.model_base"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/wenxin">{{.i18n.Tr "repo.model_experience"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/modelbase"><span class="menu-new">{{.i18n.Tr "repo.model_base"}}</span></a>
</div>
</div>
<div class="ui dropdown item" id='dropdown_explore'>
<span class="menu-new-dot">{{.i18n.Tr "explore"}}</span>
<span>{{.i18n.Tr "explore"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<!--<a class="item" href="{{AppSubUrl}}/explore/users">{{.i18n.Tr "explore.users"}}</a>-->
@@ -47,13 +46,13 @@
{{/* <a class="item" href="{{AppSubUrl}}/explore/data_analysis">{{.i18n.Tr "explore.data_analysis"}}</a> */}}
<a class="item" href="{{AppSubUrl}}/kanban/index.html" target="_blank" rel="opener">{{.i18n.Tr "explore.data_analysis"}}</a>
{{end}}
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=9c23803d-b190-4b33-a59a-01a65f439bce&redirect_uri=https://course.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span class="menu-new">{{.i18n.Tr "custom.head.course"}}</span></a>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=9c23803d-b190-4b33-a59a-01a65f439bce&redirect_uri=https://course.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span>{{.i18n.Tr "custom.head.course"}}</span></a>
<a class="item" href="{{AppSubUrl}}/OpenI">{{.i18n.Tr "custom.head.openi.repo"}}</a>
<a class="item" href="{{AppSubUrl}}/tech/tech_view">科技2030项目</a>
</div>
</div>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=08ed2763-e77b-4326-b06d-fab35338fe05&redirect_uri=https://bbs.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span class="menu-new">{{.i18n.Tr "custom.head.forum"}}</span></a>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=08ed2763-e77b-4326-b06d-fab35338fe05&redirect_uri=https://bbs.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span>{{.i18n.Tr "custom.head.forum"}}</span></a>
{{else if .IsLandingPageHome}}
<div class="item edge" >
<div class="dropdown-menu">
@@ -75,16 +74,15 @@
<a class="item" href="{{AppSubUrl}}/explore/repos/square">{{.i18n.Tr "custom.head.project"}}</a>
<a class="item" href="{{AppSubUrl}}/explore/datasets">{{.i18n.Tr "custom.head.dataset"}}</a>
<div class="ui simple dropdown item" >
{{.i18n.Tr "repo.model_manager"}}
<span class="menu-new-dot">{{.i18n.Tr "repo.model_manager"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<a class="item" href="{{AppSubUrl}}/modelsquare/main">{{.i18n.Tr "repo.model_square"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/modelbase">{{.i18n.Tr "repo.model_base"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/wenxin">{{.i18n.Tr "repo.model_experience"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/modelbase"><span class="menu-new" style="margin-right: 20px;" style="margin-right: 20px;">{{.i18n.Tr "repo.model_base"}}</span></a>
</div>
</div>
<div class="ui dropdown item" id='dropdown_PageHome'>
<span class="menu-new-dot">{{.i18n.Tr "explore"}}</span>
<span>{{.i18n.Tr "explore"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<!--<a class="item" href="{{AppSubUrl}}/explore/users">{{.i18n.Tr "explore.users"}}</a>-->
@@ -94,13 +92,13 @@
{{/* <a class="item" href="{{AppSubUrl}}/explore/data_analysis">{{.i18n.Tr "explore.data_analysis"}}</a> */}}
<a class="item" href="{{AppSubUrl}}/kanban/index.html" target="_blank" rel="opener">{{.i18n.Tr "explore.data_analysis"}}</a>
{{end}}
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=9c23803d-b190-4b33-a59a-01a65f439bce&redirect_uri=https://course.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span class="menu-new">{{.i18n.Tr "custom.head.course"}}</span></a>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=9c23803d-b190-4b33-a59a-01a65f439bce&redirect_uri=https://course.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span>{{.i18n.Tr "custom.head.course"}}</span></a>
<a class="item" href="{{AppSubUrl}}/OpenI">{{.i18n.Tr "custom.head.openi.repo"}}</a>
<a class="item" href="{{AppSubUrl}}/tech/tech_view">科技2030项目</a>
</div>
</div>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=08ed2763-e77b-4326-b06d-fab35338fe05&redirect_uri=https://bbs.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span class="menu-new">{{.i18n.Tr "custom.head.forum"}}</span></a>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=08ed2763-e77b-4326-b06d-fab35338fe05&redirect_uri=https://bbs.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span>{{.i18n.Tr "custom.head.forum"}}</span></a>
{{else if .IsLandingPageExplore}}
<a class="item {{if .PageIsExplore}}active{{end}}" href="{{AppSubUrl}}/explore/repos/square">{{.i18n.Tr "home"}}</a>
{{else if .IsLandingPageOrganizations}}


+ 10
- 12
templates/base/head_navbar_pro.tmpl View File

@@ -38,16 +38,15 @@
<a class="item" href="{{AppSubUrl}}/explore/repos/square">{{.i18n.Tr "custom.head.project"}}</a>
<a class="item" href="{{AppSubUrl}}/explore/datasets">{{.i18n.Tr "custom.head.dataset"}}</a>
<div class="ui simple dropdown item" >
{{.i18n.Tr "repo.model_manager"}}
<span class="menu-new-dot">{{.i18n.Tr "repo.model_manager"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<a class="item" href="{{AppSubUrl}}/modelsquare/main">{{.i18n.Tr "repo.model_square"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/modelbase">{{.i18n.Tr "repo.model_base"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/wenxin">{{.i18n.Tr "repo.model_experience"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/modelbase"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_base"}}</span></a>
</div>
</div>
<div class="ui dropdown item" id='dropdown_explore'>
<span class="menu-new-dot">{{.i18n.Tr "explore"}}</span>
<span>{{.i18n.Tr "explore"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<!--<a class="item" href="{{AppSubUrl}}/explore/users">{{.i18n.Tr "explore.users"}}</a>-->
@@ -57,13 +56,13 @@
{{/* <a class="item" href="{{AppSubUrl}}/explore/data_analysis">{{.i18n.Tr "explore.data_analysis"}}</a> */}}
<a class="item" href="{{AppSubUrl}}/kanban/index.html" target="_blank" rel="opener">{{.i18n.Tr "explore.data_analysis"}}</a>
{{end}}
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=9c23803d-b190-4b33-a59a-01a65f439bce&redirect_uri=https://course.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span class="menu-new">{{.i18n.Tr "custom.head.course"}}</span></a>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=9c23803d-b190-4b33-a59a-01a65f439bce&redirect_uri=https://course.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span>{{.i18n.Tr "custom.head.course"}}</span></a>
<a class="item" href="{{AppSubUrl}}/OpenI">{{.i18n.Tr "custom.head.openi.repo"}}</a>
<a class="item" href="{{AppSubUrl}}/tech/tech_view">科技2030项目</a>
</div>
</div>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=08ed2763-e77b-4326-b06d-fab35338fe05&redirect_uri=https://bbs.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span class="menu-new">{{.i18n.Tr "custom.head.forum"}}</span></a>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=08ed2763-e77b-4326-b06d-fab35338fe05&redirect_uri=https://bbs.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span>{{.i18n.Tr "custom.head.forum"}}</span></a>
{{else if .IsLandingPageHome}}
<div class="item edge">
<div class="dropdown-menu">
@@ -85,16 +84,15 @@
<a class="item" href="{{AppSubUrl}}/explore/repos">{{.i18n.Tr "custom.head.project"}}</a>
<a class="item" href="{{AppSubUrl}}/explore/datasets">{{.i18n.Tr "custom.head.dataset"}}</a>
<div class="ui simple dropdown item" >
{{.i18n.Tr "repo.model_manager"}}
<span class="menu-new-dot">{{.i18n.Tr "repo.model_manager"}}</span>
<i class="dropdown icon"></i>
<div class="menu">
<a class="item" href="{{AppSubUrl}}/modelsquare/main">{{.i18n.Tr "repo.model_square"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/modelbase">{{.i18n.Tr "repo.model_base"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/wenxin">{{.i18n.Tr "repo.model_experience"}}</a>
<a class="item" href="{{AppSubUrl}}/extension/modelbase"><span class="menu-new" style="margin-right: 20px;">{{.i18n.Tr "repo.model_base"}}</span></a>
</div>
</div>
<div class="ui dropdown item" id='dropdown_PageHome'>
<span class="menu-new-dot">{{.i18n.Tr "explore"}}</span>
<span>{{.i18n.Tr "explore"}}</span>
<i class="dropdown icon"></i>
<div class="menu" >
<!--<a class="item" href="{{AppSubUrl}}/explore/users">{{.i18n.Tr "explore.users"}}</a>-->
@@ -104,13 +102,13 @@
{{/* <a class="item" href="{{AppSubUrl}}/explore/data_analysis">{{.i18n.Tr "explore.data_analysis"}}</a> */}}
<a class="item" href="{{AppSubUrl}}/kanban/index.html" target="_blank" rel="opener">{{.i18n.Tr "explore.data_analysis"}}</a>
{{end}}
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=9c23803d-b190-4b33-a59a-01a65f439bce&redirect_uri=https://course.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span class="menu-new">{{.i18n.Tr "custom.head.course"}}</span></a>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=9c23803d-b190-4b33-a59a-01a65f439bce&redirect_uri=https://course.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span>{{.i18n.Tr "custom.head.course"}}</span></a>
<a class="item" href="{{AppSubUrl}}/OpenI">{{.i18n.Tr "custom.head.openi.repo"}}</a>
<a class="item" href="{{AppSubUrl}}/tech/tech_view">科技2030项目</a>
</div>
</div>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=08ed2763-e77b-4326-b06d-fab35338fe05&redirect_uri=https://bbs.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span class="menu-new">{{.i18n.Tr "custom.head.forum"}}</span></a>
<a class="item" target="_blank" href="https://openi.pcl.ac.cn/login/oauth/authorize?client_id=08ed2763-e77b-4326-b06d-fab35338fe05&redirect_uri=https://bbs.openi.org.cn/auth/openi/login&response_type=code&state=STATE"><span>{{.i18n.Tr "custom.head.forum"}}</span></a>
{{else if .IsLandingPageExplore}}
<a class="item {{if .PageIsExplore}}active{{end}}" href="{{AppSubUrl}}/explore/repos/square">{{.i18n.Tr "home"}}</a>
{{else if .IsLandingPageOrganizations}}


+ 8
- 0
templates/model/llmchat/create/index.tmpl View File

@@ -0,0 +1,8 @@
{{template "base/head" .}}
<link rel="stylesheet" href="{{StaticUrlPrefix}}/css/vp-llms-create.css?v={{MD5 AppVer}}" />
<div id="llms-create-setting" data-has-chat="{{.chatID}}" data-expire-minutes="{{LlmExpireMinutes}}">
<div id="__vue-root"></div>
</div>

<script src="{{StaticUrlPrefix}}/js/vp-llms-create.js?v={{MD5 AppVer}}"></script>
{{template "base/footer" .}}

+ 12
- 0
templates/model/llmchat/index.tmpl View File

@@ -0,0 +1,12 @@

{{template "base/head" .}}
<link rel="stylesheet" href="{{StaticUrlPrefix}}/css/vp-model-llms.css?v={{MD5 AppVer}}" />
<!-- <div id="isSignd" data-sign="{{$.IsSigned}}" style="display: none;"></div> -->
<!-- <iframe id='mainFrame' name='mainFrame' ref='mainFrame' src="http://36.103.234.65:5900/" target="_blank" height='100%' frameborder="0" width="100%" ></iframe> -->
<div id="dialog-setting" data-common-kb="{{LlmCommonKB}}"
data-first-visit="{{.firstVisit}}" data-create-unix="{{.landingUnix}}" data-end-unix="{{.expiredUnix}}"
data-counts="{{.counts}}" data-max-tries="{{.max_tries}}" data-expire-minutes="{{LlmExpireMinutes}}"></div>
<div id="__vue-root"></div>
<script src="{{StaticUrlPrefix}}/js/vp-model-llms.js?v={{MD5 AppVer}}"></script>
{{template "base/footer" .}}

+ 5
- 1
templates/repo/cloudbrain/image/edit.tmpl View File

@@ -97,7 +97,11 @@
</div>
</div>
-->
<div class="inline required field" style="padding-top: 2rem;">
<div class="inline required field">
<label class="label_color" for="" style="visibility: hidden;"></label>
<span style="color: rgb(255, 94, 0);display: inline-flex;"><i class="ri-error-warning-line" style="margin-right: 0.3rem;"></i>{{.i18n.Tr "repo.images.submit_tooltips"}}</span>
</div>
<div class="inline required field">
<label class="label_color" for="" style="visibility: hidden;"></label>
<button class="ui create_image green button" type="button">
{{.i18n.Tr "explore.save"}}


+ 6
- 1
templates/repo/cloudbrain/image/submit.tmpl View File

@@ -32,6 +32,7 @@
<div class="submit-image-tmplvalue" style="display: none;" data-link="{{$.Link}}" data-repo-link="{{$.RepoLink}}" data-edit-page="submit"></div>
<div class="ui attached segment" style="padding: 2em 3em;padding-bottom: 7rem;">
<div class="ui form" id="form_image">
{{.CsrfTokenHtml}}
<div class="inline field">
<label class="label_color" for="">{{$.i18n.Tr "dataset.dataset_available_clusters"}}</label>
@@ -89,7 +90,11 @@
</div>
</div>
-->
<div class="inline required field" style="padding-top: 2rem;">
<div class="inline required field">
<label class="label_color" for="" style="visibility: hidden;"></label>
<span style="color: rgb(255, 94, 0);display: inline-flex;"><i class="ri-error-warning-line" style="margin-right: 0.3rem;"></i>{{.i18n.Tr "repo.images.submit_tooltips"}}</span>
</div>
<div class="inline required field">
<label class="label_color" for="" style="visibility: hidden;"></label>
<button class="ui create_image green button" type="button">
{{.i18n.Tr "repo.cloudbrain.commit_image"}}


+ 1
- 1
templates/user/dashboard/dashboard.tmpl View File

@@ -46,7 +46,7 @@
const {AppSubUrl, StaticUrlPrefix, csrf} = window.config;
uid_ = Number((document.querySelector('meta[name=_context_uid]') || {}).content)
// console.log("uid:",uid_)
let URL = AppSubUrl + '/api/v1/repos/search?sort=updated&order=desc&uid='+uid_ +'&q=&page=1&limit=10&mode= ';
let URL = AppSubUrl + '/api/v1/repos/search?_csrf='+csrf+'&sort=updated&order=desc&uid='+uid_ +'&q=&page=1&limit=10&mode= ';
$.getJSON(URL, (result, _textStatus, request) => {
const counts_pro = request.getResponseHeader('X-Total-Count');
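The change above moves the CSRF token into the query string by string concatenation; the same request built with URLSearchParams, as a sketch (equivalent behavior, except the original's trailing space in mode= is dropped):

const { AppSubUrl, csrf } = window.config;
const qs = new URLSearchParams({
  _csrf: csrf, sort: 'updated', order: 'desc',
  uid: String(uid_), q: '', page: '1', limit: '10', mode: '',
});
$.getJSON(`${AppSubUrl}/api/v1/repos/search?${qs}`, (result, _textStatus, request) => {
  const counts_pro = request.getResponseHeader('X-Total-Count');
});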


+ 1
- 1
templates/wenxin_privacy.tmpl View File

@@ -8,7 +8,7 @@
<div class="ui container">
<h1></h1>
<h1 style="text-align: center;">OpenI启智社区AI协作平台免责声明和服务使用规范</h1>
<p style="font-size: 16px;">亲爱的用户您好!您在使用启智文心作画大模型服务(以下简称“本服务”)前,请您务必仔细阅读并理解透彻本 《免责声明和服务使用规范》(以下简称“本声明”)。请您知悉,如果您选择继续使用本服务,意味着您充分知悉并接受以下使用条件:</p>
<p style="font-size: 16px;">亲爱的用户您好!您在使用启智大模型服务(以下简称“本服务”)前,请您务必仔细阅读并理解透彻本 《免责声明和服务使用规范》(以下简称“本声明”)。请您知悉,如果您选择继续使用本服务,意味着您充分知悉并接受以下使用条件:</p>
<h1>一、免责声明</h1>
<p><span >1.</span><span>请您确保您所输入的内容未侵害他人权益,未涉及不良信息,同时未输入与政治、暴力、色情相关的内容,且所有输入内容均合法合规。</span></p>
<p><span>2.</span><span>您确认并知悉本服务生成的所有内容均由人工智能模型生成,生成内容具有不完全理性,我方对其生成内容的准确性、完整性和功能性不做任何保证,亦不承担任何法律责任。我们会尽可能为您提供准确且优质稳定的技术服务,但由人工智能模型生成的内容均不代表我们的态度、观点或立场。</span></p>


+ 2
- 2
web_src/js/index.js View File

@@ -2404,7 +2404,7 @@ function searchUsers() {
$searchUserBox.search({
minCharacters: 2,
apiSettings: {
url: `${AppSubUrl}/api/v1/users/search?q={query}`,
url: `${AppSubUrl}/api/v1/users/search?_csrf=${csrf}&q={query}`,
onResponse(response) {
const items = [];
$.each(response.data, (_i, item) => {
@@ -3375,7 +3375,7 @@ function initVueComponents() {
searchURL() {
return `${
this.suburl
}/api/v1/repos/search?sort=updated&order=desc&uid=${this.uid}&q=${
}/api/v1/repos/search?_csrf=${csrf}&sort=updated&order=desc&uid=${this.uid}&q=${
this.searchQuery
}&page=${this.page}&limit=${this.searchLimit}&mode=${
this.repoTypes[this.reposFilter].searchMode


+ 30
- 0
web_src/less/_model.less View File

@@ -299,6 +299,36 @@
}

}
.model-type{
display: inline-flex;
padding: 0.6rem 1.2rem;
border: 1px solid rgba(229, 231, 235, 1);
border-radius: 5px;
color: rgba(16, 16, 16, 1);
margin-right: 2rem;
font-size: 16px;
position: relative;
p{
line-height: 100%;
}
}
.model-activate{
border: 1px solid rgb(1, 145, 255);
color: rgb(1, 145, 255);

}
.model-activate::before{
position: absolute;
content: "";
width: 0;
height: 0;
border-top:7px solid rgb(1, 145, 255) ;
border-left: 7px solid transparent;
border-right: 7px solid transparent;
top: 100%;
left: 42%;

}


/* ----------------------------------------------


+ 1
- 0
web_src/vuepages/apis/modules/common.js View File

@@ -20,3 +20,4 @@ export const getPointAccountInfo = () => {
params: {},
});
}


+ 132
- 0
web_src/vuepages/apis/modules/llmchat.js View File

@@ -0,0 +1,132 @@
import service from '../service';
let csrf = window.config ? window.config.csrf : ''

// Online experience (record a visit)
export const onlineExperience = (params) => {
return service({
url: `/api/v1/llm/chat/visit`,
method: 'post',
params: params,
data: {}
});
}
// Agree to the usage notice
export const llmAgree = (params) => {
return service({
url: `/api/v1/llm/chat/agree`,
method: 'post',
params: params,
});
}
// Query chat usage count and quota
export const llmCount = (params) => {
return service({
url: '/api/v1/llm/chat/counts',
method: 'get',
params: params,
});
}
// LLM model chat (streaming)
export const llmChat = (data,model_name) => {
return fetch(`/api/v1/llm/chat/chat?_csrf=${csrf}&model_name=${model_name}`,{
headers:{
'Content-Type': 'application/json'
},
method: 'POST',
body: JSON.stringify(data)
})
}

// LLM knowledge base chat (streaming)
export const llmKbChat = (data,model_name) => {
return fetch(`/api/v1/llm/chat/knowledge_base_chat?_csrf=${csrf}&model_name=${model_name}`,{
headers:{
'Content-Type': 'application/json'
},
method: 'POST',
body: JSON.stringify(data)
})
}

// Create a knowledge base
export const llmKbcreate = (data,params) => {
return service({
url: `/api/v1/llm/knowledge_base/create`,
method: 'post',
params: params,
data: data
});
}

// Delete a knowledge base
export const llmKbDelete = (params) => {
return service({
url: '/api/v1/llm/knowledge_base/delete',
method: 'post',
params: params,

});
}

// Delete documents from a knowledge base
export const llmKbDeleteDoc = (data,params) => {
return service({
url: '/api/v1/llm/knowledge_base/delete_doc',
method: 'post',
params: params,
data: data
});
}


// Query the knowledge base list
export const llmKbList = (params) => {
return service({
url: '/api/v1/llm/knowledge_base/list',
method: 'get',
params: params,
});
}

// Upload documents to a knowledge base
export const llmKbUploadDocUrl = (params,data) => {
return service({
url: '/api/v1/llm/knowledge_base/upload_doc',
method: 'post',
params: params,
data: data
});
}
export const llmKbUploadDoc = (url,data) => {
return service({
url: url,
method: 'post',
params: {},
data: data
});
}
// Query the file list of the current knowledge base
export const llmKbFileList = (params) => {
return service({
url: '/api/v1/llm/knowledge_base/list_files',
method: 'get',
params: params,
});
}

export const llmRecreateVectorStore = (params) => {
return fetch(`/api/v1/llm/knowledge_base/recreate_vector_store?_csrf=${csrf}&knowledge_base_name=${params.knowledge_base_name}&model_name=${params.model_name}`, {
headers: {
'Content-Type': 'application/json'
},
method: 'POST',
})
}
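llmChat, llmKbChat, and llmRecreateVectorStore return raw fetch Responses so callers can stream; a minimal consumption sketch (streamChat and onChunk are illustrative names, not part of this module):

async function streamChat(data, modelName, onChunk) {
  const response = await llmChat(data, modelName);
  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  while (true) {
    const { done, value } = await reader.read();
    if (done) break;                 // server closed the stream
    onChunk(decoder.decode(value));  // hand each decoded chunk to the UI
  }
}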

+ 3
- 1
web_src/vuepages/apis/modules/modelbase.js View File

@@ -52,13 +52,15 @@ export const stopTrainJob = (params) => {
url: `/api/v1/repos/${params.userName}/openi-notebook/modelarts/train-job/${params.jobId}/stop_version`,
method: 'post',
params: {},
data: {},
});
}

// Delete a training task
export const deleteTrainJob = (params) => {
return service({
url: `/api/v1/repos/${params.userName}/openi-notebook/modelarts/train-job/${params.jobId}/del_version`,
// url: `/api/v1/repos/${params.userName}/openi-notebook/modelarts/train-job/${params.jobId}/del_version`,
url: `/api/v1/${params.userName}/openi-notebook/ai_task/del?id=${params.id}`,
method: 'post',
params: {},
});


+ 19
- 4
web_src/vuepages/components/cloudbrain/details/ConfigInfo.vue View File

@@ -1,15 +1,18 @@
<template>
<div class="item-container">
<template v-for="(item, index) in configs.fields">
<div :key="item + '-' + index" v-if="item != 'dataset' && item != 'modelList' && item != 'failedReason'" class="item-block">
<div :key="item + '-' + index" v-if="item != 'dataset' && item != 'modelList' && item != 'failedReason'"
class="item-block">
<div class="title"> {{ renderTitle(item) }} </div>
<div class="content" v-html="renderContent(item)"></div>
</div>
<div :key="item + '-' + index" v-if="item == 'failedReason' && renderContent(item)" class="item-block item-failed-reason">
<div :key="item + '-' + index" v-if="item == 'failedReason' && renderContent(item)"
class="item-block item-failed-reason">
<div class="title"> {{ renderTitle(item) }} </div>
<div class="content" v-html="renderContent(item)"></div>
</div>
<div :key="item + '-' + index" v-if="item == 'dataset' && data.task.dataset_list.length > 0" class="item-block item-dataset">
<div :key="item + '-' + index" v-if="item == 'dataset' && data.task.dataset_list.length > 0"
class="item-block item-dataset">
<div class="table-container">
<el-table ref="tableRef" :data="data.task.dataset_list" style="width:100%">
<el-table-column prop="FileName" :label="$t('cloudbrainObj.datasetFiles')"
@@ -41,7 +44,8 @@
</el-table>
</div>
</div>
<div :key="item + '-' + index" v-if="item == 'modelList' && data.task.pretrain_model_list.length > 0" class="item-block item-model-list">
<div :key="item + '-' + index" v-if="item == 'modelList' && data.task.pretrain_model_list.length > 0"
class="item-block item-model-list">
<div class="table-container">
<el-table ref="tableRef" :data="data.task.pretrain_model_list" style="width:100%">
<el-table-column prop="FileName" :label="$t('modelManage.modelFiles')"
@@ -142,6 +146,9 @@ export default {
case 'aiCenter':
result = i18n.t('resourcesManagement.aiCenter');
break;
case 'hasInternet':
result = i18n.t('cloudbrainObj.networkType');
break;
case 'creator':
result = i18n.t('modelManage.creator');
break;
@@ -260,6 +267,14 @@ export default {
case 'aiCenter':
result = task.ai_center;
break;
case 'hasInternet':
result = '--'
if (task.has_internet == 1) {
result = i18n.t('cloudbrainObj.noInternet');
} else if (task.has_internet == 2) {
result = i18n.t('cloudbrainObj.hasInternet');
}
break;
case 'branch':
result = `${task.branch_name}` + (task.commit_id ? `<span class="commit-id">${task.commit_id.slice(0, 10)}</span>` : '');
break;
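The has_internet decoding added above, restated as a pure helper for reference (hypothetical name; 1 means no internet access, 2 means internet access, anything else renders as '--'):

function internetLabel(hasInternet, i18n) {
  if (hasInternet == 1) return i18n.t('cloudbrainObj.noInternet');
  if (hasInternet == 2) return i18n.t('cloudbrainObj.hasInternet');
  return '--'; // unknown / not reported
}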


+ 68
- 1
web_src/vuepages/langs/config/en-US.js View File

@@ -338,6 +338,7 @@ const en = {
modelAccess: 'Model Access',
modelAccessPublic: 'Public',
modelAccessPrivate: 'Private',
modelAccessTips: 'Only public projects can be set as public models',
modelSettings: 'Model Settings',
edit: 'Edit',
editFiles: 'Edit files',
@@ -561,7 +562,7 @@ const en = {
allNetworkType: 'All network',
noInternet: 'No',
hasInternet: 'Yes',
networkTypeDesc: '「Access Internet」Describe whether the computing resource center supports internet access',
networkTypeDesc: '「Access Internet」Describe whether the computing resource center supports internet access. When \'No\' is selected, more computing resource centers can be allocated, and the dataset and models on the OpenI platform can still be used.',
resourceSpec: 'Specification',
specPlaceholder: 'Select resource specification',
specDescr: 'Resource Note',
@@ -673,6 +674,72 @@ const en = {
},
superComputeObj: {
mmlSparkDescr: `The full name of MMLSpark is Microsoft Machine Learning for Apache Spark, which enables users to run customized container images and grants them root accesses within the container. Users can directly use Microsoft's MMLSpark provided by the platform.\nNote: MMLSpark is a Spark version provided by Microsoft for machine learning environments( <a target="_blank" href="https://github.com/Azure/mmlspark">https://github.com/Azure/mmlspark</a> )Regarding mmlspark, please refer to the following paper: <a target="_blank" href="https://arxiv.org/pdf/1810.08744.pdf">https://arxiv.org/pdf/1810.08744.pdf</a>`,
},
modelSquare: {
llmHeader: 'Document dialogue experience',
chatGlm_intro: 'is an open source conversational language model that supports Chinese and English bilingualism, provided by Zhipu AI.',
llama2:'is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format.',
dialogtips1: 'Hello 👋! Welcome to the large model knowledge base Q&A experience',
dialogtips21: 'This experience is based on',
dialogtips22: 'language model and the m3e-base vector model',
dialogtips3: 'Please choose to talk to the model directly or ask questions based on the local knowledge base on the right.',
dialogtips4: 'In the knowledge base Q&A mode, after selecting the name of the knowledge base, you can start Q&A.',
dialogtips5: 'If necessary, you can upload files/folders to the knowledge base or delete files from the knowledge base after selecting the knowledge base name.',
dialogtips6: 'The model has been loaded successfully. You can start the conversation now, or select a mode on the right first',
promptPlaceholder: 'Please enter your question (Ctrl+Enter for a new line, Enter to submit)',
dialogModeSelect: 'Please select the dialogue mode',
dialogLLM: 'LLM dialogue',
dialogKb: 'Knowledge Base Q&A',
configKb: 'Configure Knowledge Base',
updatekb: 'Update existing knowledge base options',
recreateKb: 'Vector library reconstruction in progress, please be patient and do not refresh or close the webpage',
selectKb: 'Please select the knowledge base to load:',
createKb: 'Create knowledge base',
deleteKb: 'Delete knowledge base',
deleteKbTips: 'Are you sure you want to delete the {knowledgeValue} knowledge base?',
deleteVbTips: 'Are you sure you want to delete the selected knowledge base files?',
uploadFile: 'Upload files',
uploadFileTips1: 'Drag the file here, or <em>click to upload</em>',
uploadFileTips2:'Single file upload size limit is 1MB • HTML, MD, JSON, CSV, TXT, XML, DOCX',
addFileToKb: 'Add files to the knowledge base',
manageFile: 'Manage files',
deleteKbFileSelect: 'Please select the file to delete from the existing files in the knowledge base',
deleteKbFile: 'Delete files from the knowledge base',
recreateKbSuccess: '{knowledgeValue} vector library reconstruction successful',
noPermission: 'You do not have permission to operate',
fileExit: 'File already exists',
fileExceed: 'File exceeds 1MB',
fileError: 'File error, please re-upload!',
fileTypeError:'File type error, please re-upload!',
fileUploadSuccess: 'File {fileName} uploaded successfully!',
kbName: 'Knowledge Base Name',
createKbPlaceholder: 'The new knowledge base name can only contain numbers and letters',
vectorType: 'Vector Library Type',
embedModel: 'Embedding Model',
cancel: 'Cancel',
create: 'Create',
ok: 'Ok',
kbNameDetect1: 'Name cannot be empty',
kbNameDetect2: 'The name can only contain numbers and letters',
chatExceedCount: 'Usage limit exceeded; the model service is no longer available!',
useNotice: 'User experience instructions',
agreeNotice: 'Agree to <a href="/home/model_privacy" target="_blank"> 《OpenI Qizhi Community AI Collaboration Platform Disclaimer and Service Usage Specification》</a> <p style="text-align: center;margin-top: 1rem;color: red;">Kind reminder: Unreasonable use may result in account closure</p>',
modelProvide: '<a href="/home/model_privacy" target="_blank"> 《Disclaimer and Service Usage Specifications》</a> This model runs with computing power provided by <a href="https://ai.blsc.cn/" target="_blank">Beijing Super Cloud Computing Center</a>',
maxTries: 'Limited experience {maxTries} times',
modelChatTask: 'Create a model online experience task',
createChatTips1: 'Click the button below to create an online experience task. After it is created, you can experience it online for {expireMinutes} minutes',
createChatTips2: 'The online experience tasks created are exclusive to your account only.',
createChatTips3: 'You have already created an online experience task for the current model. Click the button below to directly enter the experience interface',
createChatBtns1: 'Create an online experience task',
createChatBtns2: 'Online experience',
experienceTime: 'Experience Countdown:',
uploadFIleLimit: 'Upload up to 10 files',
inputNotEmpty: 'The input content cannot be empty!',
sessionChating: 'Session loading, please be patient!',
chatBanned: 'Your account has been banned, please contact the website administrator',
chatIllegal: "I'm very sorry, as an artificial intelligence assistant I can only provide objective information. Do you have anything else to ask?",
chatExpireMins: 'If the experience time exceeds {expireMinutes} minutes, the online experience task needs to be re-created.',
chatExpired: 'Chat session expired, please create a new chat.',
}
}



+ 69
- 2
web_src/vuepages/langs/config/zh-CN.js View File

@@ -354,6 +354,7 @@ const zh = {
modelAccess: '模型权限',
modelAccessPublic: '公开',
modelAccessPrivate: '私有',
modelAccessTips: '公开项目才可以设置模型为公开',
modelSettings: '模型信息设置',
edit: '编辑',
editFiles: '编辑文件',
@@ -577,7 +578,7 @@ const zh = {
allNetworkType: '全部网络',
noInternet: '否',
hasInternet: '是',
networkTypeDesc: '「访问Internet」是描述您的任务分配到的计算资源中心是否支持互联网访问情况',
networkTypeDesc: '「访问Internet」是描述您的任务分配到的计算资源中心是否支持互联网访问情况。选择“否”时可分配的计算资源中心更多,且依然可以使用启智平台上的数据集和模型。',
resourceSpec: '资源规格',
specPlaceholder: '请选择资源规格',
specDescr: '资源说明',
@@ -690,8 +691,74 @@ const zh = {
},
superComputeObj: {
mmlSparkDescr: `MMLSpark全称为Microsoft Machine Learning for Apache Spark,支持用户运行自制容器镜像,且赋予了用户容器内root权限。用户可直接使用平台提供的微软的MMLSpark。\n注:MMLSpark是微软提供针对机器学习环境的Spark版本(<a target="_blank" href="https://github.com/Azure/mmlspark">https://github.com/Azure/mmlspark</a>),关于mmlspark可参考论文:<a target="_blank" href="https://arxiv.org/pdf/1810.08744.pdf">https://arxiv.org/pdf/1810.08744.pdf</a>`,
},
modelSquare: {
llmHeader: '文档对话体验',
chatGlm_intro: '是一个开源的、支持中英双语的对话语言模型,由智谱AI提供。',
llama2: 'is a collection of pretrained and fine-tuned generative text models ranging in scale from 7 billion to 70 billion parameters. This is the repository for the 7B fine-tuned model, optimized for dialogue use cases and converted for the Hugging Face Transformers format.',
dialogtips1: '你好👋!欢迎体验大模型知识库问答',
dialogtips21: '此体验基于',
dialogtips22: '语言模型与m3e-base向量模型',
dialogtips3: '请在右侧选择直接与模型对话或基于本地知识库问答',
dialogtips4: '知识库问答模式,选择知识库名称后,即可开始问答。',
dialogtips5: '如有需要可以在选择知识库名称后上传文件/文件夹至知识库,或从知识库中删除文件。',
dialogtips6: '模型已成功加载,可以开始对话,或从右侧选择模式后开始对话',
promptPlaceholder: '请输入提问内容...(Ctrl + Enter = 换行, 按回车键进行提交)',
dialogModeSelect: '请选择使用模式',
dialogLLM: 'LLM 对话',
dialogKb: '知识库问答',
configKb: '配置知识库',
updatekb: '更新已有知识库选项',
recreateKb: '向量库重构中,请耐心等待,请勿刷新或关闭网页',
selectKb: '请选择要加载的知识库:',
createKb: '新建知识库',
deleteKb: '删除本知识库',
deleteKbTips: '确定删除{knowledgeValue}知识库吗?',
deleteVbTips: '确定删除已选择的知识库文件吗?',
uploadFile: '上传文件',
uploadFileTips1: '将文件拖到此处,或<em>点击上传</em>',
uploadFileTips2: '单个文件上传大小限制为1MB • HTML, MD, JSON, CSV, TXT, XML, DOCX',
addFileToKb: '添加文件到知识库',
manageFile: '管理文件',
deleteKbFileSelect: '请从知识库已有文件中选择要删除的文件',
deleteKbFile: '从知识库中删除文件',
recreateKbSuccess: '{knowledgeValue}向量库重构成功',
noPermission: '您没有权限操作',
fileExit: '文件已经存在',
fileExceed: '文件超过1MB',
fileError: '文件错误,请重新上传!',
fileTypeError: '文件类型错误,请重新上传!',
fileUploadSuccess: '{fileName}文件上传成功!',
kbName: '知识库名称',
createKbPlaceholder: '新知识库名称只能是数字和字母',
vectorType: '向量库类型',
embedModel: 'Embedding 模型',
cancel: '取 消',
create: '新 建',
ok: '确 定',
kbNameDetect1: '名称不能为空',
kbNameDetect2: '名称只能是数字和字母',
chatExceedCount: '超出使用次数,无法体验模型服务!',
useNotice: '使用体验须知',
agreeNotice: '同意 <a href="/home/model_privacy" target="_blank"> 《OpenI启智社区AI协作平台免责声明和服务使用规范》 </a>中所述内容 <p style="text-align: center;margin-top: 1rem;color: red;">温馨提示:不合理使用可能会被封号!</p>',
modelProvide: '<a href="/home/model_privacy" target="_blank">《免责声明和服务使用规范》</a>本模型体验由<a href="https://ai.blsc.cn/" target="_blank"> 北京超级云计算中心 </a>提供算力支持',
maxTries: '限量体验{maxTries}次',
modelChatTask: '创建模型在线体验任务',
createChatTips1: '单击下方按钮创建在线体验任务,创建成功后可在线体验{expireMinutes}分钟',
createChatTips2: '本人创建的在线体验任务只限本人体验',
createChatTips3: '您已经创建过当前模型的在线体验任务,点击下方按钮直接进入体验界面',
createChatBtns1: '创建在线体验任务',
createChatBtns2: '在线体验',
experienceTime: '体验倒计时:',
uploadFIleLimit: '最多上传10个文件',
inputNotEmpty: '输入内容不能为空!',
sessionChating: '会话加载中,不要心急哦!',
chatBanned: '您的帐户被禁止登录,请与网站管理员联系',
chatIllegal: '非常抱歉,作为一个人工智能助手我只能提供客观的信息。您还有什么要问的吗?',
chatExpireMins: '体验时间超过{expireMinutes}分钟,需重新创建在线体验任务。',
chatExpired: '对话已过期,请重新创建对话',
}

}

export default zh;

+ 92
- 92
web_src/vuepages/pages/cloudbrain/configs.js View File

@@ -579,14 +579,14 @@ export const DetailPageConfigs = {
fields: [
'taskName', 'imagev1',
'status', 'spec',
'creator', 'modelName',
'branch', 'modelVersion',
'computerRes', 'modelFiles',
'createTime', 'codePath',
'startTime', 'datasetPath',
'endTime', 'modelPath',
'duration', 'outputPath',
'descr', '',
'creator', 'hasInternet',
'branch', 'modelName',
'computerRes', 'modelVersion',
'createTime', 'modelFiles',
'startTime', 'codePath',
'endTime', 'datasetPath',
'duration', 'modelPath',
'descr', 'outputPath',
'failedReason',
'dataset',
'modelList',
@@ -604,10 +604,10 @@ export const DetailPageConfigs = {
fields: [
'taskName', 'imagev2',
'status', 'spec',
'creator', 'modelName',
'computerRes', 'modelVersion',
'createTime', 'modelFiles',
'startTime', '',
'creator', 'hasInternet',
'computerRes', 'modelName',
'createTime', 'modelVersion',
'startTime', 'modelFiles',
'endTime', '',
'duration', '',
'descr', '',
@@ -631,13 +631,13 @@ export const DetailPageConfigs = {
'taskName', 'imagev1',
'status', 'spec',
'creator', 'aiCenter',
'branch', 'modelName',
'computerRes', 'modelVersion',
'createTime', 'modelFiles',
'startTime', 'codePath',
'endTime', 'datasetPath',
'duration', 'modelPath',
'descr', '',
'branch', 'hasInternet',
'computerRes', 'modelName',
'createTime', 'modelVersion',
'startTime', 'modelFiles',
'endTime', 'codePath',
'duration', 'datasetPath',
'descr', 'modelPath',
'failedReason',
'dataset',
'modelList',
@@ -656,11 +656,11 @@ export const DetailPageConfigs = {
'taskName', 'imagev2',
'status', 'spec',
'creator', 'aiCenter',
'branch', 'modelName',
'computerRes', 'modelVersion',
'createTime', 'modelFiles',
'startTime', 'codeObsPath',
'endTime', '',
'branch', 'hasInternet',
'computerRes', 'modelName',
'createTime', 'modelVersion',
'startTime', 'modelFiles',
'endTime', 'codeObsPath',
'duration', '',
'descr', '',
'failedReason',
@@ -683,10 +683,10 @@ export const DetailPageConfigs = {
'taskName', 'imagev2',
'status', 'spec',
'creator', 'aiCenter',
'branch', 'modelName',
'computerRes', 'modelVersion',
'createTime', 'modelFiles',
'startTime', '',
'branch', 'hasInternet',
'computerRes', 'modelName',
'createTime', 'modelVersion',
'startTime', 'modelFiles',
'endTime', '',
'duration', '',
'descr', '',
@@ -708,10 +708,10 @@ export const DetailPageConfigs = {
'taskName', 'imagev2',
'status', 'spec',
'creator', 'aiCenter',
'branch', 'modelName',
'computerRes', 'modelVersion',
'createTime', 'modelFiles',
'startTime', '',
'branch', 'hasInternet',
'computerRes', 'modelName',
'createTime', 'modelVersion',
'startTime', 'modelFiles',
'endTime', '',
'duration', '',
'descr', '',
@@ -733,10 +733,10 @@ export const DetailPageConfigs = {
'taskName', 'imagev2',
'status', 'spec',
'creator', 'aiCenter',
'branch', 'modelName',
'computerRes', 'modelVersion',
'createTime', 'modelFiles',
'startTime', '',
'branch', 'hasInternet',
'computerRes', 'modelName',
'createTime', 'modelVersion',
'startTime', 'modelFiles',
'endTime', '',
'duration', '',
'descr', '',
@@ -758,10 +758,10 @@ export const DetailPageConfigs = {
'taskName', 'imagev2',
'status', 'spec',
'creator', 'aiCenter',
'branch', 'modelName',
'computerRes', 'modelVersion',
'createTime', 'modelFiles',
'startTime', '',
'branch', 'hasInternet',
'computerRes', 'modelName',
'createTime', 'modelVersion',
'startTime', 'modelFiles',
'endTime', '',
'duration', '',
'descr', '',
@@ -783,10 +783,10 @@ export const DetailPageConfigs = {
'taskName', 'imagev2',
'status', 'spec',
'creator', 'aiCenter',
'branch', 'modelName',
'computerRes', 'modelVersion',
'createTime', 'modelFiles',
'startTime', '',
'branch', 'hasInternet',
'computerRes', 'modelName',
'createTime', 'modelVersion',
'startTime', 'modelFiles',
'endTime', '',
'duration', '',
'descr', '',
@@ -814,12 +814,12 @@ export const DetailPageConfigs = {
fields: [
'taskName', 'imagev1',
'status', 'spec',
'creator', 'modelName',
'branch', 'modelVersion',
'computerRes', 'modelFiles',
'createTime', 'bootFile',
'startTime', 'runParameters',
'endTime', '',
'creator', 'hasInternet',
'branch', 'modelName',
'computerRes', 'modelVersion',
'createTime', 'modelFiles',
'startTime', 'bootFile',
'endTime', 'runParameters',
'duration', '',
'descr', '',
'failedReason',
@@ -843,13 +843,13 @@ export const DetailPageConfigs = {
fields: [
'taskName', 'imagev2',
'status', 'spec',
'creator', 'modelName',
'branch', 'modelVersion',
'computerRes', 'modelFiles',
'runVersion', 'bootFile',
'createTime', 'runParameters',
'startTime', 'workServerNum',
'endTime', '',
'creator', 'hasInternet',
'branch', 'modelName',
'computerRes', 'modelVersion',
'runVersion', 'modelFiles',
'createTime', 'bootFile',
'startTime', 'runParameters',
'endTime', 'workServerNum',
'duration', '',
'descr', '',
'failedReason',
@@ -878,13 +878,13 @@ export const DetailPageConfigs = {
'taskName', 'imagev1',
'status', 'spec',
'creator', 'aiCenter',
'branch', 'modelName',
'computerRes', 'modelVersion',
'runVersion', 'modelFiles',
'createTime', 'bootFile',
'startTime', 'runParameters',
'endTime', 'workServerNum',
'duration', '',
'branch', 'hasInternet',
'computerRes', 'modelName',
'runVersion', 'modelVersion',
'createTime', 'modelFiles',
'startTime', 'bootFile',
'endTime', 'runParameters',
'duration', 'workServerNum',
'descr', '',
'failedReason',
'dataset',
@@ -913,13 +913,13 @@ export const DetailPageConfigs = {
'taskName', 'imagev2',
'status', 'spec',
'creator', 'aiCenter',
'branch', 'modelName',
'computerRes', 'modelVersion',
'runVersion', 'modelFiles',
'createTime', 'bootFile',
'startTime', 'runParameters',
'endTime', 'workServerNum',
'duration', '',
'branch', 'hasInternet',
'computerRes', 'modelName',
'runVersion', 'modelVersion',
'createTime', 'modelFiles',
'startTime', 'bootFile',
'endTime', 'runParameters',
'duration', 'workServerNum',
'descr', '',
'failedReason',
'dataset',
@@ -948,13 +948,13 @@ export const DetailPageConfigs = {
'taskName', 'imagev2',
'status', 'spec',
'creator', 'aiCenter',
'branch', 'modelName',
'computerRes', 'modelVersion',
'runVersion', 'modelFiles',
'createTime', 'bootFile',
'startTime', 'runParameters',
'endTime', 'workServerNum',
'duration', '',
'branch', 'hasInternet',
'computerRes', 'modelName',
'runVersion', 'modelVersion',
'createTime', 'modelFiles',
'startTime', 'bootFile',
'endTime', 'runParameters',
'duration', 'workServerNum',
'descr', '',
'failedReason',
'dataset',
@@ -983,13 +983,13 @@ export const DetailPageConfigs = {
'taskName', 'imagev2',
'status', 'spec',
'creator', 'aiCenter',
'branch', 'modelName',
'computerRes', 'modelVersion',
'runVersion', 'modelFiles',
'createTime', 'bootFile',
'startTime', 'runParameters',
'endTime', 'workServerNum',
'duration', '',
'branch', 'hasInternet',
'computerRes', 'modelName',
'runVersion', 'modelVersion',
'createTime', 'modelFiles',
'startTime', 'bootFile',
'endTime', 'runParameters',
'duration', 'workServerNum',
'descr', '',
'failedReason',
'dataset',
@@ -1110,13 +1110,13 @@ export const DetailPageConfigs = {
'taskName', 'imagev1',
'status', 'spec',
'creator', 'aiCenter',
'branch', 'modelName',
'bootFile', 'modelVersion',
'computerRes', 'modelFiles',
'createTime', 'codePath',
'startTime', 'datasetPath',
'endTime', 'modelPath',
'duration', '',
'branch', 'hasInternet',
'bootFile', 'modelName',
'computerRes', 'modelVersion',
'createTime', 'modelFiles',
'startTime', 'codePath',
'endTime', 'datasetPath',
'duration', 'modelPath',
'descr', '',
'failedReason',
'dataset',


+ 13
- 5
web_src/vuepages/pages/cloudbrain/create/index.vue View File

@@ -329,12 +329,20 @@ export default {
this.state.branchName = '';
}
}
if (this.formCfg['spec'] && task.spec) {
if (this.formCfg['spec']) {
let networkType = 'all';
if (task.spec.has_internet == 1) {
networkType = 'no_internet';
} else if (task.spec.has_internet == 2) {
networkType = 'has_internet'
if (task.has_internet == 1 || task.has_internet == 2) {
if (task.has_internet == 1) {
networkType = 'no_internet';
} else if (task.has_internet == 2) {
networkType = 'has_internet'
}
} else if (task.spec) {
if (task.spec.has_internet == 1) {
networkType = 'no_internet';
} else if (task.spec.has_internet == 2) {
networkType = 'has_internet'
}
}
this.state.networkType = networkType;
this.$nextTick(() => { this.state.spec = ''; });
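The branching added above prefers task.has_internet and only falls back to task.spec.has_internet; the same resolution order as a pure function (hypothetical name):

function resolveNetworkType(task) {
  const v = (task.has_internet == 1 || task.has_internet == 2)
    ? task.has_internet
    : (task.spec ? task.spec.has_internet : undefined);
  if (v == 1) return 'no_internet';
  if (v == 2) return 'has_internet';
  return 'all'; // default when neither source reports a network type
}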


+ 246
- 0
web_src/vuepages/pages/model/llms/App.vue View File

@@ -0,0 +1,246 @@
<template>
<div class="ui container" style="margin-top: 2rem;margin-bottom:-40px" id="dialog">
<headerModel :modelName="modelName" :minutes="minutes" :seconds="seconds"/>
<div class="model-dialog-wrapper">
<dialogLeft :pattern="pattern" :kbName="kbName" :modelName="modelName" :maxlength="maxlength" :counts="counts" :maxTrie="maxTrie" :expireMinutes="expireMinutes"></dialogLeft>
<dialogRight @radioChange="radioChange" @changeKbName="changeKbName" :commonKB="commonKB" :modelName="modelName"></dialogRight>
</div>
<div class="model-dialog-footer" :style="{marginTop:`${height}rem`}">
<span class="text" v-html="$t('modelSquare.modelProvide')">
</span>
</div>
<el-dialog
style="border-radius:2rem;margin-top: 20vh;"
:visible.sync="dialogVisible"
:title="$t('modelSquare.useNotice')"
width="30%"
center
:show-close="false"
:before-close="handleClose">
<span v-html="$t('modelSquare.agreeNotice')"></span>
<span slot="footer" class="dialog-footer">
<el-button @click="cancel" size="mini">{{$t('modelSquare.cancel')}}</el-button>
<el-button type="primary" @click="confirm" size="mini">{{$t('modelSquare.ok')}}</el-button>
</span>
</el-dialog>
</div>
</template>

<script>
import { llmAgree} from '~/apis/modules/llmchat';
import headerModel from './componenes/headerModel.vue'
import dialogLeft from './componenes/dialogLeft.vue'
import dialogRight from './componenes/dialogRight.vue'
export default {
name: "App",
components: { headerModel,dialogLeft,dialogRight },
data(){
return{
pattern:'1',
kbName:'',
modelName:'',
commonKB:'',
dialogVisible:false,
maxlength:0,
height:5,
minutes:"NA",
seconds:"NA",
counts:1000,
maxTrie:0,
expireMinutes:30
}
},
methods: {
handleClose(){

},
cancel(){
this.dialogVisible = false
history.back()
},
confirm(){
llmAgree({model_name:this.modelName}).then((res)=>{
this.dialogVisible = false
}).catch((err)=>{
this.$message.error(err.msg)
})
},
radioChange(val){
if(val==='1'){
this.height = 5
}else{
this.height = 3
}
this.pattern = val
},
changeKbName(val){
this.kbName = val
},
durationFormatter(gap) {
const second = 1000
const minute = second * 60
const hour = minute * 60
let m = Math.floor((gap % hour) / minute)
let s = Math.floor((gap % minute) / second)
this.minutes = this.addZero(m)
this.seconds = this.addZero(s)

},
addZero(num) {
return num < 10 ? '0' + num : '' + num
},
countDown(duration){
const totalDuration = duration;
let requestRef = null;
let startTime;
let prevEndTime;
let prevTime;
let currentCount = totalDuration;
let endTime;
let timeDifferance = 0; // accumulated countdown drift per 1s tick, in ms
let interval = 1000;
let nextTime = interval;

const animate = (timestamp) => {
if (prevTime !== undefined) {
const deltaTime = timestamp - prevTime;
if (deltaTime >= nextTime) {
prevTime = timestamp;
prevEndTime = endTime;
endTime = new Date().getTime();
currentCount = currentCount - 1000;
this.durationFormatter(currentCount)
timeDifferance = endTime - startTime - (totalDuration - currentCount);
nextTime = interval - timeDifferance;
// if we have fallen too far behind, run the next tick immediately
if (nextTime < 0) {
nextTime = 0;
}
if (currentCount <= 0) {
currentCount = 0;
cancelAnimationFrame(requestRef);
return;
}
}
} else {
startTime = new Date().getTime();
prevTime = timestamp;
endTime = new Date().getTime();
}
requestRef = requestAnimationFrame(animate);
};

requestRef = requestAnimationFrame(animate);
},
addWaterMarker(name,str){
let that = this
let can = document.createElement('canvas')
let container = document.querySelector('#dialog')
container.appendChild(can)
can.width = 180
can.height = 100
can.style.display = 'none'
let cans = can.getContext('2d')
cans.rotate(-20 * Math.PI / 180)
cans.font = 'normal 12px Microsoft Jhenghei'
cans.fillStyle = 'rgba(223,223,223,1)'
cans.textAlign = 'center'
cans.textBaseline = 'middle' // canvas textBaseline values are lowercase
cans.fillText(name, can.width / 3 , can.height / 2)
cans.fillText(str, can.width / 2.7, can.height / 1.6)
const base64Url = can.toDataURL();
const watermarkNode = document.querySelector(".watermarkNode");
const watermarkDiv = watermarkNode || document.createElement("div");
const styleStr = `position:absolute;
opacity:0.5;
top:0;
left:0;
width:100%;
height:100%;
z-index:1000;
pointer-events:none;
background-repeat:repeat;
background-image:url('${base64Url}')`
watermarkDiv.setAttribute("style", styleStr);
watermarkDiv.classList.add("watermarkNode");
if(!watermarkNode){
container.style.position = 'relative';
container.insertBefore(watermarkDiv, container.firstChild);
}
if (MutationObserver) {
let MOInstance = new MutationObserver(function () {
const watermarkNode = document.querySelector(".watermarkNode");
// only re-create the watermark when the watermarkNode element itself changes
if (
!watermarkNode ||
(watermarkNode && watermarkNode.getAttribute("style") !== styleStr)
) {
// avoid repeated triggering
MOInstance.disconnect();
// re-create the watermark
that.addWaterMarker(document.querySelector('meta[name="_uid"]').getAttribute('content-ext'),'AI生成内容仅供参考');
}
});
MOInstance.observe(container, {
attributes: true,
subtree: true,
childList: true,
});
}
},
},
async mounted() {
const urlParams = new URLSearchParams(location.search)
if(urlParams.has('model_name')){
this.modelName = urlParams.get('model_name')
this.maxlength = this.modelName === 'chatglm2-6b' ? 2000 : 1000
}
const userName = document.querySelector('meta[name="_uid"]').getAttribute('content-ext')
if(window.config.csrf !== '' && userName !==''){
this.addWaterMarker(userName,'AI生成内容仅供参考')
}
this.commonKB = document.getElementById('dialog-setting').getAttribute('data-common-kb')
const firstVisit = document.getElementById('dialog-setting').getAttribute('data-first-visit')
const createdUnix = +document.getElementById('dialog-setting').getAttribute('data-create-unix')
const endUnix = +document.getElementById('dialog-setting').getAttribute('data-end-unix')
this.counts = +document.getElementById('dialog-setting').getAttribute('data-counts')
this.maxTrie = +document.getElementById('dialog-setting').getAttribute('data-max-tries')
this.expireMinutes = +document.getElementById('dialog-setting').getAttribute('data-expire-minutes')
this.countDown((endUnix - createdUnix)*1000)
if(firstVisit=='true'){
this.dialogVisible = true
}
}
};
</script>
<style lang="less" scoped>
.model-dialog-wrapper{
display: flex;
justify-content: space-between;
}
.model-dialog-footer{
align-items: center;
display: flex;
flex-direction: column;
padding: 10px 0;
margin-top: 5rem;
.text{
font-family: PingFangSC-Regular;
font-size: 12px;
font-weight: 400;
letter-spacing: 0;
line-height: 20px;
text-align: center;
color: rgba(103,104,144,.6);
}
}
</style>
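The countDown method keeps its requestAnimationFrame loop aligned with wall-clock time by measuring accumulated drift on each tick; the core of the correction, extracted for reference (variables as in the method above):

// accumulated drift = real elapsed time minus counted-down time
timeDifferance = endTime - startTime - (totalDuration - currentCount);
// shorten the next tick by the drift, never below zero
nextTime = Math.max(0, interval - timeDifferance);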

+ 96
- 0
web_src/vuepages/pages/model/llms/componenes/SessionWindow.vue View File

@@ -0,0 +1,96 @@
<template>
<div style="width: 100%;height: 100%;position: relative">
<div class="session-window" ref="sessionWindow">
<div class="chat-content" ref="chatContent">
<div v-for="(item,index) in sessionRecordData" :key="index" style="display: flex;flex-direction: column;margin: 1rem;">
<WindowAssistant v-if="item.role !== 'user'" :content="item.content" :docs="item.hasOwnProperty('docs')?item.docs:[]"></WindowAssistant>
<WindowUser v-else :content="item.content"></WindowUser>
</div>
<!-- <div class="spacer"></div> -->
</div>
</div>
</div>

</template>
<script>
import WindowAssistant from "./WindowAssistant.vue";
import WindowUser from "./WindowUser.vue";

export default {
name: "SessionWindow",
components: {
WindowUser,
WindowAssistant
},
props: {
sessionData: {
  type: Array,
  default: () => [] // Array props require a factory default in Vue
},
},
data() {
return {
sessionRecordData:[],

}
},
watch: {
},
created() {
},
mounted() {
},
methods: {
setSessionRecord(val) {
this.sessionRecordData = val;
},
},
}
</script>



<style lang="less" scoped>
.session-window {
width: 100%;
height: 100%;
background-size: 100% 100%;
box-sizing: border-box;
display: flex;
flex-wrap: wrap;
justify-content: flex-start;
flex-direction: column;
}

.chat-content {
width: 100%;
height: 100%;
max-height: 100%;
overflow-y: auto;
box-sizing: border-box;
.chat-main-content {
width: 100%;
display: flex;
}

li {
list-style: none;
height: auto;
width: 100%;
margin-bottom: 20px;
display: flex;
}

}


.spacer {
height: 20px;
}

::v-deep .markdown-body {
font-size: 15px;
}
</style>
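Usage note: the parent drives this window imperatively through a ref rather than by mutating the sessionData prop, as dialogLeft.vue below does:

// in the parent, after <SessionWindow ref="sessionWindow" :session-data="sessionData" />
this.$refs.sessionWindow.setSessionRecord(records); // records: [{ role, content, docs? }]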

+ 118
- 0
web_src/vuepages/pages/model/llms/componenes/WindowAssistant.vue View File

@@ -0,0 +1,118 @@
<template>

<div class="chat-main-content">
<div class="chat-text">
<div class="message-content" style="display: flex;">
<!-- <img src="/img/favicon.png" alt="" style="margin-right: 0.5rem;"/> -->
<div class="file-view markdown markdown" style="font-size: 14px;" v-html="htmlContent" />
</div>
</div>
<el-collapse v-model="activeNames" v-if="kbDocsList.length!==0" class="chat-collapse">
<el-collapse-item>
<template slot="title">
<div>知识库匹配结果</div>
</template>
<div>
<div class="file-view markdown markdown" style="font-size: 14px;padding:1rem 0" v-for="item in kbDocsList" v-html="item"/>
</div>
</el-collapse-item>
</el-collapse>
</div>

</template>

<script>

import MarkdownIt from "markdown-it";

export default {
name: "WindowAssistant",
props: {
content: { type: String, required: true },
docs:{type:Array},
},
data(){
return{
htmlContent: "",
activeNames:[],
kbDocsList:[]
}
},
watch: {
content:{
deep: true,
handler(val){
const md = new MarkdownIt();
this.htmlContent = md.render(val)
}
},
docs:{
deep: true,
handler(val){
if(val.length===0) return
const md = new MarkdownIt();
val[0].forEach((item)=>{
this.kbDocsList.push(md.render(item))
})

}
}
},
methods:{
},
mounted(){
}
}
</script>

<style scoped lang="less">

.chat-main-content {
display: flex;
width: 96%;
border-color:#e5e7eb;
background:#f9fafb;
padding-left: 22px !important;
border-width: 1px;
border-radius: 22px;
padding: 16px;
color:#1f2937 ;
overflow-wrap: break-word;
border-bottom-left-radius: 0 !important;
flex-direction: column;
img{
width: 28px;
height: 28px;
margin-right: 1rem;
}
}
.chat-collapse{
margin-left: 3rem;margin-top: 1rem;
border-left: 1px solid #EBEEF5;
border-right: 1px solid #EBEEF5;
padding: 0 1rem;
border-radius: 1rem;
}
/deep/ .el-collapse-item__header{
background-color: transparent;
}
/deep/ .el-collapse-item__wrap{
background-color: transparent;
}

.file-view{
border-bottom:1px solid rgba(1, 145, 255, 0.1);
&:first-child{
padding-bottom: 1rem;
}
&:last-child{
padding-bottom: 0;
border: none;
}
}
</style>
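Worth noting: new MarkdownIt() with default options escapes raw HTML in its input (html: false), which matters here because the rendered result is bound via v-html; a quick check:

import MarkdownIt from 'markdown-it';

const md = new MarkdownIt(); // defaults include html: false
md.render('<img src=x onerror=alert(1)>');
// -> roughly '<p>&lt;img src=x onerror=alert(1)&gt;</p>' (escaped text, not live HTML)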

+ 39
- 0
web_src/vuepages/pages/model/llms/componenes/WindowUser.vue View File

@@ -0,0 +1,39 @@
<template>
<div class="chat-me" style="border-bottom-right-radius: 0px">
<p>{{ content }}</p>
</div>
</template>

<script>
export default {
name: "WindowUser",
props: {
content: { type: String, required: true },
},
data(){
return{
userImgHead: ''
}
},
mounted() {
},
}
</script>

<style scoped lang="less">
.chat-me{
border-color:rgb(99, 102, 241);
background-color:#ebebfb;
border-width: 1px;
align-self: flex-end;
border-radius: 22px;
padding: 14px;
color:#1f2937;
overflow-wrap: break-word;
border-style: solid;
box-sizing: border-box;
width:96%;
font-size: 14px;
}
</style>

+ 119
- 0
web_src/vuepages/pages/model/llms/componenes/createKbDialog.vue View File

@@ -0,0 +1,119 @@
<template >
<div>
<el-dialog :title="$t('modelSquare.createKb')" :visible.sync="visible" width="30%" @closed="closed" :show-close="false" :close-on-press-escape="false" :close-on-click-modal="false">
<el-form :model="form" :label-width="formLabelWidth" ref="ruleForm" :rules="rules" style="margin-right: 60px;">
<el-form-item prop="knowledge_base_name" :label="$t('modelSquare.kbName')">
<el-input v-model="form.knowledge_base_name" autocomplete="off" :placeholder="$t('modelSquare.createKbPlaceholder')"></el-input>
</el-form-item>
<el-form-item :label="$t('modelSquare.vectorType')">
<el-select v-model="form.vector_store_type" style="width:100%">
<el-option label="faiss" value="faiss"></el-option>
</el-select>
</el-form-item>
<el-form-item :label="$t('modelSquare.embedModel')">
<el-select v-model="form.embed_model" style="width:100%">
<el-option label="m3e-base" value="m3e-base"></el-option>
</el-select>
</el-form-item>
</el-form>
<div slot="footer" class="dialog-footer">
<el-button @click="cancel" size="small">{{$t('modelSquare.cancel')}}</el-button>
<el-button type="primary" @click="submitForm('ruleForm')" size="small">{{$t('modelSquare.create')}}</el-button>
</div>
</el-dialog>
</div>
</template>
<script>
import { llmKbcreate } from '~/apis/modules/llmchat';
export default {
props:{
dialogVisible:{type:Boolean,required:true,default:false},
modelName:{type:String,default:''},
},
data() {
var checkName = (rule, value, callback) => {
if (value === '') {
callback(new Error(this.$t('modelSquare.kbNameDetect1')));
}else if(!(/^[0-9a-zA-Z]+$/g.test(value))){
callback(new Error(this.$t('modelSquare.kbNameDetect2')));
}else{
callback();
};
};
return {
visible:false,
knowledgeValue:'',
form:{
knowledge_base_name:'',
vector_store_type:'faiss',
embed_model:'m3e-base'
},
formLabelWidth: '160px',
rules: {
knowledge_base_name:[{ validator: checkName, trigger: 'blur' }]
},
loading:true
};
},
watch:{
dialogVisible:{
handler(newVal,oldVal){
this.visible = newVal
},
deep:true,
immediate:true
}
},
methods:{
checkData(rule, value, callback){
  // reject names containing CJK characters; call the callback exactly once
  if (value && /[\u4e00-\u9fa5]/g.test(value)) {
    callback(new Error(this.$t('modelSquare.kbNameDetect2')))
    return
  }
  callback()
},
submitForm(formName) {
this.$refs[formName].validate((valid) => {
if (valid) {
const loading = this.$loading({target:'.el-dialog',lock:true})
llmKbcreate(this.form,{model_name:this.modelName}).then((res)=>{
if(res.data.code===200){
loading.close()
this.visible = false
this.$message({
type: 'success',
message: res.data.msg,
});
this.$emit('refresh')
}else{
loading.close()
this.$message({
type: 'error',
message: res.data.msg,
});
}
}).catch((err)=>{
this.$message({
type: 'error',
message: err.message,
});
loading.close()
})
} else {
return false;
}
});
},
closed(){
this.form = {knowledge_base_name:'',vector_store_type:'faiss',embed_model:'m3e-base',}
this.$emit("close");
},
cancel(){
this.visible = false
}
}
}
</script>
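One detail in the name validator above: the g flag on a regex used with test() adds nothing and becomes stateful (lastIndex) if the regex object is ever reused; a safer equivalent:

const KB_NAME_RE = /^[0-9a-zA-Z]+$/; // no g flag, so test() stays stateless
const isValidKbName = (value) => KB_NAME_RE.test(value);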

+ 389
- 0
web_src/vuepages/pages/model/llms/componenes/dialogLeft.vue View File

@@ -0,0 +1,389 @@
<template lang="">
<div class="model-dialog-left">
<div class="model-dialog-text" @scroll="onScroll" ref="chatContainer" :class="dialogHeight == true ? 'dialog-height-llm':'dialog-height-kb'">
<div class="model-dialog-title" style="margin-bottom: 1.5rem;">
<div class="text">
<p>{{$t('modelSquare.dialogtips1')}}</p>
<p>{{$t('modelSquare.dialogtips21')}} {{modelName}} {{$t('modelSquare.dialogtips22')}}</p>
<p>{{$t('modelSquare.dialogtips3')}}</p>
<p>{{$t('modelSquare.dialogtips4')}}</p>
<p>{{$t('modelSquare.dialogtips5')}}</p>
</div>
</div>
<div class="model-dialog-title">
<div class="text">
<p>{{$t('modelSquare.dialogtips6')}}</p>
</div>
</div>
<div style="width:100%; position: relative;" >
<SessionWindow
ref="sessionWindow"
:session-data="sessionData"
></SessionWindow>
</div>
</div>
<div class="model-dialog-input">
<el-input
ref="inputRef"
v-model:value="prompt"
type="textarea"
:placeholder="placeholder"
:maxlength="maxlength"
:autosize="{ minRows: 1, maxRows: 4}"
:show-word-limit="showLimit"
@input="inputPrompt"
@keydown.enter.native="carriageReturn($event)"/>
<div class="chat-count">
<span>{{countsRatio}}</span>
<span style="color: #ff5e00;">{{$t('modelSquare.maxTries',{maxTries:maxTrie})}}</span>
</div>
</div>
</div>
</template>
<script>
import { llmChat,llmKbChat,llmCount} from '~/apis/modules/llmchat';
import SessionWindow from './SessionWindow.vue'
export default {
name: "dialogLeft",
props: {
pattern:{type:String,required:true,default:'1'},
kbName:{type:String,default:''},
modelName:{type:String,default:''},
maxlength:{type:Number,default:0},
counts:{type:Number,default:0},
maxTrie:{type:Number,default:0},
expireMinutes:{type:Number,default:30}
},
components: { SessionWindow,},
data() {
return {
prompt:'',
placeholder:this.$t('modelSquare.promptPlaceholder'),
sessionData:[],
history:[],
sessionRecordData:[],
isAutoScroll:true,
countsRatio:0,
elementHeight:0,
chatFlag:false,
kbChatFlag:false,
showLimit:false,
dialogHeight:true,
maxTries:0,
count:0
};
},
watch:{
sessionRecordData(val){
if (val != null){
this.$refs.sessionWindow.setSessionRecord(val)
this.$nextTick(() => {
this.scrollBottom();
})
}
},
counts(){
this.countsRatio = `${this.counts}/${this.maxTrie}`
},
pattern(val){
if(val==='2'){
this.dialogHeight = false
}else{
this.dialogHeight = true
}
}
},
methods:{
inputPrompt(value){
if(value.length>=this.maxlength){
this.showLimit = true
}else{
this.showLimit = false
}
},
onScroll() {
const scrollDom = this.$refs.chatContainer;
const scrollTop = scrollDom.scrollTop;
const offsetHeight = scrollDom.offsetHeight;
const scrollHeight = scrollDom.scrollHeight;
// when scrolled to the bottom, re-enable auto-scroll
if (scrollTop + offsetHeight >= scrollHeight) {
this.isAutoScroll = true;
} else {
// otherwise the user is scrolling manually, so stop auto-scrolling
this.isAutoScroll = false;
}
},
/**
 * Scroll the chat container to the bottom (only while auto-scroll is enabled).
 */
scrollBottom() {
this.$nextTick(() => {
if (!this.isAutoScroll) return;
const scrollDom = this.$refs.chatContainer;
scrollDom.scrollTop = scrollDom.scrollHeight;
// animation(scrollDom, scrollDom.scrollHeight);
})
},
carriageReturn(event){
event.preventDefault()
if(event.ctrlKey && event.keyCode ==13){
this.prompt = this.prompt + '\n'
}else{
this.sendInputMessage()
}
},
sendInputMessage(){
const re = new RegExp("^[ ]+$")
if(!this.prompt || re.test(this.prompt)){
this.$message.error(this.$t('modelSquare.inputNotEmpty'))
return
}
if(this.chatFlag || this.kbChatFlag){
this.$message.error(this.$t('modelSquare.sessionChating'))
return
}
let query = this.prompt.trim()
this.prompt = ''
this.sessionRecordData.push({"role":"user","content":query})
if(this.pattern === '1'){
this.sessionRecordData.push({"role": "assistant","content": ''})
let data = {
"query":query,
"stream": true,
"model_name": this.modelName,
history:this.history
}
llmCount({'model_name':this.modelName}).then((res)=>{
this.countsRatio = `${res.data.counts}/${res.data.max_tries}`
this.maxTries = res.data.max_tries
this.count = res.data.counts
if(res.status===200 && res.data.can_chat===true){
this.chat(data)
}else{
this.sessionRecordData.pop()
this.sessionRecordData.push({"role": "assistant","content": this.$t('modelSquare.chatExceedCount')})
}
})
}else{
if(!this.kbName){
this.$message.error(this.$t('modelSquare.chatExpired'))
return
}
this.sessionRecordData.push({"role": "assistant","content": '',"docs":[]})
let data = {
"query":query,
"knowledge_base_name": this.kbName,
"top_k": 5,
"score_threshold": 1,
"stream": true,
"model_name": this.modelName,
history:this.history,
"local_doc_url": false
}
llmCount({'model_name':this.modelName}).then((res)=>{
this.countsRatio = `${res.data.counts}/${res.data.max_tries}`
this.maxTries = res.data.max_tries
this.count = res.data.counts
if(res.status===200 && res.data.can_chat===true){
this.kbChat(data)
}else{
this.sessionRecordData.pop()
this.sessionRecordData.push({"role": "assistant","content": this.$t('modelSquare.chatExceedCount'),"docs":[]})
}
})
}
this.isAutoScroll = true
this.$nextTick(() => {
this.scrollBottom();
})
},
chat(data){
let that = this
this.chatFlag = true
this.countsRatio = `${this.count + 1}/${this.maxTries}`
llmChat(data,this.modelName).then((response)=>{
const reader = response.body.getReader();
const processBinaryData = async () => {
while (true) {
const { done, value } = await reader.read();
if (done) {
  that.chatFlag = false
  // popData is only declared later in the loop body; look the entry up here
  if (new TextDecoder().decode(value) === '<illegal>') {
    const lastEntry = that.sessionRecordData[that.sessionRecordData.length - 1];
    lastEntry.content = this.$t('modelSquare.chatIllegal')
  }
  // The entire response has been processed
  //this.history.push({"role":"user","content":data.query},{"role": "assistant","content": res.data})
  break;
}
// Handle the binary data in the 'value' variable
let chars = new TextDecoder().decode(value)
let popData = that.sessionRecordData[that.sessionRecordData.length - 1];
if(chars==='<expired>'){
popData.content = this.$t('modelSquare.chatExpireMins',{expireMinutes:this.expireMinutes})
this.countsRatio = `${this.count}/${this.maxTries}`
that.chatFlag = false
return
}
if(chars==='<illegal>'){
popData.content = this.$t('modelSquare.chatIllegal')
that.chatFlag = false
return
}
if(chars==='<banned>'){
popData.content = this.$t('modelSquare.chatBanned')
that.chatFlag = false
setTimeout(()=>{
location.reload()
},1000)
return
}
popData.content += chars;
this.scrollBottom();
// You can process the binary data here and update your UI as needed
}
};
// Start processing the binary data
processBinaryData();
}).catch((err)=>{
this.$message({
type: 'error',
message: err.message,
});
})
},
kbChat(data){
let that = this
let docs = []
this.kbChatFlag = true
this.countsRatio = `${this.count + 1}/${this.maxTries}`
llmKbChat(data,this.modelName).then((response)=>{
const reader = response.body.getReader();
const processBinaryData = async () => {
while (true) {
const { done, value } = await reader.read();
if (done) {
that.kbChatFlag = false
//if(popData.docs.length===0){
//popData.docs.push(JSON.parse(chars.split('<end>')[1]).docs)
//}
//const docsArray = JSON.parse(chars.split('<end>')[1])
// The entire response has been processed
break;
}
// Handle the binary data in the 'value' variable
let chars = new TextDecoder().decode(value)
let popData = that.sessionRecordData[that.sessionRecordData.length - 1];
if(chars==='<expired>'){
popData.content = this.$t('modelSquare.chatExpireMins',{expireMinutes:this.expireMinutes})
this.countsRatio = `${this.count}/${this.maxTries}`
that.kbChatFlag = false
return
}
if(chars==='<illegal>'){
popData.content = this.$t('modelSquare.chatIllegal')
that.kbChatFlag = false
return
}
if(chars==='<banned>'){
  popData.content = this.$t('modelSquare.chatBanned')
  that.kbChatFlag = false
  // schedule the reload before returning; returning first would make it unreachable
  setTimeout(()=>{
    location.reload()
  },1000)
  return
}
if(chars.indexOf('<docs>')!==0){
popData.content += chars;
}else{
if(popData.docs.length===0){
that.kbChatFlag = false
popData.docs.push(JSON.parse(chars.split('<docs>')[1]).docs)
}
}
this.scrollBottom();
// You can process the binary data here and update your UI as needed
}
};
// Start processing the binary data
processBinaryData();
})
/* llmKbChat(data).then((res)=>{
if(res.data){
this.history.push({"role":"user","content":data.query},{"role": "assistant","content": res.data.answer})
this.sessionData.push({"role": "assistant","content": res.data.answer,"docs":res.data.docs})
}
})*/
},
},
mounted() {
}
}
</script>
<style lang="less" scoped>
.model-dialog-left{
width: 70%;
margin-right:3rem;
position: relative;
.dialog-height-llm{
min-height: 60vh;
max-height: 635px;
}
.dialog-height-kb{
max-height: 635px;
height: 100%;
}
.model-dialog-text{
overflow-y: auto;
.model-dialog-title{
border-color: rgb(229, 231, 235);
border-width: 1px;
border-style: solid;
border-radius: 10px 10px 10px 0px;
font-size: 14px;
padding: 20px;
text-align: left;
line-height: 23px;
font-weight: normal;
font-style: normal;
background: rgb(249, 250, 251);
.text{
width: 100%;
word-break: break-word;
word-wrap: break-word;
p{
padding: 0;
margin-bottom: 5px;
white-space: pre-wrap;
&:last-child{
margin-bottom: 0px;
}
}
}
}
}
.model-dialog-input{
position: absolute;
width:100%;
margin-top: 1rem;
.chat-count{
float: right;
margin-top:0.5rem;
}
}
}
/deep/ .el-textarea__inner{
resize: none;
}
</style>
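Both chat handlers above implement the same streaming protocol: plain-text chunks, whole-chunk control sentinels, and an inline '<docs>' marker whose remainder is a JSON payload. A sketch of the classification step (hypothetical helper; it assumes, as the component does, that chunk boundaries align with the sentinels):

function classifyChunk(chars) {
  if (chars === '<expired>' || chars === '<illegal>' || chars === '<banned>') {
    return { type: 'control', token: chars }; // terminal states from the server
  }
  if (chars.indexOf('<docs>') === 0) {
    // '<docs>' is followed by a JSON object carrying the matched documents
    return { type: 'docs', docs: JSON.parse(chars.split('<docs>')[1]).docs };
  }
  return { type: 'text', text: chars }; // ordinary answer text to append
}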

+ 673
- 0
web_src/vuepages/pages/model/llms/componenes/dialogRight.vue View File

@@ -0,0 +1,673 @@
<template>
<div class="model-dialog-right">
<div>
<p class="use-pattern">{{$t('modelSquare.dialogModeSelect')}}</p>
<div class="pattern-wrap">
<el-radio-group v-model="pattern" @change="patternChange">
<el-radio label="1" border>{{$t('modelSquare.dialogLLM')}}</el-radio>
<el-radio label="2" border>{{$t('modelSquare.dialogKb')}}</el-radio>
</el-radio-group>
</div>
<div class="component-2" v-if="pattern==='2'" style="min-height: 560px;">
<div class="label-wrap">
<span class="titlr">{{$t('modelSquare.configKb')}}</span>
<span class="icon-rotate" style="transform: rotate(0deg);">▼</span>
</div>
<div class="updata-knowledge" @click="recreateVe">
<span>{{$t('modelSquare.updatekb')}}</span>
</div>
<div class="recreate-kb" v-if="recreateFlag">
<div class="nowrap">
<i class="el-icon-loading" style="color:rgb(28, 131, 225)"></i>
<span :title="$t('modelSquare.recreateKb')">{{$t('modelSquare.recreateKb')}}</span>
</div>
<div class="nowrap" style="margin-top: 1rem;font-size:12px" :title="recreateVeValue.msg">{{recreateVeValue.msg}}</div>
<el-progress :percentage="recreateVeValue.percentage"></el-progress>

</div>
<div :class="{'disabled': recreateFlag}">
<span style="display: inline-block;color:#101010">{{$t('modelSquare.selectKb')}}</span>
<el-select v-model="knowledgeValue" style="width: 100%;margin-top:12px" @change="changeKbValue" v-loading="loading">
<el-option
v-for="item in KnowledgeBaseList"
:key="item.value"
:label="item.value"
:value="item.value">
</el-option>
</el-select>
</div>
<div class="knowledge-op-btn" :class="{'disabled': recreateFlag}">
<div @click="dialogVisible = true">
<span>{{$t('modelSquare.createKb')}}</span>
</div>
<div>
<el-popconfirm
:title="$t('modelSquare.deleteKbTips',{knowledgeValue})"
@confirm="confirmDel"
>
<span slot="reference" :class="{'disabled':knowledgeValue===commonKB}">{{$t('modelSquare.deleteKb')}}</span>
</el-popconfirm>
</div>
</div>
<el-tabs v-model="activeName" type="border-card" @tab-click="tabClick" :class="{'disabled': recreateFlag}">
<el-tab-pane name="upload" :class="{'disabled':knowledgeValue===commonKB}">
<span slot="label">{{$t('modelSquare.uploadFile')}}</span>
<div class="upload-knowledge-file">
<el-upload
class="upload-file"
drag
action=""
multiple
:show-file-list="false"
:http-request="getUploadFileList"
accept=".html, .md, .json, .csv, .txt, .xml, .docx"
>
<i class="el-icon-upload" style="margin: 1rem 0;"></i>
<div class="el-upload__text" v-html="$t('modelSquare.uploadFileTips1')"></div>
<div class="el-upload__tip" slot="tip">{{$t('modelSquare.uploadFileTips2')}}</div>
</el-upload>
<ul class="upload-list">
<li class="upload-list-item" v-for="(item,index) in files" :key="item.uid">
<a class="upload-item-name" :title="item.name" style="cursor: default;">
<i class="el-icon-document"></i>
{{item.name}}
</a>
<label class="upload-item-status">
<i v-if="filesStatus[index].status===1" style="color: #67C23A;" class="el-icon-circle-check"></i>
<i v-else-if="filesStatus[index].status===2" :title="filesStatus[index].error" style="color: red;" class="el-icon-warning-outline"></i>
<i v-else style="color: #909399;cursor:pointer" class="el-icon-close" @click="delKbFile(index)"></i>
</label>
</li>
</ul>
</div>
<div class="knowledge-op-btn">
<div style="width: 100%;" :class="{disabled:upBtnDisabled}" @click="uploadDocKnowledge">
<span >{{$t('modelSquare.addFileToKb')}}</span>
</div>
</div>
</el-tab-pane>
<el-tab-pane :label="$t('modelSquare.manageFile')" name="manage" :class="{'disabled':knowledgeValue===commonKB}">
<div class="knowledege-detail-file">
<p>{{$t('modelSquare.deleteKbFileSelect')}}</p>
<el-checkbox-group v-model="checkList" size="small" style="max-height: 150px;overflow-y:auto">
<el-checkbox :label="kbFile" v-for="kbFile in kbFileList" :key="kbFile"></el-checkbox>
</el-checkbox-group>

<div class="knowledge-op-btn">
<el-popconfirm :title="$t('modelSquare.deleteVbTips')" @confirm="confirmDelFile" style="width: 100%;">
<div slot="reference" style="width: 100%;" :class="{'disabled':noCheck}">
<span>{{$t('modelSquare.deleteKbFile')}}</span>
</div>
</el-popconfirm>
</div>
</div>
</el-tab-pane>

</el-tabs>
</div>
<createKbDialog :modelName="modelName" :dialogVisible="dialogVisible" @close="dialogVisible=false" @refresh="refresh"></createKbDialog>
</div>
</div>
</template>
<script>
import { llmKbDelete, llmKbDeleteDoc,llmKbList,llmKbUploadDocUrl,llmKbUploadDoc,llmKbFileList,llmRecreateVectorStore} from '~/apis/modules/llmchat';
import createKbDialog from './createKbDialog.vue'
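// dialogRight hosts the chat-mode switch (pure LLM vs. knowledge base) and the
// knowledge-base panel: list/create/delete KBs, upload and manage documents,
// and rebuild the vector store with streamed progress.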
let csrf = window.config ? window.config.csrf : ''
export default {
name: "dialogRight",
props: {
commonKB:{type:String,default:''},
modelName:{type:String,default:''},
},
components: { createKbDialog},
data() {
return {
pattern:'1',
knowledgeValue:'',
dialogVisible:false,
files:[],
form:{
knowledge_base_name:'',
vector_store_type:'faiss',
embed_model:'m3e-base'
},
formLabelWidth: '120px',
KnowledgeBaseList:[],
flag:false,
kbFileList:[],
checkList:[],
uploadFlag:false,
upBtnDisabled:false,
filesStatus:[],
activeName:'upload',
loading:false,
recreateVeValue:{},
recreateFlag:false,
noCheck:true
};
},
watch:{
knowledgeValue(val){
if (val){
this.$emit('changeKbName',val)
}
},
checkList(val){
if(val.length===0){
this.noCheck = true
}else{
this.noCheck = false
}
}
},
methods:{
patternChange(val){
this.$emit('radioChange',val)
if(val==='2'){
this.getKnowledgeBaseList()
}
},
tabClick(tab, event){
if(tab.name ==='manage'){
this.getKbFileDetails()
}
},
confirmDel(){
this.deleteKnowledgeBase()
},
confirmDelFile(){
this.delKbFileVe()
},
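// Rebuilds the selected knowledge base's vector store; the server streams
// progress as JSON objects ({code, msg, finished, total}) that drive the
// progress bar in the template.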
recreateVe(){
// replace the whole object so Vue tracks the reset percentage reactively
this.recreateVeValue = { percentage: 0 }
this.recreateFlag = true
llmRecreateVectorStore({knowledge_base_name:this.knowledgeValue,model_name:this.modelName}).then((response)=>{
if(response.status===200){
const reader = response.body.getReader();
const processBinaryData = async () => {
while (true) {
const { done, value } = await reader.read();
if (done) {
// The entire response has been processed
this.recreateFlag = false
this.$message({
type: 'success',
message: this.$t('modelSquare.recreateKbSuccess',{knowledgeValue:this.knowledgeValue}),
});
break;
}
// Decode the chunk; two progress objects can arrive concatenated ("...}{..."),
// so split them back into complete JSON strings before parsing
let chars = new TextDecoder().decode(value)
const jsonChunks = chars.indexOf('}{') === -1
? [chars]
: chars.split('}{').map((part, i, arr) =>
(i > 0 ? '{' : '') + part + (i < arr.length - 1 ? '}' : ''))
for (const chunk of jsonChunks) {
const parsedChars = JSON.parse(chunk)
if(parsedChars.code===200){
let percentage = Math.ceil(((parsedChars.finished + 1)/parsedChars.total)*100)
this.recreateVeValue = {...parsedChars, percentage}
}else{
this.recreateFlag = false
this.$message({
type: 'error',
message: parsedChars.msg,
});
return
}
}
// You can process the binary data here and update your UI as needed
}
};
// Start processing the binary data
processBinaryData();
}else if(response.status===403){
this.recreateFlag = false
this.$message({
type: 'error',
message: this.$t('modelSquare.noPermission'),
});
}else{
this.recreateFlag = false
this.$message({
type: 'error',
message: response.statusText,
});
}
})
},
deleteKnowledgeBase(){
const loading = this.$loading({target:'.component-2',lock:true})
llmKbDelete({knowledge_base_name:this.knowledgeValue,model_name:this.modelName}).then((res)=>{
if(res.data.code===200){
this.$message({
type: 'success',
message: res.data.msg,
});
this.getKnowledgeBaseList()
}else{
this.$message({
type: 'error',
message: res.data.msg,
});
}
loading.close()
}).catch((err)=>{
this.$message({
type: 'error',
message: err.message,
});
loading.close()
})
},
delKbFile(index){
this.files.splice(index,1)
this.filesStatus.splice(index,1)
},
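// Deletes the checked documents from the knowledge base; files the server
// could not remove come back in failed_files and are surfaced as
// bottom-right notifications.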
async delKbFileVe(){
const loading = this.$loading({target:'.component-2',lock:true})
if(this.checkList.length!==0){
const data = {
knowledge_base_name:this.knowledgeValue,
file_names: this.checkList
}
llmKbDeleteDoc(data,{model_name:this.modelName}).then((res)=>{
loading.close()
if(res.data.code===200){
const {failed_files} = {...res.data.data}
if(Object.keys(failed_files).length===0){
this.$message({
message: res.data.msg,
type: 'success',
});
this.checkList = []
this.getKbFileDetails()
}else{
this.getKbFileDetails()
Object.keys(failed_files).forEach((item)=>{
setTimeout(()=>{
this.$notify({
message: failed_files[item],
type: 'error',
position: 'bottom-right'
})
},500);
})
}
}else{
this.$message({
type: 'error',
message: res.data.msg,
});
}
}).catch((error)=>{
loading.close()
if(error.response.status===403){
this.$message({
type: 'error',
message: this.$t('modelSquare.noPermission'),
})
this.checkList=[]
}else{
this.$message({
type: 'error',
message: error.message,
});
}
})
}else{
loading.close()
}
},
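// Validates each file dropped into el-upload before queuing it locally:
// at most 10 files, a known extension, no duplicates, and under 1MB each.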
getUploadFileList(value){
if(this.uploadFlag){
this.files = []
this.filesStatus = []
this.uploadFlag = false
this.upBtnDisabled = false
}
if(this.files.length>=10){
setTimeout(()=>{
this.$message.error(this.$t('modelSquare.uploadFIleLimit'))
},0)
return
}
const index = value.file.name.lastIndexOf('.')
const acceptFileTypes = ['html', 'md', 'json', 'csv', 'txt', 'xml', 'docx'] // keep in sync with the accept attribute on el-upload
if(index === -1){
setTimeout(()=>{
this.$message({
type: 'error',
message: `${value.file.name}${this.$t('modelSquare.fileError')}`,
});
},0)
return
}else if(this.files.findIndex(f=>f.name===value.file.name)!==-1){
setTimeout(()=>{
this.$message({
type: 'error',
message: `${value.file.name}${this.$t('modelSquare.fileExit')}`,
});
},0)
return
}else if(value.file.size/(1024*1024)>=1){
setTimeout(()=>{
this.$message({
type: 'error',
message: `${value.file.name}${this.$t('modelSquare.fileExceed')}`,
});
},0)
return
}else if(!acceptFileTypes.includes(value.file.name.substr(index+1))){
setTimeout(()=>{
this.$message({
type: 'error',
message: `${value.file.name}${this.$t('modelSquare.fileTypeError')}`,
});
},0)
return
}else{
value.file.status = 0
this.files.push(value.file)
this.filesStatus.push({name:value.file.name,status:0})
}
},
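// Fetches the knowledge bases available for this model, selects the first
// one and loads its document list.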
getKnowledgeBaseList(){
this.loading = true
this.KnowledgeBaseList=[]
llmKbList({model_name:this.modelName}).then((res)=>{
if(res.data.code===200){
this.knowledgeValue = res.data.data[0]
this.getKbFileDetails()
res.data.data.forEach((item)=>{
this.KnowledgeBaseList.push({value:item})
})
}else{
this.$message({
type: 'error',
message: res.data.msg,
});
}
this.loading = false
}).catch((err)=>{
this.$message({
type: 'error',
message: err.message,
});
this.loading = false
})
},
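// Uploads all queued files in one multipart request; per-file failures are
// reported in failed_files and flagged in the list with their error message.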
uploadDocKnowledge(){
if(this.files.length!==0){
const loading = this.$loading({target:'.component-2',lock:true})
const fd = new FormData()
fd.append("override",false)
fd.append("knowledge_base_name",this.knowledgeValue)
this.files.forEach((file)=>{
fd.append("files",file)
})
llmKbUploadDocUrl({model_name:this.modelName},fd).then((res)=>{
if(res.data.code===200){
const {failed_files} = {...res.data.data}
this.filesStatus.forEach((file,index)=>{
this.filesStatus[index].status = 1
})
if(Object.keys(failed_files).length!==this.files.length){
this.$message({
type: 'success',
message: res.data.msg,
});
}
if(Object.keys(failed_files).length!==0){
Object.keys(failed_files).forEach((item, index)=>{
setTimeout(()=>{
this.$notify({
message: failed_files[item],
type: 'error',
position: 'bottom-right'
})
},500);
this.filesStatus.forEach((file,index)=>{
if(item===file.name){
this.filesStatus[index].status = 2
this.filesStatus[index].error = failed_files[item]
}
})
})
}
this.uploadFlag = true
this.upBtnDisabled = true
}else{
this.$message({
type: 'error',
message: res.data.msg,
});
}
loading.close()
}).catch((err)=>{
if(err.response.status===403){
this.$message({
type: 'error',
message: this.$t('modelSquare.noPermission'),
})
}else{
this.$message({
type: 'error',
message: err.message,
});
}
loading.close()
})

}
},
changeKbValue(val){
this.files=[]
this.upBtnDisabled = false
this.checkList = []
this.getKbFileDetails()
},
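// Loads the document list of the currently selected knowledge base.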
getKbFileDetails(){
const loading = this.$loading({target:'.knowledege-detail-file',lock:true})
llmKbFileList({knowledge_base_name:this.knowledgeValue,model_name:this.modelName}).then((res)=>{
if(res.data.code===200){
loading.close()
this.kbFileList = res.data.data
}else{
this.$message({
type: 'error',
message: res.data.msg,
});
loading.close()
}
}).catch((err)=>{
this.$message({
type: 'error',
message: err.message,
});
loading.close()
})
},
refresh(){
this.dialogVisible = false
this.getKnowledgeBaseList()
}
},
async mounted() {
}
}
</script>
<style lang="less" scoped>
.model-dialog-right{
width: 30%;
.pattern-wrap{
display: flex;
flex-wrap: wrap;
gap:8px;
}
.use-pattern{
font-family: SourceHanSansSC;
font-weight: 550;
font-size: 16px;
color: #101010;
font-style: normal;
letter-spacing: 0px;
line-height: 26px;
text-decoration: none;
}
.component-2{
border: 1px solid #e5e7eb;
border-radius: 8px;
padding: 12px 16px;
margin-top: 1rem;
display: flex;
flex-direction: column;
.label-wrap{
font-weight: 550;
font-size: 16px;
color: #101010;
margin-bottom:8px;
display: flex;
justify-content: space-between;
cursor: pointer;
width:100%;

.icon-rotate{
transition: .15s;
}
}
.updata-knowledge{
width: 100%;
height: 40px;
font-size: 14px;
border: 1px solid rgba(1, 145, 255, 0.3);
background: rgba(1, 145, 255, 0.1);
color: rgb(16, 16, 16);
display: flex;
justify-content: center;
align-items: center;
margin: 1.5rem 0;
border-radius: 5px;
cursor: pointer;
}
.recreate-kb{
border: 1px solid #e5e7eb;
border-radius: 4px;
display: flex;
flex-direction: column;
padding: 0.8rem;
margin-bottom: 1rem;
}
.knowledge-op-btn{
display: flex;
justify-content: space-between;
margin: 0.8rem 0;
div{
border: 1px solid rgba(1, 145, 255, 0.3);
background: rgba(1, 145, 255, 0.1);
color: rgb(16, 16, 16);
display: flex;
justify-content: center;
align-items: center;
height: 32px;
width:40%;
border-radius: 5px;
cursor:pointer;
}
}
.upload-file{
width: 100%;
margin-top: 1rem;
}
.upload-list{
margin: 0;
padding: 0;
list-style: none;
max-height: 100px;
overflow-y: auto;
.upload-list-item{
transition: all 0.5s cubic-bezier(0.55, 0, 0.1, 1);
font-size: 14px;
color: #606266;
line-height: 1.8;
margin-top: 5px;
position: relative;
box-sizing: border-box;
border-radius: 4px;
width: 100%;
&:first-child{
margin-top: 10px;
}
.upload-item-name{
color: #606266;
display: block;
margin-right: 40px;
overflow: hidden;
padding-left: 4px;
text-overflow: ellipsis;
transition: color 0.3s;
white-space: nowrap;
.el-icon-document{
color: #909399;
}
}
.upload-item-status{
position: absolute;
right: 5px;
top: 0;
line-height: inherit;
display: block;
}
}
}
}
}
/deep/ .el-radio {
margin-right: 8px;
}
/deep/ .el-radio.is-bordered.is-checked{
border-color: rgb(99, 102, 241);
background: rgb(99, 102, 241);
}
/deep/ .el-radio__input.is-checked + .el-radio__label{
color: white;
}
/deep/ .el-radio__input.is-checked .el-radio__inner{
border-color: rgb(79, 70, 229);
background: rgb(79, 70, 229);
}
/deep/ .el-dialog__body{
padding-bottom: 0;
}
/deep/ .el-upload{
width: 100%;
}
/deep/ .el-upload-dragger{
width: 100%;
height: 120px;
}
/deep/ .el-tabs--border-card{
border-radius: 0.5rem;
}
</style>

+ 126
- 0
web_src/vuepages/pages/model/llms/componenes/headerModel.vue

@@ -0,0 +1,126 @@
<template>
<div>
<div style="display: flex;justify-content: space-between;">
<div>
<a v-if="modelName==='chatglm2-6b'" class="model-type" :class="{'model-activate': modelName==='chatglm2-6b'}" href="/extension/llm_chat?model_name=chatglm2-6b">
<p>ChatGLM</p>
</a>
<a v-else-if="modelName==='llama2-7b-chat-hf'" class="model-type" :class="{'model-activate': modelName==='llama2-7b-chat-hf'}" href="/extension/llm_chat?model_name=llama2-7b-chat-hf">
<p>LLaMA 2</p>
</a>
<a v-else class="model-type" href="/extension/wenxin">
<p>ERNIE-ViLG</p>
</a>
</div>
<div class="countdouwn">
<span style="display: block;">{{$t('modelSquare.experienceTime')}}</span>
<div>
{{minutes}}:{{seconds}}
</div>
</div>
</div>
<div class="model-text-wrapper">
<p class="model-title">{{model[modelName]}} {{$t('modelSquare.llmHeader')}}</p>
<p class="model-desc"><a :href="modelUrl[modelName]">{{model[modelName]}}</a> {{modelText[modelName]}}</p>
</div>
</div>
</template>
<script>
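// headerModel renders the model switcher links, the experience countdown
// (minutes/seconds are formatted by the parent) and the model introduction.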
export default {
props: {
modelName:{type:String,default:''},
minutes:{type:String,default:"NA"},
seconds:{type:String,default:"NA"}
},
data() {
return {
model:{
'chatglm2-6b':'ChatGLM2-6B',
'llama2-7b-chat-hf':'llama2-7b-chat-hf'
},
modelText:{
'chatglm2-6b':this.$t('modelSquare.chatGlm_intro'),
'llama2-7b-chat-hf':this.$t('modelSquare.llama2')
},
modelUrl:{
'chatglm2-6b':'/FoundationModel/ChatGLM2-6B/modelmanage/model_readme_tmpl?name=ChatGLM2-6B',
'llama2-7b-chat-hf': '/FoundationModel/llama2/modelmanage/model_readme_tmpl?name=llama2-7b-chat-hf'
}

};
},
watch:{

},
async mounted() {
}
}
</script>
<style lang="less" scoped>
.model-type{
display: inline-flex;
padding: 0.6rem 1.2rem;
border: 1px solid rgba(229, 231, 235, 1);
border-radius: 5px;
color: rgba(16, 16, 16, 1);
margin-right: 2rem;
font-size: 16px;
position: relative;
p{
line-height: 100%;
}
}
.model-activate{
border: 1px solid rgb(1, 145, 255);
color: rgb(1, 145, 255);

}
.model-activate::before{
position: absolute;
content: "";
width: 0;
height: 0;
border-top: 7px solid rgb(1, 145, 255);
border-left: 7px solid transparent;
border-right: 7px solid transparent;
top: 100%;
left: 42%;

}
.model-text-wrapper{
margin: 2rem;
display: flex;
flex-direction: column;
justify-content: center;
align-items: center;
.model-title{
color: rgba(16, 16, 16, 1);
font-size: 28px;
margin-bottom: 0.5rem;
}
.model-desc{
color: rgba(136, 136, 136, 0.87);
font-size: 14px;
}
}
.countdouwn {
display: flex;
align-items: baseline;
}

.countdouwn div {
position: relative;
width: 80px;
height: 50px;
line-height: 50px;
text-align: center;
background: #333333;
color: #ffffff;
font-size: 2em;
font-weight: 500;
}


</style>

+ 117
- 0
web_src/vuepages/pages/model/llms/create/index.vue

@@ -0,0 +1,117 @@
<template>
<div>
<div class="ui container area">
<div class="area-title">{{$t('modelSquare.modelChatTask')}}</div>
<div class="area-content">
<div class="ui container">
<div class="area-main-wrap">
<div v-if="hasChat==='-1'">
<div style="font-size: 16px;margin:2rem 0 1rem 0">{{$t('modelSquare.createChatTips1',{expireMinutes})}}</div>
<div style="font-size: 16px;color: rgb(255, 94, 0);text-align: center;margin-bottom:1rem">{{$t('modelSquare.createChatTips2')}}</div>
<div class="area-main-btn">
<div type="primary" class="ui green button" @click="createLlmsService">{{$t('modelSquare.createChatBtns1')}}</div>
</div>
</div>
<div v-else>
<div style="font-size: 16px;margin:2rem 0 1rem 0">{{$t('modelSquare.createChatTips3')}}</div>
<div style="font-size: 16px;color: rgb(255, 94, 0);text-align: center;margin-bottom:1rem">{{$t('modelSquare.createChatTips2')}}</div>
<div class="area-main-btn">
<div type="primary" class="ui green button" @click="enterLlmsService">{{$t('modelSquare.createChatBtns2')}}</div>
</div>
</div>
</div>
</div>
</div>
<LoadingMask :loading="loading" :tips="loadingTips"></LoadingMask>
</div>
</div>
</template>
<script>

import { onlineExperience} from '~/apis/modules/llmchat';
import LoadingMask from '../../../modelbase/components/cloudbrain/LoadingMask.vue';

export default {
data(){
return{
modelName:'',
loading: false,
loadingTips: '任务正在准备中,喝杯水回来再看看~',
hasChat:'-1',
expireMinutes:30
}
},
components: { LoadingMask },
methods:{
createLlmsService(){
this.loading = true
onlineExperience({model_name:this.modelName})
.then((res)=>{
if(res.status===200){
location.href = `/extension/llm_chat/${res.data.chat_id}?model_name=${res.data.model_name}`
}
this.loading = false
})
.catch((error)=>{
this.loading = false
if (error.response.status === 404 || error.response.status === 400) {
this.$message.error(error.response.data.message);
}else{
this.$message.error(error.message);
}
})
},
enterLlmsService(){
location.href = `/extension/llm_chat/${this.hasChat}?model_name=${this.modelName}`
}
},
mounted(){
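// The model name comes from the query string; whether the user already has a
// running chat and the expiry window are injected by the server template as
// data-* attributes on #llms-create-setting.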
const urlParams = new URLSearchParams(location.search)
if(urlParams.has('model_name')){
this.modelName = urlParams.get('model_name')
}
this.hasChat = document.getElementById('llms-create-setting').getAttribute('data-has-chat')
this.expireMinutes = document.getElementById('llms-create-setting').getAttribute('data-expire-minutes')
}
}
</script>
<style scoped lang="less">
.area {
width: 1050px !important;
margin-top: 40px;
.area-title {
height: 45px;
border-color: rgb(212, 212, 213);
border-width: 1px;
border-style: solid;
border-radius: 5px 5px 0px 0px;
font-size: 14px;
background: rgb(240, 240, 240);
line-height: 45px;
padding-left: 15px;
font-weight: 550;
font-size: 16px;
color: rgb(16, 16, 16);
}
.area-content {
border-color: rgb(212, 212, 213);
border-width: 1px;
border-style: solid;
margin-top: -1px;
.area-main-wrap {
display: flex;
flex-direction: column;
align-items: center;
.area-main-btn{
display: flex;
justify-content: center;
margin-bottom: 2rem;
}
}
}
}
</style>

+ 22
- 0
web_src/vuepages/pages/model/llms/create/vp-llms-create.js

@@ -0,0 +1,22 @@
import Vue from 'vue'
import App from './index.vue'
// import router from './router'
import localeEn from 'element-ui/lib/locale/lang/en';
import localeZh from 'element-ui/lib/locale/lang/zh-CN';
import { i18n, lang } from "~/langs";
import ElementUI from 'element-ui';
import 'element-ui/lib/theme-chalk/index.css';
// import './style/index.css'

// choose the Element UI locale that matches the page language
Vue.use(ElementUI, { locale: lang === 'zh-CN' ? localeZh : localeEn });
Vue.config.productionTip = false

new Vue({
i18n,
// router,
render: h => h(App)
}).$mount('#__vue-root');

+ 22
- 0
web_src/vuepages/pages/model/llms/vp-model-llms.js

@@ -0,0 +1,22 @@
import Vue from 'vue'
import App from './App.vue'
// import router from './router'
import localeEn from 'element-ui/lib/locale/lang/en';
import localeZh from 'element-ui/lib/locale/lang/zh-CN';
import { i18n, lang } from "~/langs";
import ElementUI from 'element-ui';
import 'element-ui/lib/theme-chalk/index.css';
// import './style/index.css'

// choose the Element UI locale that matches the page language
Vue.use(ElementUI, { locale: lang === 'zh-CN' ? localeZh : localeEn });
Vue.config.productionTip = false

new Vue({
i18n,
// router,
render: h => h(App)
}).$mount('#__vue-root');

+ 3
- 3
web_src/vuepages/pages/model/wenxin/index.vue

@@ -1,5 +1,5 @@
<template>
<div class="ui container wenxin_model_wrap">
<div class="ui container wenxin_model_wrap" style="margin-top: 2rem;">
<div>
<div class="wenxin_title_wrap">
<h1>基于国产算力的ERNIE-ViLG AI 作画大模型</h1>
@@ -105,7 +105,7 @@
</div>
</div>
<div class="wenxin_alert_wrap">
<span> <a href="/home/wenxin_privacy" target="_blank">《免责声明和服务使用规范》</a></span>
<span> <a href="/home/model_privacy" target="_blank">《免责声明和服务使用规范》</a></span>
<span>访问更多关于 <a href="https://wenxin.baidu.com/ernie-vilg" target="_blank"> ERNIE-ViLG AI</a> 作画大模型的内容</span>
</div>
</div>
@@ -281,4 +281,4 @@ export default {
}
},
}
</script>
</script>

+ 2
- 1
web_src/vuepages/pages/modelbase/home/index.vue

@@ -108,7 +108,7 @@ export default {
loading: false,
};
},
components: { TopHeader, },
components: { TopHeader },
methods: {
changeTab(tab) {
this.tabIndex = tab;
@@ -134,6 +134,7 @@ export default {
this.loading = false;
})
},

},
beforeMount() {
this.getModels();


+ 1
- 0
web_src/vuepages/pages/modelbase/model/index.vue

@@ -257,6 +257,7 @@ export default {
deleteTrainJob({
userName: this.userName,
jobId: taskJob.job_id,
id: taskJob.id,
}).then(res => {
this.isOperating = false;
this.maskLoading = false;


+ 27
- 33
web_src/vuepages/pages/modelmanage/local/index.vue

@@ -62,22 +62,15 @@
:placeholder="$t('modelManage.modelLabelInputTips')" @input="labelInput"></el-input>
</div>
</div>
<div class="row" v-if="repoIsPrivate == false">
<div class="row">
<div class="r-title"><label>{{ $t('modelManage.modelAccess') }}</label></div>
<div class="field">
<div class="ui radio checkbox">
<input id="isPrivate_false" type="radio" name="isPrivate" checked="checked" value="false">
<label>{{ $t('modelManage.modelAccessPublic') }}</label>
</div>
</div>
<div class="field">
<label>&nbsp;&nbsp;</label>
</div>
<div class="field">
<div class="ui radio checkbox">
<input id="isPrivate_true" type="radio" name="isPrivate" value="true">
<label>{{ $t('modelManage.modelAccessPrivate') }}</label>
</div>
<div class="r-content">
<el-radio v-model="state.isPrivate" label="0" :disabled="repoIsPrivate">{{
$t('modelManage.modelAccessPublic') }}<span v-if="repoIsPrivate">
({{ $t('modelManage.modelAccessTips') }})</span>
</el-radio>
<el-radio v-model="state.isPrivate" label="1" :disabled="repoIsPrivate">{{
$t('modelManage.modelAccessPrivate') }}</el-radio>
</div>
</div>
<div class="row" style="align-items:flex-start;">
@@ -125,7 +118,7 @@ export default {
license: '',
label: '',
description: '',
isPrivate: false,
isPrivate: REPOISPRIVATE ? '1' : '0',
},
licenseList: [],
nameErr: false,
@@ -157,21 +150,12 @@ export default {
// });
return;
}
var radio = document.getElementsByName("isPrivate");
if (radio != null && radio.length > 0) {
for (var i = 0; i < radio.length; i++) {
if (radio[i].checked) {
this.state.isPrivate = radio[i].value;
}
}
} else {
this.state.isPrivate = true;
}
const submintApi = this.type == '1' ? modifyModel : saveLocalModel;
submintApi({
repo: location.pathname.split('/').slice(0, 3).join('/'),
...this.state,
label: this.state.label.split(/\s+/).join(' ').trim(),
isPrivate: (this.repoIsPrivate || this.state.isPrivate == 1) ? true : false,
}).then(res => {
res = res.data;
if (res && res.code == '0') {
@@ -234,13 +218,9 @@ export default {
this.state.engine = data.engine.toString();
this.state.label = data.label;
this.state.description = data.description;
this.state.isPrivate = data.isPrivate;
if (data.isPrivate) {
$('#isPrivate_true').attr("checked", true);
$('#isPrivate_false').attr("checked", false);
} else {
$('#isPrivate_true').attr("checked", false);
$('#isPrivate_false').attr("checked", true);
this.state.isPrivate = data.isPrivate ? '1' : '0';
if (this.repoIsPrivate) {
this.state.isPrivate = '1';
}
}
}).catch(err => {
@@ -520,4 +500,18 @@ export default {
border-color: #85b7d9;
}
}

/deep/ .el-radio.is-checked {
.el-radio__inner {
// border-color: #409EFF;
// background: #409EFF;
border-color: rgb(16, 16, 16);
background: rgb(16, 16, 16);
}

.el-radio__label {
// color: #409EFF;
color: rgb(16, 16, 16);
}
}
</style>

+ 10
- 3
web_src/vuepages/pages/modelmanage/settings/index.vue

@@ -58,11 +58,15 @@
:placeholder="$t('modelManage.modelLabelInputTips')" @input="labelInput"></el-input>
</div>
</div>
<div class="row" v-if="repoIsPrivate == false">
<div class="row">
<div class="r-title"><label>{{ $t('modelManage.modelAccess') }}</label></div>
<div class="r-content">
<el-radio v-model="state.isPrivate" label="0">{{ $t('modelManage.modelAccessPublic') }}</el-radio>
<el-radio v-model="state.isPrivate" label="1">{{ $t('modelManage.modelAccessPrivate') }}</el-radio>
<el-radio v-model="state.isPrivate" label="0" :disabled="repoIsPrivate">{{
$t('modelManage.modelAccessPublic') }}<span v-if="repoIsPrivate">
({{ $t('modelManage.modelAccessTips') }})</span>
</el-radio>
<el-radio v-model="state.isPrivate" label="1" :disabled="repoIsPrivate">{{
$t('modelManage.modelAccessPrivate') }}</el-radio>
</div>
</div>
<div class="row" style="align-items:flex-start;">
@@ -215,6 +219,9 @@ export default {
this.state.license = data.license;
this.state.description = data.description;
this.state.isPrivate = data.isPrivate ? '1' : '0';
if (this.repoIsPrivate) {
this.state.isPrivate = '1';
}
getModelLicenseList().then(res => {
res = res.data;
try {


+ 1
- 1
web_src/vuepages/pages/resources/scene/index.vue

@@ -191,7 +191,7 @@ export default {
const item = data[i];
list.push({
k: item.ID,
v: `${item.QueueCode}(${getListValueWithKey(this.clusterList, item.Cluster)} - ${item.AiCenterName})`,
v: `${item.QueueCode}(${getListValueWithKey(this.clusterList, item.Cluster)} - ${item.AiCenterName}) ${item.ComputeResource}(${item.AccCardType})`,
});
}
this.queueList.push(...list);


+ 1
- 1
web_src/vuepages/pages/resources/specification/index.vue

@@ -206,7 +206,7 @@ export default {
const item = data[i];
list.push({
k: item.ID,
v: `${item.QueueCode}(${getListValueWithKey(this.clusterList, item.Cluster)} - ${item.AiCenterName})`,
v: `${item.QueueCode}(${getListValueWithKey(this.clusterList, item.Cluster)} - ${item.AiCenterName}) ${item.ComputeResource}(${item.AccCardType})`,
});
}
this.queueList.push(...list);

