Integrate the digital human and adjust the interface

aixianling
2023-11-09 15:21:33 +08:00
parent 8eb0db8401
commit ad77470063
18 changed files with 290 additions and 115 deletions


@@ -1,6 +1,6 @@
import {dayjs} from "element-plus";
import {nanoid} from "nanoid";
-import axios from "./axios";
+import http from "./http.js";
import {JSEncrypt} from "jsencrypt";
class BaseModel {
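
Note: the http.js module this commit swaps in for the old ./axios wrapper is not part of the diff. Judging by how it is called below (http.post(url, body, {headers}) followed by res.json(), res.text(), or res.body.getReader()), it behaves like a thin fetch wrapper that returns the raw Response. A minimal sketch under that assumption:

// http.js — minimal sketch; the real module may add auth, retries, or a proxy
const http = {
  // Return the raw fetch Response so callers choose res.json(),
  // res.text(), or res.body.getReader() themselves.
  get: (url, options = {}) => fetch(url, {method: "GET", ...options}),
  post: (url, body, options = {}) => fetch(url, {method: "POST", body, ...options}),
};
export default http;
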
@@ -37,7 +37,7 @@ export class ChatGPT extends BaseModel {
async chat(history) {
const messages = history.map(e => ({role: e.role, content: e.msg}))
-return await axios.post(ChatGPT.base + "/v1/chat/completions", JSON.stringify({messages, model: this.id}), {
+return await http.post(ChatGPT.base + "/v1/chat/completions", JSON.stringify({messages, model: this.id}), {
headers: {
Authorization: 'Bearer ' + this.apiKey, "Content-Type": "application/json", Accept: "application/json",
},
@@ -46,7 +46,7 @@ export class ChatGPT extends BaseModel {
async chatStream(history) {
const messages = history.map(e => ({role: e.role, content: e.msg}))
-return await axios.post(ChatGPT.base + "/v1/chat/completions", JSON.stringify({
+return await http.post(ChatGPT.base + "/v1/chat/completions", JSON.stringify({
messages,
model: this.id,
stream: true
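
For context: the reader that chatStream resolves to (via res.body.getReader(), visible in the Alpaca hunk below) is drained by a streamOutput helper that appends each SSE delta to chat.msg. A usage sketch, assuming a chat object shaped the way streamOutput reads it:

// Usage sketch — model and history come from the surrounding app code
const reader = await model.chatStream(history);
const chat = {role: "assistant", msg: ""};
await model.streamOutput(reader, chat); // resolves once the stream reports done
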
@@ -59,12 +59,12 @@ export class ChatGPT extends BaseModel {
async getAccount() {
const {headers} = this
-const usages = await axios.get(ChatGPT.base + "/v1/dashboard/billing/subscription", {headers}).then(res => res.json());
+const usages = await http.get(ChatGPT.base + "/v1/dashboard/billing/subscription", {headers}).then(res => res.json());
const endDate = usages.access_until
if (endDate) {
const startDate = new Date(endDate - 90 * 24 * 60 * 60);
const formattedDate = time => dayjs(time).format("YYYY-MM-DD")
-return await axios.get(`${ChatGPT.base}/v1/dashboard/billing/usage?start_date=${formattedDate(startDate * 1000)}&end_date=${formattedDate(endDate * 1000)}`, {headers}).then(res => res.json()).then(res => {
+return await http.get(`${ChatGPT.base}/v1/dashboard/billing/usage?start_date=${formattedDate(startDate * 1000)}&end_date=${formattedDate(endDate * 1000)}`, {headers}).then(res => res.json()).then(res => {
usages.total_usage = res.total_usage
const names = usages.account_name.split(" ")
return {
@@ -133,7 +133,7 @@ export class ChatGLM extends BaseModel {
async getToken() {
if (this.apiKey) return await this.apiKey
const encrypted = ChatGLM.encrypt(ChatGLM.publicKey)
-return await axios.post(ChatGLM.base + "/passApiToken/createApiToken", JSON.stringify({
+return await http.post(ChatGLM.base + "/passApiToken/createApiToken", JSON.stringify({
apiKey: "4e3ceff669c143dfa09e763663aa72cd",
encrypted
}), {
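
ChatGLM.encrypt and ChatGLM.publicKey are defined outside this hunk. Given the JSEncrypt import at the top of the file, the helper is presumably an RSA wrapper along these lines (a sketch; encrypting a timestamp is an assumption, not something the diff shows):

// Sketch of a JSEncrypt-based helper; the actual plaintext is an assumption
static encrypt(publicKey) {
  const rsa = new JSEncrypt();
  rsa.setPublicKey(publicKey);              // PEM/base64 RSA public key
  return rsa.encrypt(String(Date.now()));   // assumed: token requests encrypt a timestamp
}
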
@@ -145,7 +145,7 @@ export class ChatGLM extends BaseModel {
const history = messages.map(e => e.msg)
history.pop()
const prompt = history.pop()
return await axios.post(ChatGLM.base + "/model/v1/open/engines/chatGLM/chatGLM", JSON.stringify({
return await http.post(ChatGLM.base + "/model/v1/open/engines/chatGLM/chatGLM", JSON.stringify({
history, prompt, temperature: 1, top_p: 0.6, requestTaskNo: this.taskId
}), {headers: this.headers}).then(res => res.json()).then(data => {
if (data?.data.taskStatus == 'PROCESSING') {
@@ -157,7 +157,7 @@ export class ChatGLM extends BaseModel {
}
async getChatResult(taskOrderNo) {
-return await axios.get(ChatGLM.base + `/request-task/query-request-task-result/${taskOrderNo}`, {headers: this.headers}).then(res => res.json()).then(data => {
+return await http.get(ChatGLM.base + `/request-task/query-request-task-result/${taskOrderNo}`, {headers: this.headers}).then(res => res.json()).then(data => {
if (data?.data.taskStatus == 'PROCESSING') {
return this.getChatResult(data.data.taskOrderNo)
} else {
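
getChatResult re-queries immediately while taskStatus is still PROCESSING, so this recursion polls as fast as the network allows. An editorial sketch (not part of this commit) of the same loop with a delay between attempts:

// Variant with a back-off delay; delayMs is a hypothetical parameter
async getChatResult(taskOrderNo, delayMs = 1000) {
  const data = await http.get(ChatGLM.base + `/request-task/query-request-task-result/${taskOrderNo}`, {headers: this.headers}).then(res => res.json());
  if (data?.data.taskStatus == 'PROCESSING') {
    await new Promise(resolve => setTimeout(resolve, delayMs)); // wait before re-polling
    return this.getChatResult(data.data.taskOrderNo, delayMs);
  }
  return data;
}
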
@@ -171,7 +171,7 @@ export class ChatGLM extends BaseModel {
history.pop()
const prompt = history.pop()
const url = ChatGLM.base + "/model/v1/open/engines/sse/chatGLM/chatGLM"
-return await axios.post(url, JSON.stringify({
+return await http.post(url, JSON.stringify({
history, prompt, temperature: 0.2, requestTaskNo: this.taskId
}), {
headers: this.headers,
@@ -186,7 +186,7 @@ export class ChatGLM extends BaseModel {
}
async getAccount() {
const usages = await axios.get("https://open.bigmodel.ai/api/paas/account/query-customer-account-report", {headers: this.headers}).then(res => res.json());
const usages = await http.get("https://open.bigmodel.ai/api/paas/account/query-customer-account-report", {headers: this.headers}).then(res => res.json());
if (usages.code == 200) {
const {data} = usages
return {
@@ -216,7 +216,7 @@ export class ChatGLM extends BaseModel {
* Integrate the private Alpaca deployment
*/
export class Alpaca extends BaseModel {
static base = "https://testai.cunwuyun.cn"
static base = "https://alpaca7b.aceykubbo.workers.dev"
static avatar = "https://cdn.cunwuyun.cn/img/logo.svg"
static name = "Alpaca"
static id = "alpaca-7b-plus"
@@ -230,7 +230,7 @@ export class Alpaca extends BaseModel {
async chat(history) {
const messages = history.map(e => ({role: e.role, content: e.msg}))
-return await axios.post(Alpaca.base + "/v1/chat/completions", JSON.stringify({messages, model: this.id}), {
+return await http.post(Alpaca.base + "/v1/chat/completions", JSON.stringify({messages, model: this.id}), {
headers: {
Authorization: 'Bearer ' + this.apiKey, "Content-Type": "application/json", Accept: "application/json",
},
@@ -238,51 +238,57 @@ export class Alpaca extends BaseModel {
}
async chatStream(history) {
-const messages = history.map(e => ({role: e.role, content: e.msg}))
-return await axios.post(Alpaca.base + "/v1/chat/completions", JSON.stringify({
-messages,
-model: this.id,
-stream: true
-}), {
-headers: {
-Authorization: 'Bearer ' + this.apiKey, "Content-Type": "application/json", Accept: "application/json",
-},
-}).then(res => res?.body?.getReader());
+const prompt = history.map(e => `\n\n### ${e.role}:${e.msg}`).join("")
+return await http.post(Alpaca.base + "/completion", JSON.stringify({
+prompt,
+batch_size: 128,
+temperature: 0.2,
+top_k: 40,
+top_p: 0.9,
+n_keep: -1,
+n_predict: 2048,
+stop: ["### user:\n\n"], // stop generating once this sequence is detected
+exclude: [], // sequences to omit from the completion
+threads: 8,
+as_loop: true, // request the completion token by token
+interactive: true, // enable stop-word detection
+})).then(res => res?.text());
}
streamOutput(reader, chat) {
-return reader.read().then(({done, value}) => {
-if (done) {
-return;
-}
-if (!chat.reminder) {
-chat.reminder = ""
-}
-let decode = new TextDecoder().decode(value)
-decode = chat.reminder + decode
-let decodedArray = decode.split("data: ");
-let longstr = "";
-decodedArray.forEach(decoded => {
-decoded = decoded.trim();
-try {
-if (longstr != "") {
-decoded = longstr + decoded;
-longstr = "";
-}
-} catch (e) {
-longstr = decoded;
-decoded = "";
-}
-if (!!decoded && decoded !== "[DONE]") {
-const choices = JSON.parse(decoded).choices
-if (choices?.length > 0) {
-const response = choices[0].delta.content || "";
-chat.msg += response
-}
-}
-})
-return this.streamOutput(reader, chat)
-})
+console.log(reader)
+// return reader.read().then(({done, value}) => {
+// if (done) {
+// return;
+// }
+// if (!chat.reminder) {
+// chat.reminder = ""
+// }
+// let decode = new TextDecoder().decode(value)
+// decode = chat.reminder + decode
+// let decodedArray = decode.split("data: ");
+// let longstr = "";
+// decodedArray.forEach(decoded => {
+// decoded = decoded.trim();
+// try {
+// if (longstr != "") {
+// decoded = longstr + decoded;
+// longstr = "";
+// }
+// } catch (e) {
+// longstr = decoded;
+// decoded = "";
+// }
+// if (!!decoded && decoded !== "[DONE]") {
+// const choices = JSON.parse(decoded).choices
+// if (choices?.length > 0) {
+// const response = choices[0].delta.content || "";
+// chat.msg += response
+// }
+// }
+// })
+// return this.streamOutput(reader, chat)
+// })
}
}
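
Two notes on the rewritten Alpaca path. First, the prompt is now serialized in a flat "\n\n### role:message" format rather than as an OpenAI-style messages array; the parameter names (n_predict, n_keep, as_loop, threads) match the llama.cpp example server of this period, though the diff itself never names the backend. A worked example of the serialization:

// For this history...
const history = [{role: "user", msg: "Hi"}, {role: "assistant", msg: "Hello"}];
// ...the prompt string sent to /completion is:
// "\n\n### user:Hi\n\n### assistant:Hello"

Second, because the call chain now ends in res?.text(), chatStream resolves to the whole completion at once, which is presumably why the SSE parsing in streamOutput is commented out and only console.log(reader) remains; with as_loop: true the server is expected to hand back tokens through follow-up polling, and that polling endpoint sits outside this diff.
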