Fix the ChatGLM token-fetching method, and wire up non-streaming requests as well
@@ -10,7 +10,7 @@
 <script>
 import Chat from "./components/chat";
 import Settings from "./components/settings";
-import {ChatGPT} from "./utils/models";
+import {ChatGLM} from "./utils/models";

 export default {
   name: 'App',
@@ -19,14 +19,13 @@ export default {
     return {
       showSettings: false,
       setting: {
-        model: new ChatGPT(),
+        model: new ChatGLM(),
         stream: true
       },
     }
   },
   methods: {
     handleResize() {
       console.log("App handleResize:" + window.innerWidth)
       this.showSettings = window.innerWidth > 1150;
     }
   },

@@ -10,7 +10,7 @@
 <el-form-item label="流式输出">
   <el-switch v-model="settings.stream" :active-value="true" :inactive-value="false"/>
 </el-form-item>
-<el-form-item label="API KEY">
+<el-form-item label="API KEY" v-if="isGPT">
   <el-row class="w100">
     <el-input v-model="settings.model.apiKey" clearable class="fill mar-r8"/>
     <el-button type="text" @click="getModelAccount">应用</el-button>
@@ -55,7 +55,9 @@ export default {
   },
   computed: {
     models: () => Object.values(models),
-    account: v => v.settings.account || {usage: 0, total: 0}
+    account: v => v.settings.account || {usage: 0, total: 0},
+    apiKey: v => v.settings.model.apiKey || "key无效或网络波动,请重新尝试",
+    isGPT: v => v.settings.model.name == "ChatGPT"
   },
   methods: {
     initModel(model) {
@@ -68,12 +70,13 @@ export default {
         }
       }, 500)
     },
-    getModelAccount() {
+    getModelAccount(c = 0) {
       const ai = this.settings.model
       console.log(ai)
       if (ai.apiKey) {
         this.loadingAccount = true
         ai.getAccount().then(v => this.settings.account = v).finally(() => this.loadingAccount = false)
-      }
+      } else if (c < 5) setTimeout(() => this.getModelAccount(++c), 1000)
     }
   }
 }

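The reworked getModelAccount above retries itself while the model has no API key yet (ChatGLM now fetches its token asynchronously), giving up after five one-second attempts. A minimal sketch of that bounded-retry pattern in isolation; isReady and action are hypothetical stand-ins for the apiKey check and the getAccount() call:

// Bounded retry: run `action` once `isReady` holds, re-checking every
// second and giving up after five attempts (mirrors getModelAccount(c = 0)).
function retryUntilReady(isReady, action, attempt = 0) {
  if (isReady()) return action();
  if (attempt < 5) setTimeout(() => retryUntilReady(isReady, action, attempt + 1), 1000);
}

// e.g. retryUntilReady(() => model.apiKey, () => model.getAccount())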
@@ -131,16 +131,37 @@ export class ChatGLM extends BaseModel {

   async getToken() {
     if (this.apiKey) return await this.apiKey
-    const timestamp = new TextEncoder().encode(Date.now().toFixed(0))
-    const encrypted = ChatGLM.encrypt(ChatGLM.publicKey, timestamp)
+    const encrypted = ChatGLM.encrypt(ChatGLM.publicKey)
     return await axios.post(ChatGLM.base + "/passApiToken/createApiToken", JSON.stringify({apiKey: "4e3ceff669c143dfa09e763663aa72cd", encrypted}), {
       headers: this.headers,
-    }).then(res => res.json()).then(data => data?.data || "key无效或网络波动,请重新尝试");
+    }).then(res => res.json()).then(data => data?.data);
   }

-  async chat(history, callback) {
-    const context = await axios.post(ChatGLM.base + "/v1/stream_context").then(res => res.json());
-    return await axios.get(ChatGPT.base + "/v1/stream", {params: context.result})
+  async chat(messages) {
+    const history = messages.map(e => e.msg)
+    history.pop()
+    const prompt = history.pop()
+    return await axios.post(ChatGLM.base + "/model/v1/open/engines/chatGLM/chatGLM", JSON.stringify({
+      history, prompt,
+      temperature: 1, top_p: 0.6, requestTaskNo: this.taskId
+    }), {headers: this.headers}).then(res => res.json()).then(data => {
+      if (data?.data.taskStatus == 'PROCESSING') {
+        return this.getChatResult(data.data.taskOrderNo)
+      } else {
+        return data?.data?.outputText || "key无效或网络波动,请重新尝试"
+      }
+    });
+  }
+
+  async getChatResult(taskOrderNo) {
+    return await axios.get(ChatGLM.base + `/request-task/query-request-task-result/${taskOrderNo}`,
+      {headers: this.headers}).then(res => res.json()).then(data => {
+      if (data?.data.taskStatus == 'PROCESSING') {
+        return this.getChatResult(data.data.taskOrderNo)
+      } else {
+        return data?.data?.outputText || "key无效或网络波动,请重新尝试"
+      }
+    })
+  }

   async chatStream(messages) {
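The new non-streaming chat() above follows a submit-then-poll protocol: the first POST may come back with taskStatus PROCESSING and a taskOrderNo, and getChatResult re-queries /request-task/query-request-task-result/{taskOrderNo} until the task settles. A standalone sketch of that loop using plain fetch; the one-second back-off is an added assumption (the diff recurses immediately):

// Poll the ChatGLM task-result endpoint until the task leaves PROCESSING.
// baseUrl and headers are assumed to be supplied by the caller.
async function pollTaskResult(baseUrl, headers, taskOrderNo) {
  const res = await fetch(`${baseUrl}/request-task/query-request-task-result/${taskOrderNo}`, {headers});
  const data = (await res.json())?.data;
  if (data?.taskStatus === 'PROCESSING') {
    await new Promise(resolve => setTimeout(resolve, 1000)); // assumed back-off
    return pollTaskResult(baseUrl, headers, taskOrderNo);
  }
  return data?.outputText; // the completed answer
}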
@@ -148,7 +169,6 @@ export class ChatGLM extends BaseModel {
     history.pop()
     const prompt = history.pop()
     const url = ChatGLM.base + "/model/v1/open/engines/sse/chatGLM/chatGLM"
-    // const url = ChatGLM.base + "/model/v2/open/engines/chatglm_qa_6b/chatglm_6b"
     return await axios.post(url, JSON.stringify({
       history, prompt,
       temperature: 1, top_p: 0.6, requestTaskNo: this.taskId
@@ -158,16 +178,16 @@ export class ChatGLM extends BaseModel {
   }

   static encrypt(publicKey, timestamp) {
-    const public_key = forge.util.decode64(publicKey)
-    const decoded_key = forge.asn1.fromDer(public_key); // decode with fromDer
-    const key = forge.pki.publicKeyFromAsn1(decoded_key); // import the public key with publicKeyFromAsn1
-    const encrypted = key.encrypt(timestamp, 'RSAES-PKCS1-V1_5');
+    const publicKeyDer = forge.util.decode64(publicKey)
+    const key = forge.pki.publicKeyFromAsn1(forge.asn1.fromDer(publicKeyDer)); // import the public key with publicKeyFromAsn1
+    timestamp = new TextEncoder().encode(Date.now().toFixed(0))
+    const encrypted = key.encrypt(timestamp);
     return forge.util.encode64(encrypted);
   }

   async getAccount() {
-    const {headers} = this
-    const usages = await axios.get("https://open.bigmodel.ai/api/paas/account/query-customer-account-report", {headers}).then(res => res.json());
+    const usages = await axios.get("https://open.bigmodel.ai/api/paas/account/query-customer-account-report",
+      {headers: this.headers}).then(res => res.json());
     if (usages.code == 200) {
       const {data} = usages
       return {
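The fixed getToken/encrypt pair implements the token handshake: the current millisecond timestamp is RSA-encrypted with the platform's base64/DER-encoded public key and POSTed to /passApiToken/createApiToken alongside the API key. A standalone sketch of the encryption step with node-forge; note that forge's encrypt() works on binary strings and defaults to RSAES-PKCS1-V1_5, so the sketch passes the timestamp as a plain string rather than the Uint8Array the diff builds with TextEncoder:

import forge from 'node-forge';

// Encrypt the current timestamp with a base64/DER RSA public key
// (ChatGLM.publicKey in the diff) and return it base64-encoded.
function encryptTimestamp(publicKeyB64) {
  const der = forge.util.decode64(publicKeyB64);                    // base64 -> DER bytes
  const key = forge.pki.publicKeyFromAsn1(forge.asn1.fromDer(der)); // DER -> RSA public key
  const encrypted = key.encrypt(Date.now().toFixed(0));             // RSAES-PKCS1-V1_5 by default
  return forge.util.encode64(encrypted);                            // base64 for the JSON payload
}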