修复ChatGLM获取token方法,并且将非流式请求也接入

This commit is contained in:
aixianling
2023-05-18 14:18:18 +08:00
parent c179400d67
commit 8b3db98ffe
3 changed files with 42 additions and 20 deletions

View File

@@ -10,7 +10,7 @@
 <script>
 import Chat from "./components/chat";
 import Settings from "./components/settings";
-import {ChatGPT} from "./utils/models";
+import {ChatGLM} from "./utils/models";
 
 export default {
   name: 'App',
@@ -19,14 +19,13 @@ export default {
     return {
       showSettings: false,
       setting: {
-        model: new ChatGPT(),
+        model: new ChatGLM(),
         stream: true
       },
     }
   },
   methods: {
     handleResize() {
-      console.log("App handleResize:" + window.innerWidth)
       this.showSettings = window.innerWidth > 1150;
     }
   },

View File

@@ -10,7 +10,7 @@
     <el-form-item label="流式输出">
       <el-switch v-model="settings.stream" :active-value="true" :inactive-value="false"/>
     </el-form-item>
-    <el-form-item label="API KEY">
+    <el-form-item label="API KEY" v-if="isGPT">
       <el-row class="w100">
         <el-input v-model="settings.model.apiKey" clearable class="fill mar-r8"/>
         <el-button type="text" @click="getModelAccount">应用</el-button>
@@ -55,7 +55,9 @@ export default {
   },
   computed: {
     models: () => Object.values(models),
-    account: v => v.settings.account || {usage: 0, total: 0}
+    account: v => v.settings.account || {usage: 0, total: 0},
+    apiKey: v => v.settings.model.apiKey || "key无效或网络波动,请重新尝试",
+    isGPT: v => v.settings.model.name == "ChatGPT"
   },
   methods: {
     initModel(model) {
@@ -68,12 +70,13 @@ export default {
         }
       }, 500)
     },
-    getModelAccount() {
+    getModelAccount(c = 0) {
       const ai = this.settings.model
-      console.log(ai)
       if (ai.apiKey) {
         this.loadingAccount = true
         ai.getAccount().then(v => this.settings.account = v).finally(() => this.loadingAccount = false)
-      }
+      } else if (c < 5) setTimeout(() => this.getModelAccount(++c), 1000)
     }
   }
 }

View File

@@ -131,16 +131,37 @@ export class ChatGLM extends BaseModel {
   async getToken() {
     if (this.apiKey) return await this.apiKey
-    const timestamp = new TextEncoder().encode(Date.now().toFixed(0))
-    const encrypted = ChatGLM.encrypt(ChatGLM.publicKey, timestamp)
+    const encrypted = ChatGLM.encrypt(ChatGLM.publicKey)
     return await axios.post(ChatGLM.base + "/passApiToken/createApiToken", JSON.stringify({apiKey: "4e3ceff669c143dfa09e763663aa72cd", encrypted}), {
       headers: this.headers,
-    }).then(res => res.json()).then(data => data?.data || "key无效或网络波动,请重新尝试");
+    }).then(res => res.json()).then(data => data?.data);
   }
-  async chat(history, callback) {
-    const context = await axios.post(ChatGLM.base + "/v1/stream_context").then(res => res.json());
-    return await axios.get(ChatGPT.base + "/v1/stream", {params: context.result})
+  async chat(messages) {
+    const history = messages.map(e => e.msg)
+    history.pop()
+    const prompt = history.pop()
+    return await axios.post(ChatGLM.base + "/model/v1/open/engines/chatGLM/chatGLM", JSON.stringify({
+      history, prompt,
+      temperature: 1, top_p: 0.6, requestTaskNo: this.taskId
+    }), {headers: this.headers}).then(res => res.json()).then(data => {
+      if (data?.data.taskStatus == 'PROCESSING') {
+        return this.getChatResult(data.data.taskOrderNo)
+      } else {
+        return data?.data?.outputText || "key无效或网络波动,请重新尝试"
+      }
+    });
+  }
+  async getChatResult(taskOrderNo) {
+    return await axios.get(ChatGLM.base + `/request-task/query-request-task-result/${taskOrderNo}`,
+      {headers: this.headers}).then(res => res.json()).then(data => {
+      if (data?.data.taskStatus == 'PROCESSING') {
+        return this.getChatResult(data.data.taskOrderNo)
+      } else {
+        return data?.data?.outputText || "key无效或网络波动,请重新尝试"
+      }
+    })
   }
   async chatStream(messages) {
@@ -148,7 +169,6 @@ export class ChatGLM extends BaseModel {
     history.pop()
     const prompt = history.pop()
     const url = ChatGLM.base + "/model/v1/open/engines/sse/chatGLM/chatGLM"
-    // const url = ChatGLM.base + "/model/v2/open/engines/chatglm_qa_6b/chatglm_6b"
     return await axios.post(url, JSON.stringify({
       history, prompt,
       temperature: 1, top_p: 0.6, requestTaskNo: this.taskId
@@ -158,16 +178,16 @@ export class ChatGLM extends BaseModel {
   }
   static encrypt(publicKey, timestamp) {
-    const public_key = forge.util.decode64(publicKey)
-    const decoded_key = forge.asn1.fromDer(public_key); // 使用 fromDer 方法解码
-    const key = forge.pki.publicKeyFromAsn1(decoded_key); // 使用 publicKeyFromAsn1 方法导入公钥
-    const encrypted = key.encrypt(timestamp, 'RSAES-PKCS1-V1_5');
+    const publicKeyDer = forge.util.decode64(publicKey)
+    const key = forge.pki.publicKeyFromAsn1(forge.asn1.fromDer(publicKeyDer)); // 使用 publicKeyFromAsn1 方法导入公钥
+    timestamp = new TextEncoder().encode(Date.now().toFixed(0))
+    const encrypted = key.encrypt(timestamp);
     return forge.util.encode64(encrypted);
   }
   async getAccount() {
-    const {headers} = this
-    const usages = await axios.get("https://open.bigmodel.ai/api/paas/account/query-customer-account-report", {headers}).then(res => res.json());
+    const usages = await axios.get("https://open.bigmodel.ai/api/paas/account/query-customer-account-report",
+      {headers: this.headers}).then(res => res.json());
     if (usages.code == 200) {
       const {data} = usages
       return {