Fix ChatGLM's token-fetching method, and hook up non-streaming requests as well
@@ -131,16 +131,37 @@ export class ChatGLM extends BaseModel {
 
     async getToken() {
         if (this.apiKey) return await this.apiKey
-        const timestamp = new TextEncoder().encode(Date.now().toFixed(0))
-        const encrypted = ChatGLM.encrypt(ChatGLM.publicKey, timestamp)
+        const encrypted = ChatGLM.encrypt(ChatGLM.publicKey)
         return await axios.post(ChatGLM.base + "/passApiToken/createApiToken", JSON.stringify({apiKey: "4e3ceff669c143dfa09e763663aa72cd", encrypted}), {
             headers: this.headers,
-        }).then(res => res.json()).then(data => data?.data || "key无效或网络波动,请重新尝试");
+        }).then(res => res.json()).then(data => data?.data);
     }
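A note on the getToken fix above: the old version computed the timestamp at the call site and, worse, substituted the literal string "key无效或网络波动,请重新尝试" ("key invalid or network hiccup, please retry") for a missing token, so a failed request would poison every later Authorization header with an error message. The new version returns data?.data as-is, so callers see undefined on failure. For orientation, a minimal standalone sketch of the fixed flow, assuming the response body has the shape { data: "<token>" } implied by the data?.data access, and using standard fetch (the plugin's axios appears fetch-compatible, judging by the res.json() calls):

    // Hedged sketch, not part of the commit: the fixed token flow as a free function.
    async function createApiToken(base, apiKey, headers) {
        const encrypted = ChatGLM.encrypt(ChatGLM.publicKey) // current timestamp, RSA-encrypted
        const res = await fetch(base + "/passApiToken/createApiToken", {
            method: "POST",
            headers,
            body: JSON.stringify({apiKey, encrypted}),
        })
        const data = await res.json()
        return data?.data // undefined on failure, so a bad key is distinguishable from a token
    }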
 
-    async chat(history, callback) {
-        const context = await axios.post(ChatGLM.base + "/v1/stream_context").then(res => res.json());
-        return await axios.get(ChatGPT.base + "/v1/stream", {params: context.result})
+    async chat(messages) {
+        const history = messages.map(e => e.msg)
+        history.pop()
+        const prompt = history.pop()
+        return await axios.post(ChatGLM.base + "/model/v1/open/engines/chatGLM/chatGLM", JSON.stringify({
+            history, prompt,
+            temperature: 1, top_p: 0.6, requestTaskNo: this.taskId
+        }), {headers: this.headers}).then(res => res.json()).then(data => {
+            if (data?.data.taskStatus == 'PROCESSING') {
+                return this.getChatResult(data.data.taskOrderNo)
+            } else {
+                return data?.data?.outputText || "key无效或网络波动,请重新尝试"
+            }
+        });
+    }
+
+    async getChatResult(taskOrderNo) {
+        return await axios.get(ChatGLM.base + `/request-task/query-request-task-result/${taskOrderNo}`,
+            {headers: this.headers}).then(res => res.json()).then(data => {
+            if (data?.data.taskStatus == 'PROCESSING') {
+                return this.getChatResult(data.data.taskOrderNo)
+            } else {
+                return data?.data?.outputText || "key无效或网络波动,请重新尝试"
+            }
+        })
     }
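The new non-streaming path works in two steps: chat() submits the prompt to the chatGLM engine endpoint (history.pop() appears to discard a trailing placeholder message, and the second pop() takes the latest message as the prompt), and if the reply comes back with taskStatus still PROCESSING, getChatResult() re-queries the query-request-task-result endpoint until the task settles. Note that the recursion polls again immediately, with no delay and no attempt cap. A hedged sketch of the same poll with a fixed delay and a bounded number of attempts (sleep and maxAttempts are additions, not in the commit):

    const sleep = ms => new Promise(resolve => setTimeout(resolve, ms))

    // Hedged sketch: bounded, delayed polling of the task-result endpoint.
    async function pollChatResult(base, headers, taskOrderNo, maxAttempts = 30) {
        for (let i = 0; i < maxAttempts; i++) {
            const data = await fetch(base + `/request-task/query-request-task-result/${taskOrderNo}`,
                {headers}).then(res => res.json())
            if (data?.data?.taskStatus !== 'PROCESSING') {
                return data?.data?.outputText // settled: return the model's reply text
            }
            await sleep(1000) // wait a second between polls instead of hammering the endpoint
        }
        return undefined // still PROCESSING after maxAttempts polls; give up
    }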
 
     async chatStream(messages) {
@@ -148,7 +169,6 @@ export class ChatGLM extends BaseModel {
         history.pop()
         const prompt = history.pop()
         const url = ChatGLM.base + "/model/v1/open/engines/sse/chatGLM/chatGLM"
-        // const url = ChatGLM.base + "/model/v2/open/engines/chatglm_qa_6b/chatglm_6b"
         return await axios.post(url, JSON.stringify({
             history, prompt,
             temperature: 1, top_p: 0.6, requestTaskNo: this.taskId
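The streaming path keeps posting to the sse engine URL; the commented-out chatglm_qa_6b alternative is simply dropped. This diff doesn't show how the event stream is consumed, but for orientation, a hedged sketch of reading an SSE-style body with standard fetch (the "data:" line format is an assumption, not confirmed by this diff):

    // Hedged sketch: parse "data: ..." lines from a streamed response body (Node 18+ fetch).
    async function readSseLines(url, body, headers, onData) {
        const res = await fetch(url, {method: "POST", headers, body: JSON.stringify(body)})
        const reader = res.body.getReader()
        const decoder = new TextDecoder()
        let buffered = ""
        for (;;) {
            const {done, value} = await reader.read()
            if (done) break
            buffered += decoder.decode(value, {stream: true})
            const lines = buffered.split("\n")
            buffered = lines.pop() // keep a possibly incomplete trailing line for the next chunk
            for (const line of lines) {
                if (line.startsWith("data:")) onData(line.slice(5).trim())
            }
        }
    }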
@@ -158,16 +178,16 @@ export class ChatGLM extends BaseModel {
     }
 
     static encrypt(publicKey, timestamp) {
-        const public_key = forge.util.decode64(publicKey)
-        const decoded_key = forge.asn1.fromDer(public_key); // decode with fromDer
-        const key = forge.pki.publicKeyFromAsn1(decoded_key); // import the public key with publicKeyFromAsn1
-        const encrypted = key.encrypt(timestamp, 'RSAES-PKCS1-V1_5');
+        const publicKeyDer = forge.util.decode64(publicKey)
+        const key = forge.pki.publicKeyFromAsn1(forge.asn1.fromDer(publicKeyDer)); // import the public key with publicKeyFromAsn1
+        timestamp = new TextEncoder().encode(Date.now().toFixed(0))
+        const encrypted = key.encrypt(timestamp);
         return forge.util.encode64(encrypted);
     }
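Two things changed in encrypt: the timestamp is now generated inside the function (the timestamp parameter is simply reassigned), and the explicit 'RSAES-PKCS1-V1_5' argument is gone, which is harmless because that is node-forge's default scheme for publicKey.encrypt. One caveat worth flagging: forge's encrypt operates on binary strings, while TextEncoder produces a Uint8Array; since the decimal timestamp is pure ASCII, the encoder round-trip could be skipped entirely. A hedged equivalent (encryptTimestamp is an illustrative name, not in the commit):

    import forge from "node-forge"

    // Hedged sketch: same encryption, passing the timestamp as a plain ASCII string.
    function encryptTimestamp(publicKeyBase64) {
        const der = forge.util.decode64(publicKeyBase64)                 // base64 -> DER bytes
        const key = forge.pki.publicKeyFromAsn1(forge.asn1.fromDer(der)) // DER -> RSA public key
        const ciphertext = key.encrypt(Date.now().toFixed(0))            // RSAES-PKCS1-V1_5 by default
        return forge.util.encode64(ciphertext)                           // base64-encode the ciphertext
    }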
 
     async getAccount() {
-        const {headers} = this
-        const usages = await axios.get("https://open.bigmodel.ai/api/paas/account/query-customer-account-report", {headers}).then(res => res.json());
+        const usages = await axios.get("https://open.bigmodel.ai/api/paas/account/query-customer-account-report",
+            {headers: this.headers}).then(res => res.json());
         if (usages.code == 200) {
             const {data} = usages
             return {