// use-client.js

// eslint-disable-next-line no-unused-vars
import { KeyvFile } from 'keyv-file';
// import { ChatGPTClient } from '@waylaidwanderer/chatgpt-api';
import { ChatGPTClient } from '../index.js';

const clientOptions = {
  // (Optional) Support for a reverse proxy for the completions endpoint (private API server).
  // Warning: This will expose your `openaiApiKey` to a third party. Consider the risks before using this.
  // reverseProxyUrl: 'https://chatgpt.hato.ai/completions',
  // (Optional) To use the Azure OpenAI API, set `azure` to true and `reverseProxyUrl` to your completions endpoint:
  // azure: true,
  // reverseProxyUrl: 'https://{your-resource-name}.openai.azure.com/openai/deployments/{deployment-id}/chat/completions?api-version={api-version}',
  // (Optional) Parameters as described in https://platform.openai.com/docs/api-reference/completions
  modelOptions: {
    // You can override the model name and any other parameters here, like so:
    model: 'gpt-3.5-turbo',
    // The temperature is set to 0 here for demonstration purposes; you shouldn't need to override it
    // for normal usage.
    temperature: 0,
    // Set `max_tokens` here to override the default of 1000 tokens for the completion.
    // max_tokens: 1000,
  },
  // (Optional) Davinci models have a max context length of 4097 tokens, but you may need to change this for other models.
  // maxContextTokens: 4097,
  // (Optional) You might want to lower this to save money if using a paid model like `text-davinci-003`.
  // Earlier messages will be dropped until the prompt is within the limit.
  // maxPromptTokens: 3097,
  // (Optional) Set custom instructions instead of "You are ChatGPT...".
  // promptPrefix: 'You are Bob, a cowboy in Western times...',
  // (Optional) Set a custom name for the user.
  // userLabel: 'User',
  // (Optional) Set a custom name for ChatGPT.
  // chatGptLabel: 'ChatGPT',
  // (Optional) Set to true to enable `console.debug()` logging.
  debug: false,
};
const cacheOptions = {
  // Options for the Keyv cache; see https://www.npmjs.com/package/keyv
  // This is used for storing conversations and supports additional drivers (conversations are stored in memory by default).
  // For example, to use a JSON file (`npm i keyv-file`) as a database:
  // store: new KeyvFile({ filename: 'cache.json' }),
};
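// A minimal persistence sketch (an assumption, not part of the original demo): with a
// file-backed store such as the `KeyvFile` line above uncommented, conversations survive
// process restarts, so a saved pair of IDs can resume a conversation later. The IDs here
// are illustrative placeholders you would persist yourself:
//
// const saved = { conversationId: '...', parentMessageId: '...' };
// const resumed = await chatGptClient.sendMessage('Where were we?', saved);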
const chatGptClient = new ChatGPTClient('OPENAI_API_KEY', clientOptions, cacheOptions);

let response = await chatGptClient.sendMessage('Hello!');
console.log(response); // { response: 'Hello! How can I assist you today?', conversationId: '...', messageId: '...' }

// Pass `conversationId` and `parentMessageId` back in to continue the same conversation.
response = await chatGptClient.sendMessage('Write a short poem about cats.', {
  conversationId: response.conversationId,
  parentMessageId: response.messageId,
});
console.log(response.response); // Soft and sleek, with eyes that gleam,\nCats are creatures of grace supreme.\n...

console.log();
response = await chatGptClient.sendMessage('Now write it in French.', {
  conversationId: response.conversationId,
  parentMessageId: response.messageId,
  // If you want streamed responses, set the `onProgress` callback to receive the response as it's generated.
  // You will receive one token at a time, so you will need to concatenate them yourself.
  onProgress: token => process.stdout.write(token),
});
console.log();
console.log(response.response); // Doux et élégant, avec des yeux qui brillent,\nLes chats sont des créatures de grâce suprême.\n...
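// A minimal sketch (an assumption, not part of the original demo) of concatenating the
// streamed tokens yourself instead of writing them straight to stdout; the prompt text
// is illustrative:
//
// let streamed = '';
// await chatGptClient.sendMessage('Summarize the poem in one sentence.', {
//   conversationId: response.conversationId,
//   parentMessageId: response.messageId,
//   onProgress: token => { streamed += token; },
// });
// console.log(streamed); // The full response, assembled token by token.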
response = await chatGptClient.sendMessage('Repeat my 2nd message verbatim.', {
  conversationId: response.conversationId,
  parentMessageId: response.messageId,
  // Stream this response too; see the `onProgress` notes on the previous call.
  onProgress: token => process.stdout.write(token),
});
console.log();
console.log(response.response); // "Write a short poem about cats."
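// A minimal error-handling sketch (an assumption, not part of the original demo),
// assuming the client surfaces network or API failures as rejected promises; the
// prompt text is illustrative:
try {
  await chatGptClient.sendMessage('One more thing: say goodbye.');
} catch (error) {
  console.error('sendMessage failed:', error);
}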