Class: ReplicateLLM
Replicate LLM implementation, used to run the hosted Llama 2 and Llama 3 chat models through the Replicate API.
Hierarchy
- BaseLLM
  ↳ ReplicateLLM
Constructors
constructor
• new ReplicateLLM(init?): ReplicateLLM
Parameters
Name | Type |
---|---|
init? | Partial<ReplicateLLM> & { noWarn?: boolean } |
Returns
ReplicateLLM
Overrides
BaseLLM.constructor
Defined in
packages/core/src/llm/replicate_ai.ts:115
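A minimal construction sketch; it assumes ReplicateLLM is exported from the llamaindex package and that the Replicate API token is supplied via the environment (read by the underlying ReplicateSession):

```typescript
import { ReplicateLLM } from "llamaindex";

// Construct with a partial init object; unspecified fields fall back to
// the class defaults. Auth is assumed to come from the
// REPLICATE_API_TOKEN environment variable.
const llm = new ReplicateLLM({
  model: "llama-3-8b-instruct",
  temperature: 0.5,
});
```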
Properties
chatStrategy
• chatStrategy: ReplicateChatStrategy
Defined in
packages/core/src/llm/replicate_ai.ts:109
maxTokens
• Optional maxTokens: number
Defined in
packages/core/src/llm/replicate_ai.ts:112
model
• model: "Llama-2-70b-chat-old" | "Llama-2-70b-chat-4bit" | "Llama-2-13b-chat-old" | "Llama-2-13b-chat-4bit" | "Llama-2-7b-chat-old" | "Llama-2-7b-chat-4bit" | "llama-3-70b-instruct" | "llama-3-8b-instruct"
Defined in
packages/core/src/llm/replicate_ai.ts:108
replicateSession
• replicateSession: ReplicateSession
Defined in
packages/core/src/llm/replicate_ai.ts:113
temperature
• temperature: number
Defined in
packages/core/src/llm/replicate_ai.ts:110
topP
• topP: number
Defined in
packages/core/src/llm/replicate_ai.ts:111
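All of the generation settings above can be supplied through the constructor's init object. A short sketch with illustrative values (these numbers are examples, not defaults from the source):

```typescript
const tuned = new ReplicateLLM({
  model: "llama-3-70b-instruct",
  temperature: 0.2, // lower values make sampling more deterministic
  topP: 0.9,        // nucleus-sampling cutoff
  maxTokens: 512,   // optional cap on generated tokens
});
```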
Accessors
metadata
• get metadata(): Object
Returns
Object
Name | Type |
---|---|
contextWindow | number |
maxTokens | undefined \| number |
model | "Llama-2-70b-chat-old" \| "Llama-2-70b-chat-4bit" \| "Llama-2-13b-chat-old" \| "Llama-2-13b-chat-4bit" \| "Llama-2-7b-chat-old" \| "Llama-2-7b-chat-4bit" \| "llama-3-70b-instruct" \| "llama-3-8b-instruct" |
temperature | number |
tokenizer | undefined |
topP | number |
Overrides
BaseLLM.metadata
Defined in
packages/core/src/llm/replicate_ai.ts:140
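Reading the accessor on an instance (continuing the construction sketch above; contextWindow is resolved from the chosen model):

```typescript
// Destructure the resolved settings from the metadata accessor.
const { model, contextWindow, temperature } = llm.metadata;
console.log(`${model}: context window ${contextWindow}, temperature ${temperature}`);
```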
Methods
chat
▸ chat(params): Promise<AsyncIterable<ChatResponseChunk>>
Parameters
Name | Type |
---|---|
params | LLMChatParamsStreaming<object, object> |
Returns
Promise<AsyncIterable<ChatResponseChunk>>
Overrides
BaseLLM.chat
Defined in
packages/core/src/llm/replicate_ai.ts:307
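A hedged streaming sketch; it assumes the LlamaIndexTS convention that stream: true selects this overload and that each ChatResponseChunk exposes its text as delta:

```typescript
const stream = await llm.chat({
  messages: [{ role: "user", content: "Summarize the Llama 3 release." }],
  stream: true, // selects the streaming overload
});
// Consume the async iterable chunk by chunk as tokens arrive.
for await (const chunk of stream) {
  process.stdout.write(chunk.delta);
}
```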
▸ chat(params): Promise<ChatResponse<object>>
Parameters
Name | Type |
---|---|
params | LLMChatParamsNonStreaming<object, object> |
Returns
Promise<ChatResponse<object>>
Overrides
BaseLLM.chat
Defined in
packages/core/src/llm/replicate_ai.ts:310
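Without stream: true the call resolves to a single ChatResponse; a sketch assuming the response text lives on message.content:

```typescript
const response = await llm.chat({
  messages: [
    { role: "system", content: "Answer in one sentence." },
    { role: "user", content: "What is Replicate?" },
  ],
});
// The full reply is available once the promise resolves.
console.log(response.message.content);
```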
complete
▸ complete(params): Promise<AsyncIterable<CompletionResponse>>
Parameters
Name | Type |
---|---|
params | LLMCompletionParamsStreaming |
Returns
Promise<AsyncIterable<CompletionResponse>>