File tree Expand file tree Collapse file tree 3 files changed +23
-3
lines changed Expand file tree Collapse file tree 3 files changed +23
-3
lines changed Original file line number Diff line number Diff line change 1
1
configured_endpoints : 28
2
- openapi_spec_url : https://storage.googleapis.com/stainless-sdk-openapi-specs/togetherai%2Ftogetherai-df8486ebc1de1915a1611890eb571e387e1dac284df20962d10be37aa8855ce6 .yml
3
- openapi_spec_hash : 499837ed4588ccbb5426ea3c1f0a4179
2
+ openapi_spec_url : https://storage.googleapis.com/stainless-sdk-openapi-specs/togetherai%2Ftogetherai-8f50cb3e342f2fd67f1f2cfda195b3d78c0740344f55f37cf1c99c66a0f7c2c5 .yml
3
+ openapi_spec_hash : b9907745f73f337395ffd5cef1e8a2d5
4
4
config_hash : a60b100624e80dc8d9144e7bc306f5ce
Original file line number Diff line number Diff line change @@ -17,7 +17,7 @@ export class Models extends APIResource {
17
17
}
18
18
19
19
/**
20
- * Upload a custom model from Hugging Face or S3
20
+ * Upload a custom model or adapter from Hugging Face or S3
21
21
*
22
22
* @example
23
23
* ```ts
@@ -101,6 +101,12 @@ export interface ModelUploadParams {
101
101
*/
102
102
model_source : string ;
103
103
104
+ /**
105
+ * The base model to run the adapter against when using a serverless
106
+ * pool. Only used for model_type `adapter`.
107
+ */
108
+ base_model ?: string ;
109
+
104
110
/**
105
111
* A description of your model
106
112
*/
@@ -110,6 +116,17 @@ export interface ModelUploadParams {
110
116
* Hugging Face token (if uploading from Hugging Face)
111
117
*/
112
118
hf_token ?: string ;
119
+
120
+ /**
121
+ * The LoRA pool to run the adapter against when using, for example, a
122
+ * dedicated pool. Only used for model_type `adapter`.
123
+ */
124
+ lora_model ?: string ;
125
+
126
+ /**
127
+ * Whether the model is a full model or an adapter
128
+ */
129
+ model_type ?: 'model' | 'adapter' ;
113
130
}
114
131
115
132
export declare namespace Models {
Original file line number Diff line number Diff line change @@ -45,8 +45,11 @@ describe('resource models', () => {
45
45
const response = await client . models . upload ( {
46
46
model_name : 'Qwen2.5-72B-Instruct' ,
47
47
model_source : 'unsloth/Qwen2.5-72B-Instruct' ,
48
+ base_model : 'Qwen/Qwen2.5-72B-Instruct' ,
48
49
description : 'Finetuned Qwen2.5-72B-Instruct by Unsloth' ,
49
50
hf_token : 'hf_examplehuggingfacetoken' ,
51
+ lora_model : 'my_username/Qwen2.5-72B-Instruct-lora' ,
52
+ model_type : 'model' ,
50
53
} ) ;
51
54
} ) ;
52
55
} ) ;
You can’t perform that action at this time.
0 commit comments