@@ -48,6 +48,7 @@ Link the `onnxruntime-react-native` library:

```sh
npx react-native link onnxruntime-react-native
```
+
</details>

<details>
@@ -56,14 +57,9 @@ npx react-native link onnxruntime-react-native

Add the Expo plugin configuration in `app.json` or `app.config.js`:

```json
-{
-  "expo": {
-    "plugins": [
-      "onnxruntime-react-native"
-    ]
-  }
-}
+{ "expo": { "plugins": ["onnxruntime-react-native"] } }
```
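Since the prose above also mentions `app.config.js`, the equivalent entry in that form is sketched below. This is the standard Expo dynamic-config shape, not anything specific to this plugin:

```javascript
// app.config.js -- equivalent of the app.json entry above
module.exports = {
  expo: {
    plugins: ['onnxruntime-react-native'],
  },
};
```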
+
</details>

### 4. Babel Configuration
@@ -76,8 +72,8 @@ module.exports = {
  // ... your existing config
  plugins: [
    // ... your existing plugins
-    "babel-plugin-transform-import-meta"
-  ]
+    'babel-plugin-transform-import-meta',
+  ],
};
```
@@ -92,18 +88,17 @@ You can set up a development client using one of these methods:

- **[EAS Development Build](https://docs.expo.dev/develop/development-builds/introduction/)**: Create a custom development client using EAS Build
- **[Expo Prebuild](https://docs.expo.dev/workflow/prebuild/)**: Eject to a bare workflow to access native code (see the example commands after this list)
-
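Either method boils down to a couple of CLI invocations. A minimal sketch using the standard Expo and EAS CLIs (nothing here is specific to `react-native-transformers`; adjust the platform and build profile to your project):

```sh
# Option 1: EAS development build (requires an Expo account)
npm install -g eas-cli
eas build --profile development --platform android

# Option 2: generate the native projects locally and run a dev build
npx expo prebuild
npx expo run:android
```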
## Usage

### Text Generation

```javascript
-import React, { useState, useEffect } from "react";
-import { View, Text, Button } from "react-native";
-import { Pipeline } from "react-native-transformers";
+import React, { useState, useEffect } from 'react';
+import { View, Text, Button } from 'react-native';
+import { Pipeline } from 'react-native-transformers';

export default function App() {
-  const [output, setOutput] = useState("");
+  const [output, setOutput] = useState('');
  const [isLoading, setIsLoading] = useState(false);
  const [isModelReady, setIsModelReady] = useState(false);
@@ -117,44 +112,44 @@ export default function App() {
    try {
      // Load a small Llama model
      await Pipeline.TextGeneration.init(
-        "Felladrin/onnx-Llama-160M-Chat-v1",
-        "onnx/decoder_model_merged.onnx",
+        'Felladrin/onnx-Llama-160M-Chat-v1',
+        'onnx/decoder_model_merged.onnx',
        {
          // The fetch function is required to download model files
          fetch: async (url) => {
            // In a real app, you might want to cache the downloaded files
            const response = await fetch(url);
            return response.url;
-          }
+          },
        }
      );
      setIsModelReady(true);
    } catch (error) {
-      console.error("Error loading model:", error);
-      alert("Failed to load model: " + error.message);
+      console.error('Error loading model:', error);
+      alert('Failed to load model: ' + error.message);
    } finally {
      setIsLoading(false);
    }
  };

  const generateText = () => {
-    setOutput("");
+    setOutput('');
    // Generate text from the prompt and update the UI as tokens are generated
    Pipeline.TextGeneration.generate(
-      "Write a short poem about programming:",
+      'Write a short poem about programming:',
      (text) => setOutput(text)
    );
  };

  return (
    <View style={{ padding: 20 }}>
      <Button
-        title={isModelReady ? "Generate Text" : "Load Model"}
+        title={isModelReady ? 'Generate Text' : 'Load Model'}
        onPress={isModelReady ? generateText : loadModel}
        disabled={isLoading}
      />
      <Text style={{ marginTop: 20 }}>
-        {output || "Generated text will appear here"}
+        {output || 'Generated text will appear here'}
      </Text>
    </View>
  );
@@ -166,18 +161,18 @@ export default function App() {

For Expo applications, use `expo-file-system` to download models with progress tracking:

```javascript
-import * as FileSystem from "expo-file-system";
-import { Pipeline } from "react-native-transformers";
+import * as FileSystem from 'expo-file-system';
+import { Pipeline } from 'react-native-transformers';

// In your model loading function
-await Pipeline.TextGeneration.init("model-repo", "model-file", {
+await Pipeline.TextGeneration.init('model-repo', 'model-file', {
  fetch: async (url) => {
-    const localPath = FileSystem.cacheDirectory + url.split("/").pop();
+    const localPath = FileSystem.cacheDirectory + url.split('/').pop();

    // Check if file already exists
    const fileInfo = await FileSystem.getInfoAsync(localPath);
    if (fileInfo.exists) {
-      console.log("Model already downloaded, using cached version");
+      console.log('Model already downloaded, using cached version');
      return localPath;
    }
@@ -187,28 +182,31 @@ await Pipeline.TextGeneration.init("model-repo", "model-file", {
      localPath,
      {},
      (progress) => {
-        const percentComplete = progress.totalBytesWritten / progress.totalBytesExpectedToWrite;
-        console.log(`Download progress: ${(percentComplete * 100).toFixed(1)}%`);
+        const percentComplete =
+          progress.totalBytesWritten / progress.totalBytesExpectedToWrite;
+        console.log(
+          `Download progress: ${(percentComplete * 100).toFixed(1)}%`
+        );
      }
    );

    const result = await downloadResumable.downloadAsync();
    return result?.uri;
-  }
+  },
});
```
## Supported Models

`react-native-transformers` works with ONNX-formatted models from Hugging Face. Here are some recommended models based on size and performance:

-| Model | Type | Size | Description |
-|-------|------|------|-------------|
-| [Felladrin/onnx-Llama-160M-Chat-v1](https://huggingface.co/Felladrin/onnx-Llama-160M-Chat-v1) | Text Generation | ~300MB | Small Llama model (160M parameters) |
-| [microsoft/Phi-3-mini-4k-instruct-onnx-web](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-onnx-web) | Text Generation | ~1.5GB | Microsoft's Phi-3-mini model |
-| [Xenova/distilgpt2_onnx-quantized](https://huggingface.co/Xenova/distilgpt2_onnx-quantized) | Text Generation | ~165MB | Quantized DistilGPT-2 |
-| [Xenova/tiny-mamba-onnx](https://huggingface.co/Xenova/tiny-mamba-onnx) | Text Generation | ~85MB | Tiny Mamba model |
-| [Xenova/all-MiniLM-L6-v2-onnx](https://huggingface.co/Xenova/all-MiniLM-L6-v2-onnx) | Text Embedding | ~80MB | Sentence embedding model |
+| Model                                                                                                          | Type            | Size   | Description                         |
+| -------------------------------------------------------------------------------------------------------------- | --------------- | ------ | ----------------------------------- |
+| [Felladrin/onnx-Llama-160M-Chat-v1](https://huggingface.co/Felladrin/onnx-Llama-160M-Chat-v1)                   | Text Generation | ~300MB | Small Llama model (160M parameters) |
+| [microsoft/Phi-3-mini-4k-instruct-onnx-web](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-onnx-web)   | Text Generation | ~1.5GB | Microsoft's Phi-3-mini model        |
+| [Xenova/distilgpt2_onnx-quantized](https://huggingface.co/Xenova/distilgpt2_onnx-quantized)                     | Text Generation | ~165MB | Quantized DistilGPT-2               |
+| [Xenova/tiny-mamba-onnx](https://huggingface.co/Xenova/tiny-mamba-onnx)                                         | Text Generation | ~85MB  | Tiny Mamba model                    |
+| [Xenova/all-MiniLM-L6-v2-onnx](https://huggingface.co/Xenova/all-MiniLM-L6-v2-onnx)                             | Text Embedding  | ~80MB  | Sentence embedding model            |
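Any text-generation model in the table can be swapped into the earlier usage example by changing the repository and file arguments passed to `Pipeline.TextGeneration.init`. Note that the ONNX filename varies between repositories; the path below is a hypothetical example, so check the model repo's file listing first:

```javascript
import { Pipeline } from 'react-native-transformers';

// Swap in a smaller model from the table above.
// The .onnx path is illustrative -- verify the actual filename
// in the model repository before using it.
await Pipeline.TextGeneration.init(
  'Xenova/distilgpt2_onnx-quantized',
  'onnx/decoder_model_merged_quantized.onnx', // hypothetical path
  { fetch: async (url) => (await fetch(url)).url }
);
```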
## API Reference
@@ -229,12 +227,12 @@ This project is licensed under the MIT License. See the [LICENSE](LICENSE) file
## Acknowledgements

- [ONNX Runtime](https://onnxruntime.ai/) for efficient model execution on mobile devices
-- [@xenova/transformers](https://www.npmjs.com/package/@xenova/transformers) for transformer model implementations
+- [@huggingface/transformers](github:mybigday/transformers.js-rn#merge) for transformer model implementations
- [Hugging Face](https://huggingface.co/) for providing pre-trained models and model hosting

## External Links

- [Expo Plugins Documentation](https://docs.expo.dev/guides/config-plugins/)
- [ONNX Runtime Documentation](https://onnxruntime.ai/)
- [Hugging Face Model Hub](https://huggingface.co/models)
-- [ONNX Format Documentation](https://onnx.ai/onnx/intro/)
+- [ONNX Format Documentation](https://onnx.ai/onnx/intro/)