Link the `onnxruntime-react-native` library:

```sh
npx react-native link onnxruntime-react-native
```

</details>

<details>

Add the Expo plugin configuration in `app.json` or `app.config.js`:

```json
{
  "expo": {
    "plugins": ["onnxruntime-react-native"]
  }
}
```

</details>

### 4. Babel Configuration
Add `babel-plugin-transform-import-meta` to your `babel.config.js`:

```javascript
module.exports = {
  // ... your existing config
  plugins: [
    // ... your existing plugins
    'babel-plugin-transform-import-meta',
  ],
};
```

### 5. Development Client

You can set up a development client using one of these methods (example commands are sketched below):

- **[EAS Development Build](https://docs.expo.dev/develop/development-builds/introduction/)**: Create a custom development client using EAS Build
- **[Expo Prebuild](https://docs.expo.dev/workflow/prebuild/)**: Eject to a bare workflow to access native code
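
For example (a sketch assuming the EAS CLI is installed and logged in, and that your `eas.json` defines a `development` profile):

```sh
# EAS development build; the profile name is an assumption -- match your eas.json
eas build --profile development --platform android

# Or eject to the bare workflow and run the native project directly
npx expo prebuild
npx expo run:android
```
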
## Usage

### Text Generation

```javascript
import React, { useState } from 'react';
import { View, Text, Button } from 'react-native';
import { Pipeline } from 'react-native-transformers';

export default function App() {
  const [output, setOutput] = useState('');
  const [isLoading, setIsLoading] = useState(false);
  const [isModelReady, setIsModelReady] = useState(false);

  const loadModel = async () => {
    setIsLoading(true);
    try {
      // Load a small Llama model
      await Pipeline.TextGeneration.init(
        'Felladrin/onnx-Llama-160M-Chat-v1',
        'onnx/decoder_model_merged.onnx',
        {
          // The fetch function is required to download model files
          fetch: async (url) => {
            // In a real app, you might want to cache the downloaded files
            const response = await fetch(url);
            return response.url;
          },
        }
      );
      setIsModelReady(true);
    } catch (error) {
      console.error('Error loading model:', error);
      alert('Failed to load model: ' + error.message);
    } finally {
      setIsLoading(false);
    }
  };

  const generateText = () => {
    setOutput('');
    // Generate text from the prompt and update the UI as tokens are generated
    Pipeline.TextGeneration.generate(
      'Write a short poem about programming:',
      (text) => setOutput(text)
    );
  };

  return (
    <View style={{ padding: 20 }}>
      <Button
        title={isModelReady ? 'Generate Text' : 'Load Model'}
        onPress={isModelReady ? generateText : loadModel}
        disabled={isLoading}
      />
      <Text style={{ marginTop: 20 }}>
        {output || 'Generated text will appear here'}
      </Text>
    </View>
  );
}
```

For Expo applications, use `expo-file-system` to download models with progress tracking:

```javascript
import * as FileSystem from 'expo-file-system';
import { Pipeline } from 'react-native-transformers';

// In your model loading function
await Pipeline.TextGeneration.init('model-repo', 'model-file', {
  fetch: async (url) => {
    const localPath = FileSystem.cacheDirectory + url.split('/').pop();

    // Check if file already exists
    const fileInfo = await FileSystem.getInfoAsync(localPath);
    if (fileInfo.exists) {
      console.log('Model already downloaded, using cached version');
      return localPath;
    }

    // Download the file with progress tracking
    const downloadResumable = FileSystem.createDownloadResumable(
      url,
      localPath,
      {},
      (progress) => {
        const percentComplete =
          progress.totalBytesWritten / progress.totalBytesExpectedToWrite;
        console.log(
          `Download progress: ${(percentComplete * 100).toFixed(1)}%`
        );
      }
    );

    const result = await downloadResumable.downloadAsync();
    return result?.uri;
  },
});
```

## Supported Models

`react-native-transformers` works with ONNX-formatted models from Hugging Face. Here are some recommended models based on size and performance:

| Model | Type | Size | Description |
| ----- | ---- | ---- | ----------- |
| [Felladrin/onnx-Llama-160M-Chat-v1](https://huggingface.co/Felladrin/onnx-Llama-160M-Chat-v1) | Text Generation | ~300MB | Small Llama model (160M parameters) |
| [microsoft/Phi-3-mini-4k-instruct-onnx-web](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-onnx-web) | Text Generation | ~1.5GB | Microsoft's Phi-3-mini model |
| [Xenova/distilgpt2_onnx-quantized](https://huggingface.co/Xenova/distilgpt2_onnx-quantized) | Text Generation | ~165MB | Quantized DistilGPT-2 |
| [Xenova/tiny-mamba-onnx](https://huggingface.co/Xenova/tiny-mamba-onnx) | Text Generation | ~85MB | Tiny Mamba model |
| [Xenova/all-MiniLM-L6-v2-onnx](https://huggingface.co/Xenova/all-MiniLM-L6-v2-onnx) | Text Embedding | ~80MB | Sentence embedding model |
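
Any of the text-generation models above can be loaded with the pipeline from the Usage section by passing its repo ID and the path of the ONNX file inside that repo. A minimal sketch (the file path shown is the one used earlier for the Llama model; other repos name their ONNX files differently, so check the model's file listing on Hugging Face):

```javascript
// Sketch: reuse the init pattern from the Usage section with a table entry.
await Pipeline.TextGeneration.init(
  'Felladrin/onnx-Llama-160M-Chat-v1', // repo ID from the table above
  'onnx/decoder_model_merged.onnx', // ONNX file path inside the repo
  { fetch: async (url) => (await fetch(url)).url } // resolves model file URLs
);
```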

## API Reference

## License

This project is licensed under the MIT License. See the [LICENSE](LICENSE) file for details.

## Acknowledgements

- [ONNX Runtime](https://onnxruntime.ai/) for efficient model execution on mobile devices
- [@huggingface/transformers](https://github.com/mybigday/transformers.js-rn/tree/merge) for transformer model implementations
- [Hugging Face](https://huggingface.co/) for providing pre-trained models and model hosting

## External Links

- [Expo Plugins Documentation](https://docs.expo.dev/guides/config-plugins/)
- [ONNX Runtime Documentation](https://onnxruntime.ai/)
- [Hugging Face Model Hub](https://huggingface.co/models)
- [ONNX Format Documentation](https://onnx.ai/onnx/intro/)