Update README.md
README.md CHANGED
@@ -9187,6 +9187,39 @@ Query: Where can I get the best tacos?
tensor(0.2797) Mexico City of Course!
tensor(0.1250) The Data Cloud!
```

### Using Huggingface Transformers.js

If you haven't already, you can install the [Transformers.js](https://huggingface.co/docs/transformers.js) JavaScript library from [NPM](https://www.npmjs.com/package/@huggingface/transformers) using:
```bash
npm i @huggingface/transformers
```

You can then use the model for retrieval, as follows:

```js
import { pipeline, dot } from '@huggingface/transformers';

// Create feature extraction pipeline
const extractor = await pipeline('feature-extraction', 'Snowflake/snowflake-arctic-embed-m-v2.0', {
    dtype: 'q8',
});

// Generate sentence embeddings
const sentences = [
    'query: what is snowflake?',
    'The Data Cloud!',
    'Mexico City of Course!',
]
const output = await extractor(sentences, { normalize: true, pooling: 'cls' });

// Compute similarity scores
const [source_embeddings, ...document_embeddings] = output.tolist();
const similarities = document_embeddings.map(x => dot(source_embeddings, x));
console.log(similarities); // [0.24783534471401417, 0.05313122704326892]
```
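
To turn these similarity scores into a ranked retrieval result, the documents can be paired with their scores and sorted best-first. A minimal sketch, reusing the `sentences` and `similarities` variables from the example above:

```js
// Rank the documents for the query by descending similarity.
// Assumes `sentences` and `similarities` from the previous snippet are in scope;
// sentences[0] is the query, so the documents start at index 1.
const ranked = sentences
    .slice(1)
    .map((text, i) => ({ text, score: similarities[i] }))
    .sort((a, b) => b.score - a.score);

console.log(ranked);
// [
//   { text: 'The Data Cloud!', score: 0.24783534471401417 },
//   { text: 'Mexico City of Course!', score: 0.05313122704326892 }
// ]
```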

## Contact