diff --git a/.vitepress/config.ts b/.vitepress/config.ts
index 4d0981b7..422b79a2 100644
--- a/.vitepress/config.ts
+++ b/.vitepress/config.ts
@@ -321,6 +321,7 @@ export default defineConfig({
]
},
themeConfig: {
+ logo: "/icon.svg",
editLink: {
pattern: "https://github.com/withcatai/node-llama-cpp/edit/master/docs/:path"
},
diff --git a/.vitepress/theme/style.css b/.vitepress/theme/style.css
index dc85fa3d..fa10533a 100644
--- a/.vitepress/theme/style.css
+++ b/.vitepress/theme/style.css
@@ -3,6 +3,7 @@
--vp-c-brand-2: #cc6e3a;
--vp-c-brand-3: #cd8156;
--vp-c-brand-soft: rgb(255 156 100 / 14%);
+ color-scheme: light;
}
.dark {
@@ -10,6 +11,7 @@
--vp-c-brand-2: #e78e5c;
--vp-c-brand-3: #dd773e;
--vp-c-brand-soft: rgb(255 156 100 / 16%);
+ color-scheme: dark;
}
:root {
@@ -74,6 +76,13 @@
--vp-c-neutral-inverse: rgb(0 0 0 / 60%);
}
+.VPNavBarTitle>.title>.logo {
+ margin-bottom: -2px;
+}
+.VPNavBar.home .VPNavBarTitle>.title>.logo {
+ display: none;
+}
+
.VPNavBar:before {
display: block;
position: absolute;
diff --git a/assets/icon.svg b/assets/icon.svg
new file mode 100644
index 00000000..0d86d925
--- /dev/null
+++ b/assets/icon.svg
@@ -0,0 +1,83 @@
+
diff --git a/assets/icon.v3.svg b/assets/icon.v3.svg
new file mode 100644
index 00000000..3210f94e
--- /dev/null
+++ b/assets/icon.v3.svg
@@ -0,0 +1,9 @@
+
diff --git a/docs/guide/choosing-a-model.md b/docs/guide/choosing-a-model.md
index d1a4891a..a27e3297 100644
--- a/docs/guide/choosing-a-model.md
+++ b/docs/guide/choosing-a-model.md
@@ -83,7 +83,7 @@ npx --no node-llama-cpp inspect estimate
```
:::
-### What do you need this model for? (chat, code completion, analyzing data, classification, etc.)
+### What do you need this model for? (chat, code completion, analyzing data, classification, etc.) {#model-purpose}
There are plenty of models with different areas of expertise and capabilities.
When you choose a model that is more specialized in the task you need it for, it will usually perform better than a general model.
diff --git a/docs/guide/downloading-models.md b/docs/guide/downloading-models.md
index 1a112b49..eadd0f36 100644
--- a/docs/guide/downloading-models.md
+++ b/docs/guide/downloading-models.md
@@ -126,7 +126,7 @@ or the [`resolveModelFile`](../api/functions/resolveModelFile.md) method will au
Alternatively, you can use the token in the [`tokens`](../api/type-aliases/ModelDownloaderOptions.md#tokens) option when using [`createModelDownloader`](../api/functions/createModelDownloader.md) or [`resolveModelFile`](../api/functions/resolveModelFile.md).
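+
+For example, here's a minimal sketch of passing a token this way (the model URI and token value are placeholders, and the token is assumed to go under a `huggingFace` key in the [`tokens`](../api/type-aliases/ModelDownloaderOptions.md#tokens) option):
+```typescript
+import {createModelDownloader} from "node-llama-cpp";
+
+// download a gated model using a Hugging Face token
+const downloader = await createModelDownloader({
+    modelUri: "hf:user/model/model-file.gguf",
+    dirPath: "./models",
+    tokens: {huggingFace: "hf_..."}
+});
+const modelPath = await downloader.download();
+```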
## Inspecting Remote Models
-You can inspect the metadata of a remote model without downloading it by either using the [`inspect gguf` command](../cli/inspect/gguf.md) with a URL,
+You can inspect the metadata of a remote model without downloading it by either using the [`inspect gguf`](../cli/inspect/gguf.md) command with a URL,
or using the [`readGgufFileInfo`](../api/functions/readGgufFileInfo.md) method with a URL:
```typescript
import {readGgufFileInfo} from "node-llama-cpp";
@@ -140,7 +140,7 @@ const modelMetadata = await readGgufFileInfo("");
It's handy to check a remote model's compatibility with your machine's hardware before downloading it,
so you won't waste time downloading a model that won't run on your machine.
-You can do so using the [`inspect estimate` command](../cli/inspect/estimate.md) with a URL:
+You can do so using the [`inspect estimate`](../cli/inspect/estimate.md) command with a URL:
```shell
npx --no node-llama-cpp inspect estimate
```
diff --git a/docs/guide/index.md b/docs/guide/index.md
index cfc5c7ec..21e42fc5 100644
--- a/docs/guide/index.md
+++ b/docs/guide/index.md
@@ -55,6 +55,8 @@ We recommend getting a GGUF model from either [Michael Radermacher on Hugging Fa
We recommend starting with a small model that doesn't have a lot of parameters, just to ensure everything works, so try downloading a `7B`/`8B` parameter model first (search for models with both `7B`/`8B` and `GGUF` in their name).
+To be able to chat with the model, make sure you [choose an Instruct model](./choosing-a-model.md#model-purpose) by looking for `Instruct` or `it` in the model name.
+
For improved download speeds, you can use the [`pull`](../cli/pull.md) command to download a model:
```shell
npx --no node-llama-cpp pull --dir ./models
diff --git a/docs/guide/troubleshooting.md b/docs/guide/troubleshooting.md
index 9717c9ed..304da350 100644
--- a/docs/guide/troubleshooting.md
+++ b/docs/guide/troubleshooting.md
@@ -151,3 +151,8 @@ const context = await model.createContext({
If you find that the memory estimation is indeed inaccurate,
please [open a new issue on GitHub](https://github.com/withcatai/node-llama-cpp/issues/new/choose) with a link to the model you're using and the output of the [`inspect measure`](../cli/inspect/measure.md) command.
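+
+For reference, a hypothetical invocation of the [`inspect measure`](../cli/inspect/measure.md) command (assuming it takes a model path like the `inspect estimate` command does; the path here is a placeholder):
+```shell
+npx --no node-llama-cpp inspect measure ./models/model-file.gguf
+```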
+
+## Getting a `The specified module could not be found \\?\C:\Users\Administrator\AppData\Roaming\npm\node_modules` Error on a Windows Machine
+The most common cause of this issue is running `npm install` as the `Administrator` user and then running the code as a different user.
+
+Make sure you don't use the `Administrator` user to run `npm install` or to run the code.
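+
+For example, you can check which user your terminal is currently running as with the `whoami` command before running `npm install`:
+```shell
+whoami
+```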
diff --git a/docs/public/icon.svg b/docs/public/icon.svg
new file mode 100644
index 00000000..3210f94e
--- /dev/null
+++ b/docs/public/icon.svg
@@ -0,0 +1,9 @@
+
diff --git a/test/modelDependent/codegemma/parallel.test.ts b/test/modelDependent/codegemma/parallel.test.ts
index 3a3c9cec..da516681 100644
--- a/test/modelDependent/codegemma/parallel.test.ts
+++ b/test/modelDependent/codegemma/parallel.test.ts
@@ -166,10 +166,10 @@ describe("CodeGemma", () => {
});
const resPromise = completion.generateCompletion("const singleLineArrayFromOneToHundred = [1, 2, 3, ", {
- maxTokens: 40
+ maxTokens: 20
});
const resPromise2 = completion2.generateCompletion("const singleLineArrayFromOneToHundred = [100, 99, 98, 97, 96, ", {
- maxTokens: 40
+ maxTokens: 20
});
const [