Commit

Merge branch 'main' into patch-1
Vaibhavs10 authored Aug 23, 2024
2 parents 74891b9 + 6e47ae8 commit af32ffa
Showing 5 changed files with 38 additions and 17 deletions.
2 changes: 1 addition & 1 deletion packages/tasks/package.json
@@ -1,7 +1,7 @@
 {
 	"name": "@huggingface/tasks",
 	"packageManager": "[email protected]",
-	"version": "0.11.10",
+	"version": "0.11.11",
 	"description": "List of ML tasks for huggingface.co/tasks",
 	"repository": "https://github.com/huggingface/huggingface.js.git",
 	"publishConfig": {
6 changes: 6 additions & 0 deletions packages/tasks/src/dataset-libraries.ts
@@ -77,6 +77,12 @@ export const DATASET_LIBRARIES_UI_ELEMENTS = {
 		repoUrl: "https://github.com/pola-rs/polars",
 		docsUrl: "https://huggingface.co/docs/hub/datasets-polars",
 	},
+	duckdb: {
+		prettyLabel: "DuckDB",
+		repoName: "duckdb",
+		repoUrl: "https://github.com/duckdb/duckdb",
+		docsUrl: "https://huggingface.co/docs/hub/datasets-duckdb",
+	},
 } satisfies Record<string, DatasetLibraryUiElement>;
 
 /// List of the dataset libraries supported by the Hub
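For context, the new duckdb entry follows the same shape as the existing DatasetLibraryUiElement entries. Below is a minimal, hypothetical sketch of how such an entry might be consumed to render a docs link; the interface is reduced to the four fields visible in the hunk, and the helper function is illustrative, not part of the package.

// Hypothetical consumer sketch; only the four fields visible in the hunk are assumed.
interface DatasetLibraryUiElement {
	prettyLabel: string;
	repoName: string;
	repoUrl: string;
	docsUrl: string;
}

const DATASET_LIBRARIES_UI_ELEMENTS: Record<string, DatasetLibraryUiElement> = {
	duckdb: {
		prettyLabel: "DuckDB",
		repoName: "duckdb",
		repoUrl: "https://github.com/duckdb/duckdb",
		docsUrl: "https://huggingface.co/docs/hub/datasets-duckdb",
	},
};

// Render a "works with this library" line for a dataset page (illustrative only).
function libraryDocsLine(key: string): string | undefined {
	const el = DATASET_LIBRARIES_UI_ELEMENTS[key];
	return el ? `${el.prettyLabel} (docs: ${el.docsUrl}, source: ${el.repoUrl})` : undefined;
}

console.log(libraryDocsLine("duckdb"));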
25 changes: 15 additions & 10 deletions packages/tasks/src/local-apps.ts
@@ -58,10 +58,15 @@ export type LocalApp = {
 	}
 );
 
+// eslint-disable-next-line @typescript-eslint/no-unused-vars
 function isGgufModel(model: ModelData) {
 	return model.tags.includes("gguf");
 }
 
+function isLlamaCppGgufModel(model: ModelData) {
+	return !!model.gguf?.context_length;
+}
+
 const snippetLlamacpp = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
 	const command = (binary: string) =>
 		[
@@ -138,56 +143,56 @@ export const LOCAL_APPS = {
 		prettyLabel: "llama.cpp",
 		docsUrl: "https://github.com/ggerganov/llama.cpp",
 		mainTask: "text-generation",
-		displayOnModelPage: isGgufModel,
+		displayOnModelPage: isLlamaCppGgufModel,
 		snippet: snippetLlamacpp,
 	},
 	lmstudio: {
 		prettyLabel: "LM Studio",
 		docsUrl: "https://lmstudio.ai",
 		mainTask: "text-generation",
-		displayOnModelPage: isGgufModel,
+		displayOnModelPage: isLlamaCppGgufModel,
 		deeplink: (model, filepath) =>
 			new URL(`lmstudio://open_from_hf?model=${model.id}${filepath ? `&file=${filepath}` : ""}`),
 	},
 	localai: {
 		prettyLabel: "LocalAI",
 		docsUrl: "https://github.com/mudler/LocalAI",
 		mainTask: "text-generation",
-		displayOnModelPage: isGgufModel,
+		displayOnModelPage: isLlamaCppGgufModel,
 		snippet: snippetLocalAI,
 	},
 	jan: {
 		prettyLabel: "Jan",
 		docsUrl: "https://jan.ai",
 		mainTask: "text-generation",
-		displayOnModelPage: isGgufModel,
+		displayOnModelPage: isLlamaCppGgufModel,
 		deeplink: (model) => new URL(`jan://models/huggingface/${model.id}`),
 	},
 	backyard: {
 		prettyLabel: "Backyard AI",
 		docsUrl: "https://backyard.ai",
 		mainTask: "text-generation",
-		displayOnModelPage: isGgufModel,
+		displayOnModelPage: isLlamaCppGgufModel,
 		deeplink: (model) => new URL(`https://backyard.ai/hf/model/${model.id}`),
 	},
 	sanctum: {
 		prettyLabel: "Sanctum",
 		docsUrl: "https://sanctum.ai",
 		mainTask: "text-generation",
-		displayOnModelPage: isGgufModel,
+		displayOnModelPage: isLlamaCppGgufModel,
 		deeplink: (model) => new URL(`sanctum://open_from_hf?model=${model.id}`),
 	},
 	jellybox: {
 		prettyLabel: "Jellybox",
 		docsUrl: "https://jellybox.com",
 		mainTask: "text-generation",
 		displayOnModelPage: (model) =>
-			isGgufModel(model) ||
+			isLlamaCppGgufModel(model) ||
 			(model.library_name === "diffusers" &&
 				model.tags.includes("safetensors") &&
 				(model.pipeline_tag === "text-to-image" || model.tags.includes("lora"))),
 		deeplink: (model) => {
-			if (isGgufModel(model)) {
+			if (isLlamaCppGgufModel(model)) {
 				return new URL(`jellybox://llm/models/huggingface/LLM/${model.id}`);
 			} else if (model.tags.includes("lora")) {
 				return new URL(`jellybox://image/models/huggingface/ImageLora/${model.id}`);
@@ -200,15 +205,15 @@ export const LOCAL_APPS = {
 		prettyLabel: "Msty",
 		docsUrl: "https://msty.app",
 		mainTask: "text-generation",
-		displayOnModelPage: isGgufModel,
+		displayOnModelPage: isLlamaCppGgufModel,
 		deeplink: (model) => new URL(`msty://models/search/hf/${model.id}`),
 	},
 	recursechat: {
 		prettyLabel: "RecurseChat",
 		docsUrl: "https://recurse.chat",
 		mainTask: "text-generation",
 		macOSOnly: true,
-		displayOnModelPage: isGgufModel,
+		displayOnModelPage: isLlamaCppGgufModel,
 		deeplink: (model) => new URL(`recursechat://new-hf-gguf-model?hf-model-id=${model.id}`),
 	},
 	drawthings: {
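The practical effect of swapping isGgufModel for isLlamaCppGgufModel: the llama.cpp-style local apps are now listed only when the Hub has parsed GGUF metadata with a context length for the repo, not whenever a repo merely carries the gguf tag. A small self-contained sketch of the two checks, with ModelData trimmed to the fields they touch and made-up sample repos:

// Self-contained sketch; ModelData is reduced to the fields used by the two predicates,
// and the sample repos and numbers below are invented for illustration.
interface ModelData {
	id: string;
	tags: string[];
	gguf?: { total: number; architecture?: string; context_length?: number };
}

function isGgufModel(model: ModelData): boolean {
	return model.tags.includes("gguf");
}

function isLlamaCppGgufModel(model: ModelData): boolean {
	return !!model.gguf?.context_length;
}

const taggedOnly: ModelData = { id: "someone/model-GGUF", tags: ["gguf"] };
const withMetadata: ModelData = {
	id: "someone/model-GGUF",
	tags: ["gguf"],
	gguf: { total: 291, architecture: "llama", context_length: 8192 },
};

console.log(isGgufModel(taggedOnly), isLlamaCppGgufModel(taggedOnly)); // true false
console.log(isGgufModel(withMetadata), isLlamaCppGgufModel(withMetadata)); // true true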
10 changes: 10 additions & 0 deletions packages/tasks/src/model-data.ts
@@ -109,6 +109,16 @@ export interface ModelData {
 	 * Example: transformers, SpeechBrain, Stanza, etc.
 	 */
 	library_name?: string;
+	safetensors?: {
+		parameters: Record<string, number>;
+		total: number;
+		sharded: boolean;
+	};
+	gguf?: {
+		total: number;
+		architecture?: string;
+		context_length?: number;
+	};
 }
 
 /**
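Both new ModelData fields are optional, so consumers have to handle their absence. The sketch below shows one way a caller might summarize the metadata; reading safetensors.parameters as a per-dtype parameter count is an assumption drawn from the Hub API rather than something stated in this diff, and the sample values are made up.

// Sketch of consuming the new optional blocks; the dtype interpretation of
// safetensors.parameters and all sample values are assumptions, see the note above.
interface ModelData {
	id: string;
	safetensors?: { parameters: Record<string, number>; total: number; sharded: boolean };
	gguf?: { total: number; architecture?: string; context_length?: number };
}

function describeWeights(model: ModelData): string {
	if (model.gguf?.context_length) {
		return `GGUF, arch=${model.gguf.architecture ?? "unknown"}, ctx=${model.gguf.context_length}`;
	}
	if (model.safetensors) {
		const dtypes = Object.keys(model.safetensors.parameters).join("+");
		return `safetensors (${dtypes}), ${model.safetensors.total} params${model.safetensors.sharded ? ", sharded" : ""}`;
	}
	return "no weight metadata available";
}

console.log(
	describeWeights({
		id: "someone/some-model",
		safetensors: { parameters: { BF16: 8_030_000_000 }, total: 8_030_000_000, sharded: true },
	})
);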
12 changes: 6 additions & 6 deletions packages/tasks/src/model-libraries-snippets.ts
@@ -270,12 +270,12 @@ llm = Llama.from_pretrained(
 )
 llm.create_chat_completion(
-	messages = [
-		{
-			"role": "user",
-			"content": "What is the capital of France?"
-		}
-	]
+	messages = [
+		{
+			"role": "user",
+			"content": "What is the capital of France?"
+		}
+	]
 )`,
 ];

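The last change swaps six lines for six lines of identical visible text, so it appears to adjust only the indentation of the messages block inside the template literal. That matters because template-literal contents are emitted verbatim into the copyable Python snippet shown on model pages. A simplified stand-in (not the actual snippet function in this file) illustrating the point:

// Simplified stand-in for a snippet builder; not the actual function in this file.
// Whatever indentation sits inside the backticks is copied verbatim into the
// Python snippet shown to users, so tidying the TypeScript source changes the output.
const exampleSnippet = (modelId: string): string =>
	`from llama_cpp import Llama

llm = Llama.from_pretrained(
	repo_id="${modelId}",
)

llm.create_chat_completion(
	messages = [
		{
			"role": "user",
			"content": "What is the capital of France?"
		}
	]
)`;

console.log(exampleSnippet("someone/some-model-GGUF"));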
