Skip to content

Commit

Permalink
Add missing parameters for Table Question Answering (#1053)
Browse files Browse the repository at this point in the history
  • Loading branch information
beurkinger authored Dec 2, 2024
1 parent 2a99455 commit a418861
Show file tree
Hide file tree
Showing 2 changed files with 43 additions and 4 deletions.
28 changes: 25 additions & 3 deletions packages/tasks/src/tasks/table-question-answering/inference.ts
Original file line number Diff line number Diff line change
Expand Up @@ -14,9 +14,7 @@ export interface TableQuestionAnsweringInput {
/**
* Additional inference parameters for Table Question Answering
*/
parameters?: {
[key: string]: unknown;
};
parameters?: TableQuestionAnsweringParameters;
[property: string]: unknown;
}
/**
Expand All @@ -35,6 +33,30 @@ export interface TableQuestionAnsweringInputData {
};
[property: string]: unknown;
}
/**
 * Additional inference parameters for Table Question Answering
 */
export interface TableQuestionAnsweringParameters {
/**
 * Activates and controls padding.
 *
 * @defaultValue `"do_not_pad"` per the task's JSON schema.
 */
padding?: Padding;
/**
 * Whether to do inference sequentially or as a batch. Batching is faster, but models like
 * SQA require the inference to be done sequentially to extract relations within sequences,
 * given their conversational nature.
 *
 * @defaultValue `false` per the task's JSON schema.
 */
sequential?: boolean;
/**
 * Activates and controls truncation.
 *
 * @defaultValue `false` per the task's JSON schema.
 */
truncation?: boolean;
// Open index signature: the generated spec allows extra provider-specific keys.
[property: string]: unknown;
}
/**
 * Activates and controls padding.
 * One of the three padding strategies accepted by the schema:
 * "do_not_pad", "longest", or "max_length" (presumably matching the
 * Hugging Face tokenizer padding strategies — confirm against the task spec).
 */
export type Padding = "do_not_pad" | "longest" | "max_length";
// The task output is a list of answer elements (one TableQuestionAnsweringOutputElement
// per answer; the element type is declared further down in this file).
export type TableQuestionAnsweringOutput = TableQuestionAnsweringOutputElement[];
/**
* Outputs of inference for the Table Question Answering task
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,24 @@
"TableQuestionAnsweringParameters": {
"title": "TableQuestionAnsweringParameters",
"type": "object",
"properties": {}
"properties": {
"padding": {
"type": "string",
"default": "do_not_pad",
"description": "Activates and controls padding.",
"enum": ["do_not_pad", "longest", "max_length"]
},
"sequential": {
"type": "boolean",
"default": false,
"description": "Whether to do inference sequentially or as a batch. Batching is faster, but models like SQA require the inference to be done sequentially to extract relations within sequences, given their conversational nature."
},
"truncation": {
"type": "boolean",
"default": false,
"description": "Activates and controls truncation."
}
}
}
},
"required": ["inputs"]
Expand Down

0 comments on commit a418861

Please sign in to comment.