gemi command to run gemini prompt/chat from cmd line
paulirish committed Jun 1, 2024
1 parent 3322922 commit 3f59804
Showing 2 changed files with 42 additions and 8 deletions.
45 changes: 39 additions & 6 deletions bin/render-streaming-markdown.ts
@@ -2,20 +2,53 @@
 // cat README.md | deno run --allow-env --allow-read --allow-run bin/render-streaming-markdown.ts
 //
 
-import $ from "jsr:@david/[email protected]"
+import $ from 'jsr:@david/[email protected]'
 // todo: replace dax with this:
 // import { exec } from 'https://deno.land/std/process.mod.ts';
 import { writeAllSync } from 'https://deno.land/[email protected]/streams/mod.ts';
 
+let inputBuffer = ""
+
 const decoder = new TextDecoder()
 const encoder = new TextEncoder()
 
+// This style works well for prompt.. but not chat
 for await (const chunk of Deno.stdin.readable) {
-  const decoded = decoder.decode(chunk);
-  inputBuffer += decoded
-  // console.log("$$$$$$$$$$", decoder.decode(chunk), "$$$zzz$$$")
+  // show immediately, but meanwhile…
   writeAllSync(Deno.stdout, chunk);
+  // Collect it.
+  inputBuffer += decoder.decode(chunk);
 }
 
 // --style auto is there to force it to output styled https://github.com/charmbracelet/glow/blob/2430b0a/main.go#L158
-const output = await $`glow --style auto`.stdinText(decoded).text()
+// and now re-render it.
+if (inputBuffer) {
+  console.log('⬇️… and now rendered…⬇️');
+  const output = await $`glow --style auto`.stdinText(inputBuffer).text()
   writeAllSync(Deno.stdout, encoder.encode(output));
+}
+
+
+// This is a newline-buffered variant to avoid getting extra newlines in the output because we send it to glow too eagerly
+// it works but... the next problem is backtick codeblocks are broken up and... i'm sure there's more.
+// definitely need a better solution
+
+// let remainingContent = '';
+// for await (const chunk of Deno.stdin.readable) {
+//   const decoded = remainingContent + decoder.decode(chunk);
+
+//   const lastNewline = decoded.lastIndexOf("\n");
+//   if (lastNewline !== -1) {
+//     // Flush everything up to it
+//     const output = await $`glow --style auto`.stdinText(decoded.substring(0, lastNewline + 1)).text()
+//     writeAllSync(Deno.stdout, encoder.encode(output));
+
+//     // Hold onto the remaining content to flush with the next chunk
+//     remainingContent = decoded.substring(lastNewline + 1);
+//   }
+// }
+
+// // Flush any remaining content
+// if (remainingContent) {
+//   const output = await $`glow --style auto`.stdinText(remainingContent).text()
+//   writeAllSync(Deno.stdout, encoder.encode(output));
+// }
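The commented-out variant ends by naming the remaining problem: flushing at every newline still splits backtick code blocks before glow sees them closed. One possible next step, sketched below, is to buffer complete lines while tracking ``` fence state and only flush to glow while no fence is open. This sketch is mine, not part of the commit; the per-chunk flush policy, the fence toggle, and the file name fence-aware-render.ts are assumptions, untested against glow.

// fence-aware-render.ts — hypothetical follow-up, run with the same permissions:
// cat README.md | deno run --allow-env --allow-read --allow-run fence-aware-render.ts
import $ from 'jsr:@david/[email protected]'
import { writeAllSync } from 'https://deno.land/[email protected]/streams/mod.ts';

const decoder = new TextDecoder()
const encoder = new TextEncoder()

let pending = ''        // complete lines not yet handed to glow
let partial = ''        // trailing text with no newline yet
let insideFence = false // between an opening and closing ``` line?

async function flush() {
  if (!pending) return
  const output = await $`glow --style auto`.stdinText(pending).text()
  writeAllSync(Deno.stdout, encoder.encode(output))
  pending = ''
}

for await (const chunk of Deno.stdin.readable) {
  partial += decoder.decode(chunk, { stream: true })
  let newlineAt
  while ((newlineAt = partial.indexOf('\n')) !== -1) {
    const line = partial.slice(0, newlineAt + 1)
    partial = partial.slice(newlineAt + 1)
    // A line starting with ``` toggles code-fence state.
    if (line.trimStart().startsWith('```')) insideFence = !insideFence
    pending += line
  }
  // Flush once per chunk, and only while no fence is open.
  if (!insideFence) await flush()
}

// Flush whatever remains: an unterminated line or an unclosed fence.
pending += partial
await flush()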
5 changes: 3 additions & 2 deletions fish/aliases.fish
@@ -142,10 +142,11 @@ alias update_brew_npm_gem='brew_update; npm install npm -g; npm update -g; sudo
 abbr gemini "llm -m gemini-1.5-pro-latest"
 
 function gemi
+    # using https://github.com/simonw/llm-gemini and llm
     if test -n "$argv[1]"
-        llm prompt -m gemini-1.5-pro-latest $argv[1] | deno run --allow-env --allow-read --allow-run bin/render-streaming-markdown.ts
+        llm prompt -m gemini-1.5-pro-latest $argv[1] | deno run --allow-env --allow-read --allow-run bin/render-streaming-markdown.ts
     else
-        llm chat --continue -m gemini-1.5-pro-latest | deno run --allow-env --allow-read --allow-run bin/render-streaming-markdown.ts
+        llm chat --continue -m gemini-1.5-pro-latest
     end
 end
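A hedged usage sketch, assuming the llm CLI with the simonw/llm-gemini plugin is installed and a Gemini API key is configured (the prompt text is made up):

gemi "compare fish abbr and alias"    # one-shot prompt, re-rendered through glow
gemi                                  # interactive chat, continuing the previous conversation

Note the chat branch now skips the renderer pipeline entirely, consistent with the "works well for prompt.. but not chat" comment in the script above.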

