Skip to content

Commit

Permalink
Merge remote-tracking branch 'upstream/master' into flushable-debounce
Browse files Browse the repository at this point in the history
* upstream/master: (85 commits)
  History and minor UI fixes (gitbutlerapp#3797)
  History fixes (gitbutlerapp#3796)
  make GitGuardian happy
  bump playwright version
  add vitest exclude for node_modules
  Remove unused exports from ipc
  Update auth.ts to use absolute import
  Use correct invoke function
  setup for async tasks on oplog update
  formatting
  save and restore the gitbutler/integration branch
  remove unnecessary CSPs
  Fix ollama request security
  fix: forgot the port
  feat: Set default snapshot lines threshold to 20 when undefined
  fix: Updated connect-src to allow local connections for ollama
  Minor tweaks to CSS and use real branch name
  Unmount history component when hidden
  Fix bug leaving PR button visible
  Small refactor of history component
  ...
  • Loading branch information
anaisbetts committed May 21, 2024
2 parents 2ff2b1f + 6e62729 commit b4b0de6
Show file tree
Hide file tree
Showing 104 changed files with 3,959 additions and 1,876 deletions.
52 changes: 52 additions & 0 deletions .github/workflows/test-e2e.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
# End-to-end tests with Playwright: runs nightly, on demand, and on pushes/PRs to master.
name: Playwright Tests
on:
  # Nightly run at 06:00 UTC.
  schedule:
    - cron: "0 6 * * *"
  # Allow manual runs from the Actions tab.
  workflow_dispatch:
  push:
    branches: [master]
  pull_request:
    branches: [master]

jobs:
  playwright:
    timeout-minutes: 60
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: pnpm/action-setup@v3
        with:
          version: 8
      - uses: actions/setup-node@v4
        with:
          node-version: lts/*
          cache: "pnpm"
          cache-dependency-path: |
            pnpm-lock.yaml
      - name: Install dependencies
        run: pnpm install
      # Read the pinned @playwright/test version from app/package.json
      # (substring(1) strips the leading "^") so the browser cache key
      # always matches the installed package.
      - name: Get installed Playwright version
        id: playwright-version
        run: echo "PLAYWRIGHT_VERSION=$(node -e "console.log(require('./app/package.json').devDependencies['@playwright/test'].substring(1))")" >> $GITHUB_ENV
      - name: Cache playwright binaries
        uses: actions/cache@v4
        id: playwright-cache
        with:
          path: |
            ~/.cache/ms-playwright
          key: ${{ runner.os }}-playwright-${{ env.PLAYWRIGHT_VERSION }}
      # Only download browsers when the cache missed.
      - name: Install Playwright Browsers
        if: steps.playwright-cache.outputs.cache-hit != 'true'
        run: pnpm dlx "playwright@$PLAYWRIGHT_VERSION" install --with-deps chromium
      - name: Run Tests
        run: cd app && pnpm test:e2e
        env:
          CI: true
      # Upload traces even for failed runs (but not cancelled ones);
      # harmless no-op when no trace.zip files were produced.
      - name: Upload Artifacts
        uses: actions/upload-artifact@v4
        if: ${{ !cancelled() }}
        with:
          name: playwright-report
          path: app/test-results/**/trace.zip
          if-no-files-found: ignore
          retention-days: 30
2 changes: 2 additions & 0 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

5 changes: 5 additions & 0 deletions app/.env.testing
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
# Environment for the e2e suite (loaded via `vite dev --mode testing`).
PUBLIC_API_BASE_URL=https://test.app.gitbutler.com/
# Left empty: no analytics or error reporting during tests.
PUBLIC_POSTHOG_API_KEY=
PUBLIC_CHAIN_API=https://data-test.gitbutler.com/chain/
PUBLIC_SENTRY_ENVIRONMENT=
PUBLIC_TESTING=true
4 changes: 3 additions & 1 deletion app/.eslintignore
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ butler/target
/build
/.svelte-kit
/package
/e2e
.env
.env.*
!.env.example
Expand All @@ -22,6 +23,7 @@ yarn.lock
/svelte.config.js
/postcss.config.cjs
/tailwind.config.cjs
/playwright.config.ts

# Written to disk when using `act`
.pnpm-store
.pnpm-store
4 changes: 4 additions & 0 deletions app/.gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -21,3 +21,7 @@ vite.config.ts.timestamp-*

# Written to disk when using `act`
.pnpm-store

# playwright
test-results*
playwright-report
15 changes: 15 additions & 0 deletions app/e2e/playwright/basic.spec.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
import { test, expect } from '@playwright/test';

// Address the dev server is expected to listen on (see playwright.config.ts webServer).
const BASE_URL = 'http://localhost:1420';

// The freshly booted app renders with an empty document title.
test('has empty title', async ({ page }) => {
	await page.goto(BASE_URL);

	await expect(page).toHaveTitle('');
});

// The file listbox exposes a button whose text is "package.json".
test('has text package.json', async ({ page }) => {
	await page.goto(BASE_URL);

	const fileButton = page.getByRole('listbox').getByRole('button');

	await expect(fileButton).toHaveText('package.json');
});
4 changes: 4 additions & 0 deletions app/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,9 @@
"dev": "vite dev",
"test": "vitest run --mode development",
"test:watch": "vitest --watch --mode development",
"test:e2e": "playwright test -c ./playwright.config.ts",
"test:e2e:watch": "playwright test -c ./playwright.config.ts --ui",
"test:e2e:run": "vite dev --mode testing",
"build:development": "vite build --mode development",
"build:nightly": "vite build --mode nightly",
"build": "vite build",
Expand Down Expand Up @@ -39,6 +42,7 @@
"@lezer/common": "^1.2.1",
"@lezer/highlight": "^1.2.0",
"@octokit/rest": "^20.1.1",
"@playwright/test": "^1.44.0",
"@replit/codemirror-lang-svelte": "^6.0.0",
"@sentry/sveltekit": "^7.114.0",
"@sveltejs/adapter-static": "^2.0.3",
Expand Down
25 changes: 25 additions & 0 deletions app/playwright.config.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
import { defineConfig, devices } from '@playwright/test';

/**
* See https://playwright.dev/docs/test-configuration.
*/
export default defineConfig({
	// E2E specs live under app/e2e/playwright.
	testDir: './e2e/playwright',
	testMatch: /(.+\.)?(test|spec)\.[jt]s/,
	projects: [
		{
			name: 'Google Chrome',
			use: { ...devices['Desktop Chrome'] }
		}
	],
	expect: {
		// Generous expect timeout — the dev server can be slow to first paint in CI.
		timeout: 20 * 1000
	},
	use: {
		// Record a trace only for failing tests to keep artifacts small.
		trace: 'retain-on-failure'
	},
	webServer: {
		// Boots the vite dev server in testing mode; Playwright waits for the URL
		// to respond before running tests.
		command: 'pnpm test:e2e:run',
		url: 'http://localhost:1420'
	}
});
17 changes: 7 additions & 10 deletions app/src/lib/ai/anthropicClient.ts
Original file line number Diff line number Diff line change
@@ -1,24 +1,21 @@
import {
MessageRole,
type AIClient,
type AnthropicModelName,
type PromptMessage
} from '$lib/ai/types';
import { SHORT_DEFAULT_COMMIT_TEMPLATE, SHORT_DEFAULT_BRANCH_TEMPLATE } from '$lib/ai/prompts';
import { fetch, Body } from '@tauri-apps/api/http';
import type { AIClient, AnthropicModelName, PromptMessage } from '$lib/ai/types';

type AnthropicAPIResponse = { content: { text: string }[] };

export class AnthropicAIClient implements AIClient {
defaultCommitTemplate = SHORT_DEFAULT_COMMIT_TEMPLATE;
defaultBranchTemplate = SHORT_DEFAULT_BRANCH_TEMPLATE;

constructor(
private apiKey: string,
private modelName: AnthropicModelName
) {}

async evaluate(prompt: string) {
const messages: PromptMessage[] = [{ role: MessageRole.User, content: prompt }];

async evaluate(prompt: PromptMessage[]) {
const body = Body.json({
messages,
messages: prompt,
max_tokens: 1024,
model: this.modelName
});
Expand Down
12 changes: 7 additions & 5 deletions app/src/lib/ai/butlerClient.ts
Original file line number Diff line number Diff line change
@@ -1,19 +1,21 @@
import { MessageRole, type ModelKind, type AIClient, type PromptMessage } from '$lib/ai/types';
import { SHORT_DEFAULT_BRANCH_TEMPLATE, SHORT_DEFAULT_COMMIT_TEMPLATE } from '$lib/ai/prompts';
import type { AIClient, ModelKind, PromptMessage } from '$lib/ai/types';
import type { HttpClient } from '$lib/backend/httpClient';

export class ButlerAIClient implements AIClient {
defaultCommitTemplate = SHORT_DEFAULT_COMMIT_TEMPLATE;
defaultBranchTemplate = SHORT_DEFAULT_BRANCH_TEMPLATE;

constructor(
private cloud: HttpClient,
private userToken: string,
private modelKind: ModelKind
) {}

async evaluate(prompt: string) {
const messages: PromptMessage[] = [{ role: MessageRole.User, content: prompt }];

async evaluate(prompt: PromptMessage[]) {
const response = await this.cloud.post<{ message: string }>('evaluate_prompt/predict.json', {
body: {
messages,
messages: prompt,
max_tokens: 400,
model_kind: this.modelKind
},
Expand Down
166 changes: 166 additions & 0 deletions app/src/lib/ai/ollamaClient.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,166 @@
import { LONG_DEFAULT_BRANCH_TEMPLATE, LONG_DEFAULT_COMMIT_TEMPLATE } from '$lib/ai/prompts';
import { MessageRole, type PromptMessage, type AIClient } from '$lib/ai/types';
import { isNonEmptyObject } from '$lib/utils/typeguards';
import { fetch, Body, Response } from '@tauri-apps/api/http';

/** Default address of a locally running Ollama server. */
export const DEFAULT_OLLAMA_ENDPOINT = 'http://127.0.0.1:11434';
/** Model used when the user has not configured one. */
export const DEFAULT_OLLAMA_MODEL_NAME = 'llama3';

// Relative paths of the Ollama HTTP API endpoints, resolved against the
// configured base endpoint.
// NOTE(review): name looks like a typo for "OllamaAPIEndpoint" — left
// unchanged because it is referenced elsewhere in this file.
enum OllamaAPEndpoint {
	Generate = 'api/generate',
	Chat = 'api/chat',
	Embed = 'api/embeddings'
}

/** Tunable generation options forwarded to Ollama. */
interface OllamaRequestOptions {
	/**
	 * The temperature of the model.
	 * Increasing the temperature will make the model answer more creatively. (Default: 0.8)
	 */
	temperature: number;
}

/** Request body for the Ollama chat endpoint. */
interface OllamaChatRequest {
	model: string;
	messages: PromptMessage[];
	stream: boolean;
	format?: 'json';
	options?: OllamaRequestOptions;
}

// Fields shared by Ollama responses.
// Renamed from `BaseOllamaMResponse` — the stray "M" was a typo; the type is
// only referenced within this file's type declarations.
interface BaseOllamaResponse {
	created_at: string;
	done: boolean;
	model: string;
}

/** Non-streaming chat response; `done` is always true in this mode. */
interface OllamaChatResponse extends BaseOllamaResponse {
	message: PromptMessage;
	done: true;
}

/** Shape the model is instructed to reply with: a single `result` string. */
interface OllamaChatMessageFormat {
	result: string;
}

// JSON schema embedded in the system prompt so the model answers in
// OllamaChatMessageFormat.
const OLLAMA_CHAT_MESSAGE_FORMAT_SCHEMA = {
	type: 'object',
	properties: {
		result: { type: 'string' }
	},
	required: ['result'],
	additionalProperties: false
};

/** Type guard: true when `message` matches OllamaChatMessageFormat. */
function isOllamaChatMessageFormat(message: unknown): message is OllamaChatMessageFormat {
	return isNonEmptyObject(message) && typeof message.result === 'string';
}

/**
 * Type guard: true when `response` looks like an OllamaChatResponse, i.e.
 * carries a `message` object with string `role` and `content` fields.
 */
function isOllamaChatResponse(response: unknown): response is OllamaChatResponse {
	if (!isNonEmptyObject(response)) {
		return false;
	}

	// Strict equality (was `==`) for consistency with isOllamaChatMessageFormat;
	// behavior is identical since typeof yields plain strings.
	return (
		isNonEmptyObject(response.message) &&
		typeof response.message.role === 'string' &&
		typeof response.message.content === 'string'
	);
}

export class OllamaClient implements AIClient {
defaultCommitTemplate = LONG_DEFAULT_COMMIT_TEMPLATE;
defaultBranchTemplate = LONG_DEFAULT_BRANCH_TEMPLATE;

constructor(
private endpoint: string,
private modelName: string
) {}

async evaluate(prompt: PromptMessage[]) {
const messages = this.formatPrompt(prompt);
const response = await this.chat(messages);
const rawResponse = JSON.parse(response.message.content);
if (!isOllamaChatMessageFormat(rawResponse)) {
throw new Error('Invalid response: ' + response.message.content);
}

return rawResponse.result;
}

/**
* Appends a system message which instructs the model to respond using a particular JSON schema
* Modifies the prompt's Assistant messages to make use of the correct schema
*/
private formatPrompt(prompt: PromptMessage[]) {
const withFormattedResponses = prompt.map((promptMessage) => {
if (promptMessage.role == MessageRole.Assistant) {
return {
role: MessageRole.Assistant,
content: JSON.stringify({ result: promptMessage.content })
};
} else {
return promptMessage;
}
});

return [
{
role: MessageRole.System,
content: `You are an expert in software development. Answer the given user prompts following the specified instructions.
Return your response in JSON and only use the following JSON schema:
${JSON.stringify(OLLAMA_CHAT_MESSAGE_FORMAT_SCHEMA, null, 2)}`
},
...withFormattedResponses
];
}

/**
* Fetches the chat using the specified request.
* @param request - The OllamaChatRequest object containing the request details.
* @returns A Promise that resolves to the Response object.
*/
private async fetchChat(request: OllamaChatRequest): Promise<Response<any>> {
const url = new URL(OllamaAPEndpoint.Chat, this.endpoint);
const body = Body.json(request);
const result = await fetch(url.toString(), {
method: 'POST',
headers: {
'Content-Type': 'application/json'
},
body
});
return result;
}

/**
* Sends a chat message to the LLM model and returns the response.
*
* @param messages - An array of LLMChatMessage objects representing the chat messages.
* @param options - Optional LLMRequestOptions object for specifying additional options.
* @throws Error if the response is invalid.
* @returns A Promise that resolves to an LLMResponse object representing the response from the LLM model.
*/
private async chat(
messages: PromptMessage[],
options?: OllamaRequestOptions
): Promise<OllamaChatResponse> {
const result = await this.fetchChat({
model: this.modelName,
stream: false,
messages,
options,
format: 'json'
});

if (!isOllamaChatResponse(result.data)) {
throw new Error('Invalid response\n' + JSON.stringify(result.data));
}

return result.data;
}
}
Loading

0 comments on commit b4b0de6

Please sign in to comment.