diff --git a/.cockpit-ci/container b/.cockpit-ci/container new file mode 100644 index 00000000000..b5515747f92 --- /dev/null +++ b/.cockpit-ci/container @@ -0,0 +1 @@ +ghcr.io/cockpit-project/tasks:2024-08-19 diff --git a/.eslintrc.json b/.eslintrc.json index a045c49cc88..62a8340ae34 100644 --- a/.eslintrc.json +++ b/.eslintrc.json @@ -44,6 +44,7 @@ "react/jsx-closing-tag-location": "off", "react/jsx-curly-newline": "off", "react/jsx-first-prop-new-line": "off", + "react/jsx-no-useless-fragment": "error", "react/prop-types": "off", "space-before-function-paren": "off", "standard/no-callback-literal": "off", @@ -53,9 +54,42 @@ "react/jsx-wrap-multilines": "off" }, "globals": { - "require": false, - "module": false + "require": "readonly", + "module": "readonly" }, + "overrides": [ + { + "files": ["**/*.ts", "**/*.tsx"], + "plugins": [ + "@typescript-eslint" + ], + "extends": "plugin:@typescript-eslint/recommended", + "parser": "@typescript-eslint/parser", + "parserOptions": { + "project": ["./tsconfig.json"] + }, + "rules": { + // https://typescript-eslint.io/rules/no-use-before-define + // Note: you must disable the base rule as it can report incorrect errors + "no-use-before-define": "off", + "@typescript-eslint/no-use-before-define": "error", + + // as recommended by https://typescript-eslint.io/rules/no-unused-vars/ + "@typescript-eslint/no-unused-vars": [ + "error", + { + "args": "all", + "argsIgnorePattern": "^_", + "caughtErrors": "all", + "caughtErrorsIgnorePattern": "^_", + "destructuredArrayIgnorePattern": "^_", + "varsIgnorePattern": "^_", + "ignoreRestSiblings": true + } + ] + } + } + ], "settings": { "react": { "version": "detect" diff --git a/.flake8 b/.flake8 deleted file mode 100644 index 841f2a49186..00000000000 --- a/.flake8 +++ /dev/null @@ -1,8 +0,0 @@ -[flake8] -# Use flake8 only to cover rules not yet supported (or enabled by -# default) with ruff. 
See https://github.com/astral-sh/ruff/issues/2402 -# and https://github.com/astral-sh/ruff/releases/tag/v0.0.269 -select= - E1 - E2 - E3 diff --git a/.flowconfig b/.flowconfig deleted file mode 100644 index 27065357aa8..00000000000 --- a/.flowconfig +++ /dev/null @@ -1,12 +0,0 @@ -[ignore] - -[include] - -[libs] - -[lints] - -[options] -module.name_mapper='.*cockpit$' -> '/src/base1/cockpit.js' - -[strict] diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 64499107ed5..7159de0f31f 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -3,9 +3,9 @@ updates: - package-ecosystem: "npm" directory: "/" schedule: - interval: "daily" - # run these when most of our developers don't work, don't DoS our CI over the day - time: "22:00" + interval: "weekly" + # run these when most of our developers don't work + day: "sunday" timezone: "Europe/Berlin" open-pull-requests-limit: 3 commit-message: @@ -19,12 +19,27 @@ updates: esbuild: patterns: - "esbuild*" + patternfly: + patterns: + - "@patternfly*" + react: + patterns: + - "react*" stylelint: patterns: - "stylelint*" - xterm: + types: patterns: - - "xterm*" - patternfly: + - "@types*" + - "types*" + xterm: patterns: - - "@patternfly*" + - "@xterm/*" + + - package-ecosystem: "github-actions" + directory: "/" + open-pull-requests-limit: 3 + labels: + - "no-test" + schedule: + interval: "weekly" diff --git a/.github/workflows/build-ws-container.yml b/.github/workflows/build-ws-container.yml index f2134146562..27e74a9f389 100644 --- a/.github/workflows/build-ws-container.yml +++ b/.github/workflows/build-ws-container.yml @@ -17,7 +17,7 @@ jobs: steps: - name: Clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Log into container registry run: $RUNC login -u ${{ secrets.QUAY_BOTUSER }} -p ${{ secrets.QUAY_TOKEN }} quay.io diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 29eb77ab484..540961ded1c 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -25,10 +25,10 @@ jobs: steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: ${{ matrix.language }} queries: +security-and-quality @@ -37,7 +37,7 @@ jobs: - name: Install build dependencies run: | sudo apt-get update - sudo apt-get install -y --no-install-recommends gettext libssh-dev zlib1g-dev libkrb5-dev libxslt1-dev libglib2.0-dev libgnutls28-dev libsystemd-dev libpolkit-agent-1-dev libpcp3-dev libjson-glib-dev libpam0g-dev libpcp-import1-dev libpcp-pmda3-dev systemd xsltproc xmlto docbook-xsl + sudo apt-get install -y --no-install-recommends gettext zlib1g-dev libkrb5-dev libxslt1-dev libglib2.0-dev libgnutls28-dev libsystemd-dev libpolkit-agent-1-dev libpcp3-dev libjson-glib-dev libpam0g-dev libpcp-import1-dev libpcp-pmda3-dev systemd xsltproc xmlto docbook-xsl if: ${{ matrix.language == 'cpp' }} - name: Build @@ -49,6 +49,6 @@ jobs: if: ${{ matrix.language == 'cpp' }} - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 with: category: "/language:${{ matrix.language }}" diff --git a/.github/workflows/dependabot.yml b/.github/workflows/dependabot.yml index d69587b129a..0ab89757760 100644 --- a/.github/workflows/dependabot.yml +++ b/.github/workflows/dependabot.yml @@ -16,14 +16,14 @@ jobs: steps: - name: Clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ 
github.event.pull_request.head.ref }} repository: ${{ github.event.pull_request.head.repo.full_name }} fetch-depth: 0 - name: Clear node_modules label - uses: actions/github-script@v6 + uses: actions/github-script@v7 with: script: | try { @@ -56,7 +56,7 @@ jobs: - name: Clear [no-test] prefix from PR title if: ${{ contains(github.event.pull_request.title, '[no-test]') }} - uses: actions/github-script@v6 + uses: actions/github-script@v7 env: TITLE: '${{ github.event.pull_request.title }}' with: diff --git a/.github/workflows/differential-shellcheck.yml b/.github/workflows/differential-shellcheck.yml index 504e1b999fb..708a36ab2b0 100644 --- a/.github/workflows/differential-shellcheck.yml +++ b/.github/workflows/differential-shellcheck.yml @@ -20,12 +20,12 @@ jobs: steps: - name: Repository checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - name: Differential ShellCheck - uses: redhat-plumbers-in-action/differential-shellcheck@v4 + uses: redhat-plumbers-in-action/differential-shellcheck@v5 with: severity: warning token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/flatpak-test.yml b/.github/workflows/flatpak-test.yml index 6a34e8fe90c..785706201a7 100644 --- a/.github/workflows/flatpak-test.yml +++ b/.github/workflows/flatpak-test.yml @@ -11,19 +11,22 @@ jobs: - name: Install required build and test dependencies run: | sudo apt update - sudo apt install -y --no-install-recommends autoconf automake elfutils libglib2.0-dev libsystemd-dev xsltproc xmlto gettext flatpak-builder xvfb cockpit-system appstream appstream-util + sudo apt install -y --no-install-recommends autoconf automake elfutils libglib2.0-dev libsystemd-dev xsltproc xmlto gettext flatpak xvfb cockpit-system appstream - name: Configure flathub remote run: flatpak remote-add --user --if-not-exists flathub https://flathub.org/repo/flathub.flatpakrepo + - name: Install flatpak-builder + run: flatpak install --assumeyes --user org.flatpak.Builder + - name: Clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: # need this to also fetch tags fetch-depth: 0 - name: Build and install flatpak - run: ELEMENT_TREE_NO_INDENT=1 sh -x containers/flatpak/install --user --install-deps-from=flathub + run: dbus-run-session sh -x containers/flatpak/install --user --install-deps-from=flathub - name: Smoke-test the installed flatpak run: | diff --git a/.github/workflows/nightly.yml b/.github/workflows/nightly.yml index edf5d77a167..d5bec57ca78 100644 --- a/.github/workflows/nightly.yml +++ b/.github/workflows/nightly.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Trigger updates-testing scenario run: | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index da85d3ea3fb..b7330bba294 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -8,35 +8,26 @@ on: jobs: source: - runs-on: ubuntu-latest - container: - image: ghcr.io/cockpit-project/unit-tests - options: --user root + # 22.04's podman has issues with piping and causes tar errors + runs-on: ubuntu-20.04 permissions: # create GitHub release contents: write steps: - name: Clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 - # https://github.blog/2022-04-12-git-security-vulnerability-announced/ - - name: Pacify git's permission check - run: git config --global --add safe.directory /__w/cockpit/cockpit - - name: Workaround for 
https://github.com/actions/checkout/pull/697 run: git fetch --force origin $(git describe --tags):refs/tags/$(git describe --tags) - - name: Bootstrap automake - run: ./autogen.sh - - name: Build release - run: make dist -j$(nproc) VERSION='${{ github.ref_name }}' + run: tools/release '${{ github.server_url }}/${{ github.repository }}' '${{ github.ref_name }}' - id: publish name: Publish GitHub release - uses: cockpit-project/action-release@88d994da62d1451c7073e26748c18413fcdf46e9 + uses: cockpit-project/action-release@7d2e2657382e8d34f88a24b5987f2b81ea165785 with: filename: "cockpit-${{ github.ref_name }}.tar.xz" @@ -52,11 +43,11 @@ jobs: permissions: {} runs-on: ubuntu-latest container: - image: ghcr.io/cockpit-project/unit-tests + image: ghcr.io/cockpit-project/tasks:latest options: --user root steps: - name: Checkout website repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: path: website repository: cockpit-project/cockpit-project.github.io @@ -106,17 +97,13 @@ jobs: permissions: {} runs-on: ubuntu-latest steps: - - uses: actions/setup-python@v4 - with: - python-version: '3.10' - - name: Checkout source repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: path: src - name: Checkout flathub repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: path: flathub repository: flathub/org.cockpit_project.CockpitClient @@ -158,7 +145,7 @@ jobs: permissions: {} steps: - name: Clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Set up git run: | diff --git a/.github/workflows/reposchutz.yml b/.github/workflows/reposchutz.yml index 2e212f3ba95..2f690007a49 100644 --- a/.github/workflows/reposchutz.yml +++ b/.github/workflows/reposchutz.yml @@ -18,7 +18,7 @@ jobs: steps: - name: Clone target branch - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: fetch-depth: 0 @@ -27,7 +27,7 @@ jobs: - name: Clear .github-changes label if: ${{ !endsWith(github.event.action, 'labeled') }} - uses: actions/github-script@v6 + uses: actions/github-script@v7 with: script: | try { diff --git a/.github/workflows/tasks-container-update.yml b/.github/workflows/tasks-container-update.yml new file mode 100644 index 00000000000..482040073fe --- /dev/null +++ b/.github/workflows/tasks-container-update.yml @@ -0,0 +1,37 @@ +name: tasks-container-update +on: + schedule: + - cron: '0 2 * * 1' + # can be run manually on https://github.com/cockpit-project/cockpit/actions + workflow_dispatch: +jobs: + tasks-container-update: + environment: self + runs-on: ubuntu-latest + permissions: + contents: write + pull-requests: write + statuses: write + container: + image: ghcr.io/cockpit-project/tasks + options: --user root + steps: + - name: Set up configuration and secrets + run: | + printf '[user]\n\tname = Cockpit Project\n\temail=cockpituous@gmail.com\n' > ~/.gitconfig + mkdir -p ~/.config + echo '${{ secrets.GITHUB_TOKEN }}' > ~/.config/github-token + + - name: Clone repository + uses: actions/checkout@v4 + with: + ssh-key: ${{ secrets.DEPLOY_KEY }} + + # https://github.blog/2022-04-12-git-security-vulnerability-announced/ + - name: Pacify git's permission check + run: git config --global --add safe.directory /__w/cockpit/cockpit + + - name: Run tasks-container-update + run: | + test/common/make-bots + bots/tasks-container-update diff --git a/.github/workflows/tox.yaml b/.github/workflows/tox.yaml index b235c92ee85..1c58de6ab0f 100644 --- a/.github/workflows/tox.yaml +++ b/.github/workflows/tox.yaml @@ -12,7 +12,7 @@ jobs: steps: - 
name: Clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Checkout submodules run: vendor/checkout diff --git a/.github/workflows/trigger-anaconda.yml b/.github/workflows/trigger-anaconda.yml index 5affc4185ca..50a17bda429 100644 --- a/.github/workflows/trigger-anaconda.yml +++ b/.github/workflows/trigger-anaconda.yml @@ -19,7 +19,7 @@ jobs: permissions: contents: read statuses: write - container: registry.fedoraproject.org/fedora:rawhide + container: registry.fedoraproject.org/fedora:40 # this polls for a COPR build, which can take long timeout-minutes: 120 diff --git a/.github/workflows/unit-tests-refresh.yml b/.github/workflows/unit-tests-refresh.yml deleted file mode 100644 index a0d7c6f5814..00000000000 --- a/.github/workflows/unit-tests-refresh.yml +++ /dev/null @@ -1,57 +0,0 @@ -name: unit-test-refresh -on: - schedule: - # auto-refresh every Sunday evening - - cron: '0 22 * * 0' - # can be run manually on https://github.com/cockpit-project/cockpit/actions - workflow_dispatch: -jobs: - # we do both builds and all tests in a single run, so that we only upload the containers on success - refresh: - runs-on: ubuntu-22.04 - permissions: - contents: read - packages: write - timeout-minutes: 60 - steps: - - name: Clone repository - uses: actions/checkout@v3 - with: - # need this to also fetch tags - fetch-depth: 0 - - - name: Build fresh containers - timeout-minutes: 10 - run: containers/unit-tests/build - - - name: Run amd64 gcc check-memory test - timeout-minutes: 20 - run: containers/unit-tests/start --verbose --env=CC=gcc --image-tag=latest --make check-memory - - - name: Run amd64 clang distcheck test - timeout-minutes: 15 - run: containers/unit-tests/start --verbose --env=CC=clang --image-tag=latest --make distcheck - - - name: Run i386 gcc distcheck test - timeout-minutes: 15 - run: containers/unit-tests/start --verbose --env=CC=clang --image-tag=i386 --make distcheck - - - name: Run amd64 gcc distcheck test for C bridge - timeout-minutes: 15 - run: containers/unit-tests/start --verbose --env=CC=gcc --env=EXTRA_DISTCHECK_CONFIGURE_FLAGS=--enable-old-bridge --image-tag=latest --make distcheck - - - name: Run amd64 gcc check test - timeout-minutes: 15 - run: containers/unit-tests/start --verbose --env=CC=gcc --image-tag=latest --make check - - - name: Run pytest-cov test - timeout-minutes: 15 - run: containers/unit-tests/start --verbose --env=CC=gcc --image-tag=latest --make pytest-cov - - - name: Log into container registry - run: podman login -u ${{ github.actor }} -p ${{ secrets.GITHUB_TOKEN }} ghcr.io - - - name: Push containers to registry - run: | - podman push ghcr.io/cockpit-project/unit-tests:latest - podman push ghcr.io/cockpit-project/unit-tests:i386 diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml index 527661c1043..0c2f21aedf4 100644 --- a/.github/workflows/unit-tests.yml +++ b/.github/workflows/unit-tests.yml @@ -6,46 +6,33 @@ jobs: permissions: {} strategy: matrix: - startarg: - # avoid check-memory on i386, it has literally thousands of uninteresting/wrong errors - - { make: 'check-memory', cc: 'gcc', tag: 'latest' } - # with default Python bridge - - { make: 'distcheck', cc: 'clang', tag: 'latest' } - - { make: 'distcheck', cc: 'gcc', tag: 'i386' } - # with old C bridge - - { make: 'distcheck', cc: 'gcc', distcheck_flags: '--enable-old-bridge', tag: 'latest' } + target: + - check-memory + - distcheck # this runs static code checks, unlike distcheck - - { make: 'check', cc: 'gcc', tag: 'latest' } - - { make: 
'pytest-cov', cc: 'gcc', tag: 'latest' } + - check + - pytest-cov fail-fast: false timeout-minutes: 60 steps: - - name: Clone repository - uses: actions/checkout@v3 - with: - # need this to also fetch tags - fetch-depth: 0 - - - name: Build unit test container if it changed - run: | - changes=$(git diff --name-only origin/${{ github.event.pull_request.base.ref }}..HEAD -- containers/unit-tests/) - if [ -n "${changes}" ]; then - case '${{ matrix.startarg.tag }}' in - i386) arch=i386;; - latest) arch=amd64;; - esac - containers/unit-tests/build $arch - fi - - - name: Run unit-tests container + - name: Run unit test timeout-minutes: 30 - # HACK: -gdwarf-4 is for clang: https://bugs.kde.org/show_bug.cgi?id=452758 run: | - containers/unit-tests/start \ - --verbose \ - --env=FORCE_COLOR=1 \ - --env=CC='${{ matrix.startarg.cc }}' \ - --env=CFLAGS='-O2 -gdwarf-4' \ - --env=EXTRA_DISTCHECK_CONFIGURE_FLAGS='${{ matrix.startarg.distcheck_flags }}' \ - --image-tag='${{ matrix.startarg.tag }}' \ - --make '${{ matrix.startarg.make }}' + set -eux + + IMAGE="$(curl --fail https://raw.githubusercontent.com/${GITHUB_REPOSITORY}/${GITHUB_SHA}/.cockpit-ci/container)" + podman run --security-opt=seccomp=unconfined --network=host --rm --interactive "${IMAGE}" sh -eux < ~/.config/github-token - name: Clone repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Run urls-check action run: | diff --git a/.github/workflows/weblate-sync-po.yml b/.github/workflows/weblate-sync-po.yml index 09cc8760c36..96fbbbc6d0e 100644 --- a/.github/workflows/weblate-sync-po.yml +++ b/.github/workflows/weblate-sync-po.yml @@ -21,13 +21,13 @@ jobs: sudo apt install -y --no-install-recommends gettext - name: Clone source repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ssh-key: ${{ secrets.DEPLOY_KEY }} path: src - name: Clone weblate repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: repository: ${{ github.repository }}-weblate path: weblate diff --git a/.github/workflows/weblate-sync-pot.yml b/.github/workflows/weblate-sync-pot.yml index 3ac5dfd74bd..4ccd9555e09 100644 --- a/.github/workflows/weblate-sync-pot.yml +++ b/.github/workflows/weblate-sync-pot.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest permissions: {} container: - image: ghcr.io/cockpit-project/unit-tests + image: ghcr.io/cockpit-project/tasks options: --user root timeout-minutes: 10 steps: @@ -21,7 +21,7 @@ jobs: run: git config --global --add safe.directory /__w/ - name: Clone source repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: path: src fetch-depth: 0 @@ -33,7 +33,7 @@ jobs: make po/cockpit.pot - name: Clone weblate repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: path: weblate repository: ${{ github.repository }}-weblate diff --git a/.gitignore b/.gitignore index 5e2968ce87d..086ba2532ab 100644 --- a/.gitignore +++ b/.gitignore @@ -38,7 +38,6 @@ Makefile.in /cockpit-certificate-ensure /cockpit-pcp /cockpit-session -/cockpit-ssh /cockpit-tls /cockpit-ws /cockpit-wsinstance-factory @@ -62,6 +61,7 @@ Makefile.in /test_rsa_key /tmp-dist /tmp/ +/tsconfig.tsbuildinfo /version.m4 /wsinstance-start @@ -74,10 +74,10 @@ Makefile.in /po/*.pot /src/bridge/org.cockpit-project.cockpit-bridge.policy /src/cockpit.egg-info/ -/src/common/fail.html.c +/src/common/fail-html.c /src/systemd/cockpit*.service /src/systemd/cockpit*.socket -/src/systemd/cockpit-tempfiles.conf +/src/systemd/tmpfiles.d/cockpit-ws.conf /src/tls/cockpit-certificate-helper 
/src/ws/cockpit-desktop /src/ws/cockpit.appdata.xml diff --git a/.stylelintrc.json b/.stylelintrc.json index 60c5bc78553..762bf194cca 100644 --- a/.stylelintrc.json +++ b/.stylelintrc.json @@ -4,11 +4,7 @@ "stylelint-use-logical-spec" ], "rules": { - "declaration-colon-newline-after": null, - "selector-list-comma-newline-after": null, - "at-rule-empty-line-before": null, - "declaration-colon-space-before": null, "declaration-empty-line-before": null, "custom-property-empty-line-before": null, "comment-empty-line-before": null, @@ -23,7 +19,6 @@ "font-family-no-duplicate-names": null, "function-url-quotes": null, "keyframes-name-pattern": null, - "max-line-length": null, "media-feature-range-notation": "prefix", "no-descending-specificity": null, "no-duplicate-selectors": null, diff --git a/HACKING.md b/HACKING.md index 95821774551..b7bfc10b6d2 100644 --- a/HACKING.md +++ b/HACKING.md @@ -17,7 +17,7 @@ remote, for example with ## Setting up development container -The cockpit team maintains a [cockpit/tasks container](https://quay.io/repository/cockpit/tasks) +The cockpit team maintains a [cockpit/tasks container](https://ghcr.io/cockpit-project/tasks) for both local development and CI. If you can install [toolbx](https://containertoolbx.org/) or [distrobox](https://distrobox.privatedns.org/) on your system, it is highly recommended to do that: @@ -38,7 +38,7 @@ recommended to do that: 2. Create a development toolbox for Cockpit - toolbox create --image quay.io/cockpit/tasks -c cockpit + toolbox create --image ghcr.io/cockpit-project/tasks -c cockpit 3. Enter the toolbox: @@ -52,7 +52,7 @@ toolbox container. If desired, you can install additional packages with The Cockpit team occasionally refreshes the `tasks` container image. To re-create your development container from the latest image, run: - podman pull quay.io/cockpit/tasks + podman pull ghcr.io/cockpit-project/tasks toolbox rm cockpit ...and then repeat steps 2 and 3 from above. @@ -60,29 +60,27 @@ To re-create your development container from the latest image, run: ## Working on Cockpit's session pages Most contributors want to work on the web (HTML, JavaScript, CSS) parts of Cockpit. + +### Install Cockpit + First, install Cockpit on your local machine as described in: +### Build session pages + Next, run this command from your top level Cockpit checkout directory, and make sure to run it as the same user that you'll use to log into Cockpit below. mkdir -p ~/.local/share/ ln -s $(pwd)/dist ~/.local/share/cockpit -This will cause cockpit to read JavaScript, HTML, and CSS files directly from the +This will cause Cockpit to read JavaScript, HTML, and CSS files directly from the locally built package output directory instead of using the system-installed Cockpit files. -Now you can log into Cockpit on your local Linux machine at the following -address. Use the same user and password that you used to log into your Linux -desktop. - - - -After every change to the source files, bundles need to be rebuilt. The -recommended and fastest way is to do that is using the "watch" mode (`-w` or -`--watch`) on the page that you are working on. For example, if you want to +The recommended way to build bundles is to use the "watch" mode +(`-w` or`--watch`) on the page you are working on. 
For example, if you want to work on anything in [pkg/systemd](./pkg/systemd/), run: ./build.js -w systemd @@ -94,11 +92,16 @@ pkg/lib/), you can also build all pages: ./build.js -w -Note that this enables eslint and stylelint by default -- if you want to -disable them, run it with `-e`/`--no-eslint` and/or `-s`/`--no-stylelint`. +Now you can log into Cockpit on your local Linux machine at the following +address, using the same username and password as your desktop login: + + -Reload cockpit in your browser after page is built. Press `Ctrl`-`C` to -stop watch mode once you are done with changing the code. +Watch mode automatically rebuilds when source files are modified. Once it +finishes building, refresh your browser to see the changes in Cockpit. +Press `Ctrl-C` to stop watch mode when you are done changing the code. + +### Testing You often need to test code changes in a VM. You can set the `$RSYNC` env variable to copy the built page into the given SSH target's @@ -109,6 +112,8 @@ one of these commands: RSYNC=c ./build.js -w kdump RSYNC=c ./build.js -w +### Returning to system packages + To make Cockpit use system packages again, instead of your checkout directory, remove the symlink with the following command and log back into Cockpit: @@ -152,10 +157,26 @@ which will output a URL to connect to with a browser, such as . Adjust the path for different tests and inspect the results there. -You can also run individual tests by specifying the `TESTS` environment -variable: +QUnit tests are run as part of a pytest test called `test_browser`. You can +run individual tests via `pytest -k`, like so: + + pytest -k test-fsinfo.html - make check TESTS=qunit/base1/test-chan.html +You can see JavaScript code coverage information for QUnit tests. For a +summary table: + + pytest -k test_browser --js-cov + +And for detailed output on uncovered sections in a specific file, something +like: + + pytest -k test-fsinfo.html --js-cov-files='*/fsinfo.ts' + +Coverage information is gathered into the pytest tmpdir, regardless of which +coverage-related commandline flags are given, so it's also possible to drill +down after the fact — without re-running tests — using something like: + + test/common/js_coverage.py -m '*/fsinfo.ts' /tmp/pytest-of-*/pytest-current/js-coverage/* There are also static code and syntax checks which you should run often: @@ -199,16 +220,16 @@ The Python bridge can be used interactively on a local machine: To make it easy to test out channels without having to write out messages manually, `cockpit.misc.print` can be used: - PYTHONPATH=src python3 -m cockpit.misc.print open fslist1 path=/etc watch=False | PYTHONPATH=src python3 -m cockpit.bridge + PYTHONPATH=src python3 -m cockpit.misc.print open fsinfo path=/etc 'attrs=["type", "entries"]' | PYTHONPATH=src python3 -m cockpit.bridge These shell aliases might be useful when experimenting with the protocol: alias cpy='PYTHONPATH=src python3 -m cockpit.bridge' alias cpf='PYTHONPATH=src python3 -m cockpit.misc.print' -When working with the Python bridge on test images, note that `RHEL/CentOS 8`, -`debian-stable`, and `ubuntu-2204` still use the C bridge. So if you want to -explicitly have the Python bridge on those images use: +When working with the Python bridge on test images, note that `rhel-8*` still +uses the C bridge. So if you want to explicitly have the Python bridge on those +images use: ./test/image-prepare --python @@ -232,7 +253,7 @@ The tests require at least `pytest` 7.0.0 or higher to run. 
Cockpit uses [ESLint](https://eslint.org/) to automatically check JavaScript code style in `.js` and `.jsx` files. -The linter is executed on every build. +The linter is executed as part of `test/static-code`. For developer convenience, the ESLint can be started explicitly by: npm run eslint @@ -254,12 +275,15 @@ unused identifiers, and other JavaScript-related issues. Cockpit uses [Stylelint](https://stylelint.io/) to automatically check CSS code style in `.css` and `.scss` files. -The linter is executed on every build. +The linter is executed as part of `test/static-code`. For developer convenience, the Stylelint can be started explicitly by: npm run stylelint +But note that this only covers files in `pkg/`. `test/static-code` covers +*all* (S)CSS files tracked in git. + Some rule violations can be automatically fixed by running: npm run stylelint:fix @@ -375,14 +399,18 @@ A local cache is maintained in `~/.cache/cockpit-dev`. Make a pull request on github.com with your change. All changes get reviewed, tested, and iterated on before getting into Cockpit. The general workflow is described in the [wiki](https://github.com/cockpit-project/cockpit/wiki/Workflow). -Don't feel bad if there's multiple steps back and forth asking for changes or -tweaks before your change gets in. You need to be familiar with git to contribute a change. Do your changes on a branch. Your change should be one or more git commits that each contain one single logical simple reviewable change, without modifications that are unrelated to the commit message. +Don't feel bad if there are multiple steps back and forth asking for changes or +tweaks before your change gets in. If you fix your commits after getting a +review, just force-push to your branch -- this will update the pull request +automatically. Do *not* close it and open a new one; that would destroy the +conversation and reviews. + Cockpit is a designed project. Anything that the user will see should have design done first. This is done on the wiki and mailing list. @@ -474,6 +502,20 @@ Cockpit log out, use something like: >> localStorage.debugging = "spawn" +## Using React Developer Tools + +Cockpit uses React for its JavaScript frontend. [React Developer +Tools](https://react.dev/learn/react-developer-tools) is a browser extension to +inspect React components and edit props and state. Out of the box, the developer +tools do not work with Cockpit due to the pages being loaded in a separate +iframe. A workaround is to load the page directly via its embedding URL, for example for +the system overview page: + + + +This loads the system overview as a standalone page, allowing React Developer +Tools to inspect its state. + ## Running Cockpit processes under a debugger You may want to run cockpit-ws under a debugger such as valgrind or gdb.
You can @@ -521,7 +563,7 @@ For running tests, the following dependencies are required: sudo dnf install curl expect xz rpm-build chromium-headless dbus-daemon \ libvirt-daemon-driver-storage-core libvirt-daemon-driver-qemu libvirt-client python3-libvirt \ - python3-flake8 python3-pyyaml + python3-pyyaml For compiling the C parts, you will need the package build dependencies: diff --git a/Makefile.am b/Makefile.am index a4a3e021054..2aa715633d3 100644 --- a/Makefile.am +++ b/Makefile.am @@ -31,13 +31,11 @@ CLEANFILES += cockpit-*.tar.xz EXTRA_DIST += $(EXTRA_FILES) distdir: $(DISTFILES) @if [ -e '$(srcdir)/.git' ]; then \ - git -C '$(srcdir)' ls-files -x test/reference .fmf plans pkg test tools > .extra_dist.tmp && \ + git -C '$(srcdir)' ls-files -x test/reference .fmf .cockpit-ci plans pkg test tools > .extra_dist.tmp && \ mv .extra_dist.tmp '$(srcdir)/.extra_dist'; fi $(MAKE) $(AM_MAKEFLAGS) distdir-am EXTRA_FILES="$$(tr '\n' ' ' < $(srcdir)/.extra_dist) .extra_dist" sed -i "s/[@]VERSION@/$(VERSION)/" "$(distdir)/src/client/org.cockpit_project.CockpitClient.metainfo.xml" $(srcdir)/tools/fix-spec $(distdir)/tools/cockpit.spec $(VERSION) - test -z '$(HACK_SPEC_FOR_PYTHON)' || \ - sed -i 's/\(define enable_old_bridge\) 1/\1 0/' $(distdir)/tools/cockpit.spec sed -i "/^pkgver=/ s/0/$(VERSION)/" "$(distdir)/tools/arch/PKGBUILD" sed -i "1 s/0/$(VERSION)/" "$(distdir)/tools/debian/changelog" cp -r "$(srcdir)/dist" "$(distdir)" @@ -87,21 +85,17 @@ clean-local:: find $(builddir) -name '*.gc??' -delete find $(srcdir) -name '*.pyc' -delete -# required for running unit and integration tests; commander and ws are deps of chrome-remote-interface +# required for running integration tests node_modules/%: $(srcdir)/package-lock.json @true EXTRA_DIST += \ - node_modules/chrome-remote-interface \ - node_modules/commander \ node_modules/sizzle \ - node_modules/ws \ $(NULL) check: export VERBOSE=1 -TEST_EXTENSIONS = .html .sh -HTML_LOG_COMPILER = $(top_srcdir)/test/common/tap-cdp --strip=$(abs_top_srcdir)/ $(HTML_TEST_WRAPPER) ./test-server $(COCKPIT_BRIDGE) +TEST_EXTENSIONS = .sh VALGRIND = valgrind --trace-children=yes --quiet --error-exitcode=33 --gen-suppressions=all \ $(foreach file,$(wildcard $(srcdir)/tools/*.supp),--suppressions=$(file)) \ @@ -110,12 +104,10 @@ VALGRIND = valgrind --trace-children=yes --quiet --error-exitcode=33 --gen-suppr check-memory: $(MAKE) LOG_FLAGS="$(VALGRIND)" \ - HTML_TEST_WRAPPER="$(VALGRIND)" \ COCKPIT_SKIP_SLOW_TESTS=1 \ $(AM_MAKEFLAGS) check TESTS="$(filter-out test/% bots/%,$(TESTS))" recheck-memory: $(MAKE) LOG_FLAGS="$(VALGRIND_ARGS)" \ - HTML_TEST_WRAPPER="$(VALGRIND)" \ $(AM_MAKEFLAGS) recheck # checkout Cockpit's bots for standard test VM images and API to launch them @@ -173,9 +165,7 @@ include src/branding/centos/Makefile.am include src/branding/debian/Makefile.am include src/branding/default/Makefile.am include src/branding/fedora/Makefile.am -include src/branding/kubernetes/Makefile.am include src/branding/opensuse/Makefile.am -include src/branding/registry/Makefile.am include src/branding/rhel/Makefile.am include src/branding/scientific/Makefile.am include src/branding/ubuntu/Makefile.am @@ -184,7 +174,6 @@ include src/client/Makefile.am include src/common/Makefile-common.am include src/pam-ssh-add/Makefile.am include src/session/Makefile-session.am -include src/ssh/Makefile-ssh.am include src/systemd/Makefile.am include src/tls/Makefile-tls.am include src/websocket/Makefile-websocket.am diff --git a/build.js b/build.js index d6c27b01c73..f959ce9151d 100755 
--- a/build.js +++ b/build.js @@ -1,10 +1,10 @@ #!/usr/bin/env node -import child_process from 'child_process'; -import fs from 'fs'; -import os from 'os'; -import path from 'path'; -import process from 'process'; +import child_process from 'node:child_process'; +import fs from 'node:fs'; +import os from 'node:os'; +import path from 'node:path'; +import process from 'node:process'; import { getFiles, getTestFiles, all_subdirs } from './files.js'; @@ -37,6 +37,7 @@ const pkgOptions = { // context options for qunit tests in qunit/ const qunitOptions = { + sourcemap: "linked", bundle: true, minify: false, nodePaths, @@ -50,11 +51,13 @@ const qunitOptions = { const parser = (await import('argparse')).default.ArgumentParser(); parser.add_argument('-r', '--rsync', { help: "rsync bundles to ssh target after build", metavar: "HOST" }); parser.add_argument('-w', '--watch', { action: 'store_true', help: "Enable watch mode" }); -parser.add_argument('-e', '--no-eslint', { action: 'store_true', help: "Disable eslint linting", default: production }); -parser.add_argument('-s', '--no-stylelint', { action: 'store_true', help: "Disable stylelint linting", default: production }); +parser.add_argument('-m', '--metafile', { help: "Enable bund size information file", metavar: "FILE" }); parser.add_argument('onlydir', { nargs: '?', help: "The pkg/ to build (eg. base1, shell, ...)", metavar: "DIRECTORY" }); const args = parser.parse_args(); +if (args.metafile) + pkgOptions.metafile = true; + if (args.onlydir?.includes('/')) parser.error("Directory must not contain '/'"); @@ -112,22 +115,18 @@ async function build() { const cockpitPoEsbuildPlugin = (await import('./pkg/lib/cockpit-po-plugin.js')).cockpitPoEsbuildPlugin; const cockpitRsyncEsbuildPlugin = (await import('./pkg/lib/cockpit-rsync-plugin.js')).cockpitRsyncEsbuildPlugin; const cockpitTestHtmlPlugin = (await import('./pkg/lib/esbuild-test-html-plugin.js')).cockpitTestHtmlPlugin; - const eslintPlugin = (await import('./pkg/lib/esbuild-eslint-plugin.js')).eslintPlugin; - const stylelintPlugin = (await import('./pkg/lib/esbuild-stylelint-plugin.js')).stylelintPlugin; const esbuildStylesPlugins = (await import('./pkg/lib/esbuild-common.js')).esbuildStylesPlugins; const { entryPoints, assetFiles, redhat_fonts } = getFiles(args.onlydir); const tests = getTestFiles(); - const testEntryPoints = tests.map(test => "pkg/" + test + ".js"); + const testEntryPoints = tests.map(test => "pkg/" + test); const pkgFirstPlugins = [ cleanPlugin({ subdir: args.onlydir }), ]; const pkgPlugins = [ - ...args.no_stylelint ? [] : [stylelintPlugin({ filter: /pkg\/.*\.(css?|scss?)$/ })], - ...args.no_eslint ? [] : [eslintPlugin({ filter: /pkg\/.*\.(jsx?|js?)$/ })], cockpitJSResolvePlugin, ...esbuildStylesPlugins ]; @@ -184,8 +183,6 @@ async function build() { ...qunitOptions, entryPoints: testEntryPoints, plugins: [ - ...args.no_stylelint ? [] : [stylelintPlugin({ filter: /pkg\/.*\.(css?|scss?)$/ })], - ...args.no_eslint ? [] : [eslintPlugin({ filter: /pkg\/.*\.(jsx?|js?)$/ })], cockpitTestHtmlPlugin({ testFiles: tests }), ], }); @@ -204,14 +201,14 @@ async function build() { ...qunitOptions, entryPoints: testEntryPoints, plugins: [ - ...args.no_stylelint ? [] : [stylelintPlugin({ filter: /pkg\/.*\.(css?|scss?)$/ })], - ...args.no_eslint ? 
[] : [eslintPlugin({ filter: /pkg\/.*\.(jsx?|js?)$/ })], cockpitTestHtmlPlugin({ testFiles: tests }), ], }); try { - await Promise.all([pkgContext.rebuild(), qunitContext.rebuild()]); + const results = await Promise.all([pkgContext.rebuild(), qunitContext.rebuild()]); + if (args.metafile) + fs.writeFileSync(args.metafile, JSON.stringify(results[0].metafile)); } catch (e) { if (!args.watch) process.exit(1); diff --git a/configure.ac b/configure.ac index ad8c853dd85..25549037b4d 100644 --- a/configure.ac +++ b/configure.ac @@ -13,7 +13,7 @@ # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License -# along with Cockpit; If not, see . +# along with Cockpit; If not, see . m4_include([version.m4]) AC_INIT([Cockpit], @@ -70,17 +70,8 @@ if test "$enable_polkit" != 'no'; then fi AC_MSG_RESULT(${enable_polkit:=yes}) -# --disable-ssh -AC_MSG_CHECKING([whether to build cockpit-ssh]) -AC_ARG_ENABLE(ssh, AS_HELP_STRING([--disable-ssh], [Disable cockpit-ssh build and libssh dependency])) -AM_CONDITIONAL(WITH_COCKPIT_SSH, test "$enable_ssh" != "no") -AC_MSG_RESULT(${enable_ssh:=yes}) - -# --enable-old-bridge -AC_MSG_CHECKING([whether to install the old C cockpit-bridge]) -AC_ARG_ENABLE(old_bridge, AS_HELP_STRING([--enable-old-bridge], [Install old C cockpit-bridge])) -AM_CONDITIONAL(WITH_OLD_BRIDGE, test "$enable_old_bridge" = "yes") -AC_MSG_RESULT(${enable_old_bridge:=no}) +# TODO: remove this and clean up the old bridge in src/bridge/ +AM_CONDITIONAL(WITH_OLD_BRIDGE, false) AC_SEARCH_LIBS([argp_parse], [argp]) case "$ac_cv_search_argp_parse" in @@ -111,14 +102,6 @@ PKG_CHECK_MODULES(krb5, [krb5-gssapi >= 1.11 krb5 >= 1.11]) if test "$enable_polkit" != "no"; then PKG_CHECK_MODULES(polkit, [polkit-agent-1 >= 0.105]) fi -if test "$enable_ssh" != "no"; then - PKG_CHECK_MODULES(libssh, [libssh >= 0.8.5]) - old_CFLAGS=$CFLAGS; CFLAGS=$libssh_CFLAGS - old_LIBS=$LIBS; LIBS=$libssh_LIBS - AC_CHECK_FUNCS(ssh_userauth_publickey_auto_get_current_identity) - CFLAGS=$old_CFLAGS - LIBS=$old_LIBS -fi # pam AC_CHECK_HEADER([security/pam_appl.h], , @@ -230,62 +213,6 @@ fi AM_CONDITIONAL(WITH_ASAN, test "$enable_asan" = "yes") AC_MSG_RESULT($asan_status) -# User and group for running cockpit web server (cockpit-tls or -ws in customized setups) - -AC_ARG_WITH(cockpit_user, - AS_HELP_STRING([--with-cockpit-user=], - [User for running cockpit (root)] - ) - ) -AC_ARG_WITH(cockpit_group, - AS_HELP_STRING([--with-cockpit-group=], - [Group for running cockpit] - ) - ) -if test -z "$with_cockpit_user"; then - COCKPIT_USER=root - COCKPIT_GROUP= -else - COCKPIT_USER=$with_cockpit_user - if test -z "$with_cockpit_group"; then - COCKPIT_GROUP=$with_cockpit_user - else - COCKPIT_GROUP=$with_cockpit_group - fi -fi - -AC_SUBST(COCKPIT_USER) -AC_SUBST(COCKPIT_GROUP) - -# User for running cockpit-ws instances from cockpit-tls - -AC_ARG_WITH(cockpit_ws_instance_user, - AS_HELP_STRING([--with-cockpit-ws-instance-user=], - [User for running cockpit-ws instances from cockpit-tls (root)] - ) - ) -AC_ARG_WITH(cockpit_ws_instance_group, - AS_HELP_STRING([--with-cockpit-ws-instance-group=], - [Group for running cockpit-ws instances from cockpit-tls] - ) - ) -if test -z "$with_cockpit_ws_instance_user"; then - if test "$COCKPIT_USER" != "root"; then - AC_MSG_ERROR([--with-cockpit-ws-instance-user is required when setting --with-cockpit-user]) - fi - COCKPIT_WSINSTANCE_USER=root -else - COCKPIT_WSINSTANCE_USER=$with_cockpit_ws_instance_user - if test -z 
"$with_cockpit_ws_instance_group"; then - COCKPIT_WSINSTANCE_GROUP=$with_cockpit_ws_instance_user - else - COCKPIT_WSINSTANCE_GROUP=$with_cockpit_ws_instance_group - fi -fi - -AC_SUBST(COCKPIT_WSINSTANCE_USER) -AC_SUBST(COCKPIT_WSINSTANCE_GROUP) - # admin users group AC_ARG_WITH([admin-group], [AS_HELP_STRING([--with-admin-group=GROUP], @@ -361,6 +288,21 @@ fi AM_CONDITIONAL([ENABLE_DOC], [test "$enable_doc" = "yes"]) +# Default for AllowMultiHost + +AC_MSG_CHECKING([for AllowMultiHost default]) +AC_ARG_ENABLE(multihost, + AS_HELP_STRING([--enable-multihost], + [Set AllowMultiHost to true by default]), + [], [enable_multihost=no]) +AC_MSG_RESULT($enable_multihost) +if test "$enable_multihost" = "no"; then + multihost_def=0 +else + multihost_def=1 +fi +AC_DEFINE_UNQUOTED(ALLOW_MULTIHOST_DEFAULT, [$multihost_def], [default for AllowMultiHost configuration setting]) + # cockpit-client AC_MSG_CHECKING([whether to install cockpit-client]) AC_ARG_ENABLE([cockpit-client], @@ -369,10 +311,6 @@ AC_ARG_ENABLE([cockpit-client], AC_MSG_RESULT($enable_cockpit_client) AM_CONDITIONAL([ENABLE_COCKPIT_CLIENT], [test "$enable_cockpit_client" = "yes"]) -if test "$enable_cockpit_client" = "yes" && test "$enable_old_bridge" = "yes"; then - AC_MSG_ERROR([--enable-cockpit-client conflicts with --enable-old-bridge]) -fi - # Debug AC_MSG_CHECKING([for debug mode]) @@ -477,10 +415,6 @@ echo " cflags: ${CFLAGS} cppflags: ${CPPFLAGS} - cockpit-ws user: ${COCKPIT_USER} - cockpit-ws group: ${COCKPIT_GROUP} - cockpit-ws instance user: ${COCKPIT_WSINSTANCE_USER} - cockpit-ws instance group: ${COCKPIT_WSINSTANCE_GROUP} admin group: ${admin_group} cockpit-session PATH: ${default_session_path} @@ -494,7 +428,6 @@ echo " SELinux Policy: ${enable_selinux_policy} cockpit-client: ${enable_cockpit_client} - cockpit-ssh: ${enable_ssh} ssh-add: ${SSH_ADD} ssh-agent: ${SSH_AGENT} diff --git a/containers/flatpak/Makefile.am b/containers/flatpak/Makefile.am index 314ee2d2d7b..658b934c78e 100644 --- a/containers/flatpak/Makefile.am +++ b/containers/flatpak/Makefile.am @@ -16,12 +16,12 @@ INSTALL_FLATPAK_TARGETS = \ $(NULL) install-for-flatpak: $(INSTALL_FLATPAK_TARGETS) - appstream-util validate --nonet src/client/org.cockpit_project.CockpitClient.metainfo.xml + appstreamcli validate --no-net src/client/org.cockpit_project.CockpitClient.metainfo.xml if test -s "${DOWNSTREAM_RELEASES_XML}"; then \ $(top_srcdir)/tools/patch-metainfo \ '$(DESTDIR)$(datadir)/metainfo/org.cockpit_project.CockpitClient.metainfo.xml' \ "${DOWNSTREAM_RELEASES_XML}"; \ fi - appstream-util validate --nonet $(DESTDIR)$(datadir)/metainfo/org.cockpit_project.CockpitClient.metainfo.xml + appstreamcli validate --no-net $(DESTDIR)$(datadir)/metainfo/org.cockpit_project.CockpitClient.metainfo.xml cp -rT dist/static $(DESTDIR)$(pkgdatadir)/static rm -rf $(DESTDIR)$(pkgdatadir)/apps $(DESTDIR)$(pkgdatadir)/playground diff --git a/containers/flatpak/install b/containers/flatpak/install index 479f91ec20e..25092e9cd97 100755 --- a/containers/flatpak/install +++ b/containers/flatpak/install @@ -18,8 +18,14 @@ cd tmp MANIFEST="$(../containers/flatpak/prepare)" -flatpak-builder \ - "$@" \ - --disable-rofiles-fuse \ - --force-clean \ - --install flatpak-build-dir "${MANIFEST}" +flatpak run \ + org.flatpak.Builder \ + "$@" \ + --force-clean \ + --install flatpak-build-dir "${MANIFEST}" + +flatpak run \ + --command=flatpak-builder-lint \ + org.flatpak.Builder \ + --exceptions \ + builddir flatpak-build-dir diff --git a/containers/flatpak/prepare b/containers/flatpak/prepare 
index d2ca14a7465..b8fd1efb006 100755 --- a/containers/flatpak/prepare +++ b/containers/flatpak/prepare @@ -23,6 +23,7 @@ RELEASES_XML = f'{FLATPAK_ID}.releases.xml' # Constants related to extra packages UPSTREAM_REPOS = [ + 'cockpit-project/cockpit-files', 'cockpit-project/cockpit-machines', 'cockpit-project/cockpit-ostree', 'cockpit-project/cockpit-podman', @@ -85,9 +86,8 @@ def create_manifest( return { 'app-id': FLATPAK_ID, 'runtime': 'org.gnome.Platform', - 'runtime-version': '45', + 'runtime-version': '47', 'sdk': 'org.gnome.Sdk', - 'default-branch': branch, 'command': 'cockpit-client', 'rename-icon': 'cockpit-client', 'finish-args': [ @@ -104,7 +104,6 @@ def create_manifest( 'config-opts': [ '--enable-cockpit-client', '--disable-polkit', - '--disable-ssh', '--disable-pcp', '--with-systemdunitdir=/invalid', 'CPPFLAGS=-Itools/mock-build-env', diff --git a/containers/flatpak/test/test-browser-login-ssh.js b/containers/flatpak/test/test-browser-login-ssh.js index 38e1129a97f..b8ab3091651 100644 --- a/containers/flatpak/test/test-browser-login-ssh.js +++ b/containers/flatpak/test/test-browser-login-ssh.js @@ -12,10 +12,12 @@ async function test() { document.getElementById("server-field").value = "%HOST%"; ph_mouse("#login-button", "click"); - // unknown host key - await assert_conversation("authenticity of host"); - document.getElementById("conversation-input").value = "yes"; - ph_mouse("#login-button", "click"); + // accept unknown host key + if (!"%HOST%".includes("127.0.0.1")) { + await ph_wait_present("#hostkey-message-1"); + await ph_wait_in_text("#hostkey-message-1", "%HOST%"); + ph_mouse("#login-button", "click"); + } await ph_wait_present("#conversation-prompt"); await assert_conversation("password"); diff --git a/containers/unit-tests/Dockerfile b/containers/unit-tests/Dockerfile deleted file mode 100644 index 19d8200b8a8..00000000000 --- a/containers/unit-tests/Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -ARG debian_arch=amd64 -FROM docker.io/${debian_arch}/debian:testing - -ARG personality=linux64 -ENV personality=${personality} - -COPY setup.sh / -RUN ${personality} /setup.sh ${personality} && rm -rf /setup.sh - -# 'builder' user created in setup.sh -USER builder -WORKDIR /home/builder - -ENV LANG=C.UTF-8 - -# HACK: unbreak distcheck on Debian: https://bugs.debian.org/1035546 -ENV DEB_PYTHON_INSTALL_LAYOUT=deb - -VOLUME /source - -COPY entrypoint / -ENTRYPOINT ["/entrypoint"] -CMD ["/bin/bash"] - -# for filtering from our 'exec' script -LABEL org.cockpit-project.container=unit-tests diff --git a/containers/unit-tests/README.md b/containers/unit-tests/README.md deleted file mode 100644 index 74355939702..00000000000 --- a/containers/unit-tests/README.md +++ /dev/null @@ -1,76 +0,0 @@ -# Cockpit unit test container - -This container has all build dependencies and toolchains (GCC and clang) that we -want to exercise Cockpit with, mostly for `make distcheck` and `make check-memory`. -This container runs on [GitHub](.github/workflows/unit-tests.yml), but can be easily -run locally too. - -It assumes that the Cockpit source git checkout is available in `/source`. It -will not modify that directory or take uncommitted changes into account, but it -will re-use an already existing `node_modules/` directory. - -The scripts can use either podman (preferred) or docker. If you use docker, you -need to run all commands as root. With podman the containers work as either user -or root. 
- -## Building - -The `build` script will build the `cockpit/unit-tests` and -`cockpit/unit-tests:i386` containers. Call it with an architecture to only -build one variant, e.g. `build i386`. - -## Running tests - -You need to disable SELinux with `sudo setenforce 0` for this. There is no -other way for the container to access the files in your build tree (do *not* -use the `--volume` `:Z` option, as that will destroy the file labels on the -host). - -Tests in that container get started with the `start` script. By default, this -script runs the unit tests on amd64. The script accepts a number of arguments -to modify its behaviour: - - - `--env CC=othercc` to set the `CC` environment variable inside the container (ie: - to build with a different compiler) - - `--image-tag` to specify a different tag to use for the `cockpit/unit-tests` image - (eg: `--image-tag=i386`) - -Additionally, a testing scenario can be provided with specifying a `make` target. -Supported scenarios are: - - - `check-memory`: runs 'make check-memory' (ie: run the unit tests under valgrind) - - `distcheck`: runs 'make distcheck' and some related checks - - `pycheck`: runs browser unit tests against the Python bridge - -Some examples: - - $ ./start --make check-memory # run the valgrind tests on amd64 - - $ ./start --env=CC=clang --make check-memory # run the valgrind tests, compiled with clang - - $ ./start --image-tag=i386 distcheck # run the distcheck tests on i386 - -## Debugging tests - -For interactive debugging, run a shell in the container: - - $ ./start - -You will find the cockpit source tree (from the host) mounted at `/source` in -the container. Run - - $ /source/autogen.sh - -to create a build tree, then you can run any make or other debugging command -interactively. - -You can also attach to another container using the provided `exec` script. For example: - - $ ./exec uname -a # run a command as the "builder" user - - $ ./exec --root # start a shell as root - -## More Info - - * [Cockpit Project](https://cockpit-project.org) - * [Cockpit Development](https://github.com/cockpit-project/cockpit) diff --git a/containers/unit-tests/build b/containers/unit-tests/build deleted file mode 100755 index 03359eb27d0..00000000000 --- a/containers/unit-tests/build +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh -set -eu - -dir=$(dirname "$0") - -if [ -z "${1:-}" ] || [ "${1:-}" = amd64 ]; then - podman build --build-arg debian_arch=amd64 --build-arg personality=linux64 -t ghcr.io/cockpit-project/unit-tests ${dir} -fi - -if [ -z "${1:-}" ] || [ "${1:-}" = i386 ]; then - podman build --build-arg debian_arch=i386 --build-arg personality=linux32 -t ghcr.io/cockpit-project/unit-tests:i386 ${dir} -fi diff --git a/containers/unit-tests/entrypoint b/containers/unit-tests/entrypoint deleted file mode 100755 index 8a53d107449..00000000000 --- a/containers/unit-tests/entrypoint +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/sh -e - -export TEST_BROWSER=firefox - -printf "Host: " && uname -srvm - -. 
/usr/lib/os-release -printf "Container: \${NAME} \${VERSION} / " && ${personality} uname -nrvm -echo - -set -ex -exec ${personality} -- "$@" diff --git a/containers/unit-tests/setup.sh b/containers/unit-tests/setup.sh deleted file mode 100755 index 24764675438..00000000000 --- a/containers/unit-tests/setup.sh +++ /dev/null @@ -1,79 +0,0 @@ -#!/bin/sh -ex - -dependencies="\ - appstream-util \ - autoconf \ - automake \ - build-essential \ - clang \ - curl \ - dbus \ - firefox-esr \ - flake8 \ - gcc-multilib \ - gdb \ - git \ - glib-networking \ - glib-networking-dbgsym\ - gtk-doc-tools \ - gettext \ - libc6-dbg \ - libfontconfig1 \ - libglib2.0-0-dbgsym \ - libglib2.0-dev \ - libgnutls28-dev \ - libjavascript-minifier-xs-perl \ - libjson-glib-dev \ - libjson-perl \ - libkrb5-dev \ - libpam0g-dev \ - libpcp-import1-dev \ - libpcp-pmda3-dev \ - libpcp3-dev \ - libpolkit-agent-1-dev \ - libpolkit-gobject-1-dev \ - libssh-4-dbgsym \ - libssh-dev \ - libsystemd-dev \ - mypy \ - npm \ - nodejs \ - pkg-config \ - python3 \ - python3-mypy \ - python3-pip \ - python3-pytest-asyncio \ - python3-pytest-cov \ - python3-pytest-timeout \ - ssh \ - strace \ - valgrind \ - vulture \ - xmlto \ - xsltproc \ -" - -echo "deb http://deb.debian.org/debian-debug/ testing-debug main" > /etc/apt/sources.list.d/ddebs.list -echo "deb http://deb.debian.org/debian-debug/ testing-proposed-updates-debug main" >> /etc/apt/sources.list.d/ddebs.list -apt-get update -apt-get install -y --no-install-recommends eatmydata -DEBIAN_FRONTEND=noninteractive eatmydata apt-get install -y --no-install-recommends ${dependencies} - -# https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1057968 -echo "deb http://deb.debian.org/debian unstable main" > /etc/apt/sources.list.d/unstable.list -apt-get update -apt-get install -y --no-install-recommends python3-flake8 -rm /etc/apt/sources.list.d/unstable.list - -adduser --gecos "Builder" builder - -if [ "$(uname -m)" = "x86_64" ] ; then - # See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1030835 - pip install --break-system-packages ruff -fi - -# minimize image -# useful command: dpkg-query --show -f '${package} ${installed-size}\n' | sort -k2n -dpkg -P --force-depends libgl1-mesa-dri libglx-mesa0 perl - -rm -rf /var/cache/apt /var/lib/apt /var/log/* /usr/share/doc/ /usr/share/man/ /usr/share/help /usr/share/info diff --git a/containers/unit-tests/start b/containers/unit-tests/start deleted file mode 100755 index e01cb66370a..00000000000 --- a/containers/unit-tests/start +++ /dev/null @@ -1,190 +0,0 @@ -#!/usr/bin/python3 - -# This file is part of Cockpit. -# -# Copyright (C) 2022 Red Hat, Inc. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -import argparse -import os -import shlex -import sys -import tempfile -from subprocess import run - - -def logged(func): - def wrapper(args, **kwargs): - print('+', shlex.join(args)) - return func(args, **kwargs) - return wrapper - - -def git(*args): - run(['git', *args], check=True) - - -def git_output(*args): - return run(['git', *args], check=True, capture_output=True, text=True).stdout.strip() - - -def podman(*args, check=True): - if os.path.exists('/run/.toolboxenv'): - cmd = ['flatpak-spawn', '--host', 'podman', *args] - else: - cmd = ['podman', *args] - - return run(cmd, check=check) - - -class PodmanTemporaryDirectory(tempfile.TemporaryDirectory): - """TemporaryDirectory subclass capable of removing files owned by subuids""" - @classmethod - def _rmtree(cls, name, ignore_errors=False): # noqa: FBT002 - del ignore_errors # can't remove or rename this kwarg - podman('unshare', 'rm', '-r', name) - - def __enter__(self): - # Override the TemporaryDirectory behaviour of returning its name here - return self - - -class SourceDirectory(PodmanTemporaryDirectory): - def __init__(self): - super().__init__(prefix='cockpit-source.') - - def prepare(self, args): - if args.branch: - opts = ['-c', 'advice.detachedHead=false', '-b', args.branch] - else: - opts = [] - - git('clone', '--recurse-submodule=vendor/*', *opts, '.', self.name) - - if not args.head and not args.branch: - if stash := git_output('stash', 'create'): - git('-C', self.name, 'fetch', '--quiet', '--no-write-fetch-head', 'origin', stash) - git('-C', self.name, 'stash', 'apply', stash) - - if not args.no_node_modules: - run([f'{self.name}/tools/node-modules', 'checkout'], check=True) - - -class ResultsDirectory(PodmanTemporaryDirectory): - def __init__(self): - super().__init__(prefix='cockpit-results.') - - def copy_out(self, destination): - podman('unshare', 'cp', '-rT', self.name, destination) - - -def main(): - parser = argparse.ArgumentParser() - parser.add_argument('--verbose', '-v', action='store_true', help='Show commands when running them') - parser.add_argument('--results', metavar='DIRECTORY', help="Copy container /results to the given host directory") - - group = parser.add_argument_group(title='Container options') - group.add_argument('--image', default='ghcr.io/cockpit-project/unit-tests', help='Container image to use') - group.add_argument('--image-tag', default='latest', help='Container image tag to use') - group.add_argument('--env', metavar='NAME=VAL', action='append', default=[], - help='Set an environment variable in the container') - group.add_argument('--network', action="store_true", - help="Enable network in the container (default: disabled)") - group.add_argument('--interactive', '-i', action="store_true", - help="Interactive mode (implied by no command or script)") - group.add_argument('--tty', '-t', action="store_true", - help="Allocate a pseudoterminal (implied by no command or script)") - group.add_argument('--user', help="Pass through the --user flag to podman") - group.add_argument('--entrypoint', metavar='CMD', help="Provide the --entrypoint flag to podman") - group.add_argument('--workdir', help="Provide the --workdir flag to podman") - - group = parser.add_argument_group(title='What to build').add_mutually_exclusive_group() - group.add_argument('--head', action='store_true', help='Build the HEAD commit') - group.add_argument('-b', dest='branch', metavar='NAME', help='Build the named branch or tag') - group.add_argument('--work-tree', action='store_true', - help='Build the HEAD commit, plus 
changes on the filesystem (default)') - - group = parser.add_argument_group(title='Preparation').add_mutually_exclusive_group() - group.add_argument('--no-node-modules', action='store_true', - help='Disable checking out node_modules/ during preparation') - - group = parser.add_argument_group(title='Command to run').add_mutually_exclusive_group() - group.add_argument('-c', metavar='SCRIPT', dest='script', help="Run the provided shell script") - group.add_argument('--make-dist', action='store_true', help='Run `make dist`. Requires --results.') - group.add_argument('--make', metavar='TARGET', help='Run `make` on the given target') - # re: default=[]: https://github.com/python/cpython/issues/86020 - group.add_argument('command', metavar='CMD', nargs='*', default=[], help="Run a normal command, with arguments") - - args = parser.parse_args() - - if args.results and os.path.exists(args.results): - parser.error(f'--results directory `{args.results}` already exists') - - if args.make_dist and not args.results: - parser.error('--make-dist requires --results directory') - - if args.verbose: - global run - run = logged(run) - - with SourceDirectory() as source_dir, ResultsDirectory() as results_dir: - options = { - '--rm', - '--log-driver=none', - f'--volume={source_dir.name}:/source:Z,U', - } - - if args.results: - options.add(f'--volume={results_dir.name}:/results:Z,U') - - if not args.network: - options.add('--network=none') - if args.user: - options.add(f'--user={args.user}') - if args.entrypoint: - options.add(f'--entrypoint={args.entrypoint}') - if args.workdir: - options.add(f'--workdir={args.workdir}') - if args.interactive: - options.add('--interactive') - if args.tty: - options.add('--tty') - for keyval in args.env: - options.add(f'--env={keyval}') - - command = [] - if args.command: - command = args.command - elif args.script: - command = ['sh', '-c', args.script] - elif args.make: - command = ['sh', '-c', '/source/autogen.sh; exec make -j$(nproc) ' + shlex.quote(args.make)] - elif args.make_dist: - command = ['sh', '-c', 'cp -t /results $(/source/tools/make-dist)'] - else: - options.update(['--tty', '--interactive']) - - source_dir.prepare(args) - - result = podman('run', *options, f'{args.image}:{args.image_tag}', *command) - - if result.returncode == 0 and args.results: - results_dir.copy_out(args.results) - - return result.returncode - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/containers/ws/Dockerfile b/containers/ws/Containerfile similarity index 91% rename from containers/ws/Dockerfile rename to containers/ws/Containerfile index 1c43f6982ef..a5f78144a1f 100644 --- a/containers/ws/Dockerfile +++ b/containers/ws/Containerfile @@ -1,4 +1,4 @@ -FROM registry.fedoraproject.org/fedora:39 AS builder +FROM registry.fedoraproject.org/fedora:40 AS builder LABEL maintainer="cockpit-devel@lists.fedorahosted.org" LABEL VERSION=main diff --git a/containers/ws/README.md b/containers/ws/README.md index f997c21d81b..bdafa404716 100644 --- a/containers/ws/README.md +++ b/containers/ws/README.md @@ -1,25 +1,37 @@ -# Cockpit on Fedora CoreOS or other container hosts +# Cockpit webserver container -The standard Fedora and Red Hat Enterprise Linux CoreOS images does not contain -Cockpit packages. +[Cockpit](https://cockpit-project.org/) is a web-based graphical interface for Linux servers. +It is [packaged in most major Linux distributions](https://cockpit-project.org/running.html). -1. 
Install Cockpit packages as overlay RPMs: - ``` - rpm-ostree install cockpit-system cockpit-ostree cockpit-podman - ``` +This container image provides Cockpit's web server and a subset of available pages (like the cockpit-system package) +for deployment on container hosts such as Fedora CoreOS or Kubernetes, where installing rpms is difficult or impossible. + +## Usage on container host distributions + +The standard Fedora and Red Hat Enterprise Linux CoreOS images do not contain Cockpit packages. The +`cockpit/ws` container includes a minimal set of builtin Cockpit pages which are being used when connecting to +such a machine, i.e. a host which doesn't have the `cockpit-bridge` package installed. + +If these builtin pages are not enough for your use cases, you can install desired Cockpit packages +as overlay RPMs. For example: + +``` +rpm-ostree install cockpit-system cockpit-ostree cockpit-podman +reboot +``` - Depending on your configuration, you may want to use - [other extensions](https://apps.fedoraproject.org/packages/s/cockpit-) as - well, such as `cockpit-kdump` or `cockpit-networkmanager`. +Depending on your configuration, you may want to use +[other extensions](https://packages.fedoraproject.org/search?query=cockpit-) as +well, such as `cockpit-podman` or `cockpit-networkmanager`. - If you have a custom-built OSTree, simply include the same packages in your build. +If you have a custom-built OSTree, simply include the same packages in your build. -2. Reboot +These packages are enough when the CoreOS machine is only connected to through another host running Cockpit. -Steps 1 and 2 are enough when the CoreOS machine is only connected to through another host running Cockpit. +You also need to run a Cockpit web server somewhere, as the "entry point" for browsers. That can +then connect to the local host or any remote machine via ssh to get a Cockpit UI for that machine. -If you want to also run a web server to log in directly on the CoreOS host, you -can use this container in two modes. +This web server can be deployed as container. It has two modes, which are described below. ## Privileged ws container @@ -97,11 +109,15 @@ can mount your known host keys into the container at -v /path/to/known_hosts:/etc/ssh/ssh_known_hosts:ro,Z -You can also mount an encrypted private key inside the container and set the environment variable `COCKPIT_SSH_KEY_PATH` to point to it: +You can also mount encrypted private keys inside the container. You can set an environment variable, `COCKPIT_SSH_KEY_PATH_MYHOST`, where `MYHOST` is the uppercased hostname used in the `Connect to` field, and cockpit will use that private key to login for the specified host. Private keys can be set for multiple hosts this way by changing the value of `MYHOST`. You can also set an environment variable, `COCKPIT_SSH_KEY_PATH`, which will be used as a fallback key if no host-specific key is set: - -e COCKPIT_SSH_KEY_PATH=/id_rsa -v ~/.ssh/id_rsa:/id_rsa:ro,Z + -e COCKPIT_SSH_KEY_PATH_MYHOST=/.ssh/myhost_id_rsa \ + -e COCKPIT_SSH_KEY_PATH_MYSERVER=/.ssh/myserver_id_rsa \ + -e COCKPIT_SSH_KEY_PATH_192.168.1.1=/.ssh/another_id_rsa \ + -e COCKPIT_SSH_KEY_PATH=/.ssh/id_rsa \ + -v ~/.ssh/:/.ssh:ro,Z -Then cockpit will use the provided password to decrypt the key and establish an SSH connection to the given host using that private key. +Private keys can be encrypted; then cockpit uses the provided password to decrypt the key. 
## More Info diff --git a/containers/ws/cockpit-auth-ssh-key b/containers/ws/cockpit-auth-ssh-key index d77fc2cd496..d585a478b92 100755 --- a/containers/ws/cockpit-auth-ssh-key +++ b/containers/ws/cockpit-auth-ssh-key @@ -14,16 +14,16 @@ # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License -# along with Cockpit; If not, see . -# -# This command is meant to be used as an authentication command launched -# by cockpit-ws. It asks for authentication from cockpit-ws and expects -# to receive a basic auth header in response. If COCKPIT_SSH_KEY_PATH is set -# we will try to decrypt the key with the given password. If successful -# we send the decrypted key to cockpit-ws for use with cockpit-ssh. -# Once finished we exec cockpit-ssh to actually establish the ssh connection. -# All communication with cockpit-ws happens on stdin and stdout using the -# cockpit protocol +# along with Cockpit; If not, see . + +# This command is meant to be used as an authentication command launched by +# cockpit-ws. It asks for authentication from cockpit-ws and expects to +# receive a basic auth header in response. If COCKPIT_SSH_KEY_PATH or +# COCKPIT_SSH_KEY_PATH_{HOSTNAME} is set, we will try to decrypt the key with +# the given password. If successful we send the decrypted key to cockpit-ws +# for use with ssh. Once finished we exec cockpit.beiboot to actually +# establish the ssh connection. All communication with cockpit-ws happens on +# stdin and stdout using the cockpit protocol # (https://github.com/cockpit-project/cockpit/blob/main/doc/protocol.md) import base64 @@ -33,8 +33,6 @@ import subprocess import sys import time -COCKPIT_SSH_COMMAND = "/usr/libexec/cockpit-ssh" - def usage(): print("usage", sys.argv[0], "[user@]host[:port]", file=sys.stderr) @@ -42,8 +40,8 @@ def usage(): def send_frame(content): - data = json.dumps(content).encode('utf-8') - os.write(1, str(len(data) + 1).encode('utf-8')) + data = json.dumps(content).encode() + os.write(1, str(len(data) + 1).encode()) os.write(1, b"\n\n") os.write(1, data) @@ -109,7 +107,7 @@ def read_frame(fd): size = size - len(d) data += d - return data.decode("UTF-8") + return data.decode() def read_auth_reply(): @@ -129,7 +127,7 @@ def decode_basic_header(response): assert response assert response.startswith(starts), response - val = base64.b64decode(response[len(starts):].encode('utf-8')).decode("utf-8") + val = base64.b64decode(response[len(starts):].encode()).decode() user, password = val.split(':', 1) return user, password @@ -150,7 +148,7 @@ cat /run/password""") pass_fd = os.open("/run/password", os.O_CREAT | os.O_EXCL | os.O_WRONLY | os.O_CLOEXEC, mode=0o600) try: - os.write(pass_fd, password.encode("UTF-8")) + os.write(pass_fd, password.encode()) os.close(pass_fd) p = subprocess.run(["ssh-add", "-t", "30", fname], @@ -172,7 +170,8 @@ def main(args): usage() host = args[1] - key_name = os.environ.get("COCKPIT_SSH_KEY_PATH") + key_env = f"COCKPIT_SSH_KEY_PATH_{host.upper()}" + key_name = os.environ.get(key_env) or os.environ.get("COCKPIT_SSH_KEY_PATH") if key_name: send_auth_command("*", None) try: @@ -189,7 +188,7 @@ def main(args): {"password": "denied"}) return - os.execlpe(COCKPIT_SSH_COMMAND, COCKPIT_SSH_COMMAND, host, os.environ) + os.execlpe("python3", "python3", "-m", "cockpit.beiboot", host, os.environ) if __name__ == '__main__': diff --git a/containers/ws/install.sh b/containers/ws/install.sh index 9cc9e38b886..768542dace8 100755 --- a/containers/ws/install.sh +++ 
b/containers/ws/install.sh @@ -3,18 +3,33 @@ set -ex OSVER=$(. /etc/os-release && echo "$VERSION_ID") +INSTALLROOT=/build +INSTALL="dnf install -y --installroot=$INSTALLROOT --releasever=$OSVER --setopt=install_weak_deps=False" -INSTALL="dnf install -y --installroot=/build --releasever=$OSVER --setopt=install_weak_deps=False" +dnf install -y 'dnf-command(download)' cpio $INSTALL coreutils-single util-linux-core sed sscg python3 openssh-clients arch=`uname -p` rpm=$(ls /container/rpms/cockpit-ws-*$OSVER.*$arch.rpm /container/rpms/cockpit-bridge-*$OSVER.*$arch.rpm || true) +unpack() { + rpm2cpio "$1" | cpio -i --make-directories --directory=$INSTALLROOT +} + # If there are rpm files in the current directory we'll install those +# -system and -networkmanager are only for beibooting; don't install their dependencies if [ -n "$rpm" ]; then $INSTALL /container/rpms/cockpit-ws-*$OSVER.*$arch.rpm /container/rpms/cockpit-bridge-*$OSVER.*$arch.rpm + for rpm in /container/rpms/cockpit-system-*$OSVER.*$arch.rpm \ + /container/rpms/cockpit-networkmanager-*$OSVER.*$arch.rpm; do + unpack $rpm + done else $INSTALL cockpit-ws cockpit-bridge + dnf download cockpit-networkmanager cockpit-system + for rpm in cockpit-networkmanager*.rpm cockpit-system*.rpm; do + unpack $rpm + done fi rm -rf /build/var/cache/dnf /build/var/lib/dnf /build/var/lib/rpm* /build/var/log/* diff --git a/doc/anaconda.md b/doc/anaconda.md index cca2b9552ea..0ef6fb2ae01 100644 --- a/doc/anaconda.md +++ b/doc/anaconda.md @@ -15,22 +15,10 @@ Entering Anaconda mode ---------------------- The "storaged" page is put into Anaconda mode by storing a -"cockpit_anaconda" item in its `window.localStorage`. The value +"cockpit_anaconda" item in its `window.sessionStorage`. The value should be a JSON encoded object, the details of which are explained below. -Since both Anaconda and the storaged page are served from the same -origin, Anaconda can just execute something like this: - -``` - window.localStorage.setItem("cockpit_anaconda", - JSON.stringify({ - "mount_point_prefix": "/sysroot", - "available_devices": [ "/dev/sda" ] - })); - window.open("/cockpit/@localhost/storage/index.html", "storage-tab"); -``` - Ignoring storage devices ------------------------ @@ -38,7 +26,7 @@ Anaconda needs to tell Cockpit which devices can be used to install the OS on. This is done with the "available_devices" entry, which is an array of strings. -``` +```json { "available_devices": [ "/dev/sda" ] } @@ -54,7 +42,7 @@ Mount point prefix Cockpit can be put into a kind of "chroot" environment by giving it a mount point prefix like so: -``` +```json { "mount_point_prefix": "/sysroot" } @@ -71,8 +59,6 @@ configuration into the real /etc/fstab (_not_ /sysroot/etc/fstab). This is done for the convenience of Cockpit, and Anaconda is not expected to read it. -If and how Cockpit communicates back to Anaconda is still open. - BIOS or EFI ----------- @@ -82,7 +68,7 @@ created easily. This is done by setting the "efi" flag to true or false: -``` +```json { "efi": true } @@ -97,7 +83,7 @@ back to the type of the filesystem mounted as "/". When in Anaconda mode, there might not be anything assigned to "/" yet, and in this case, Cockpit will use the type from "default_fsys_type". -``` +```json { "default_fsys_type": "xfs" } @@ -106,18 +92,18 @@ case, Cockpit will use the type from "default_fsys_type". 
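Putting the options above together: the example removed earlier in this patch used `window.localStorage`; with the move to session storage described here, an installer could enter Anaconda mode roughly like this (a hedged sketch; the values and the target URL are illustrative only):

```js
// Hedged sketch: store the Anaconda-mode options, then open the storage page.
// All values below are examples, not requirements.
window.sessionStorage.setItem("cockpit_anaconda",
    JSON.stringify({
        mount_point_prefix: "/sysroot",
        available_devices: ["/dev/sda"],
        efi: true,
        default_fsys_type: "xfs"
    }));
window.open("/cockpit/@localhost/storage/index.html", "storage-tab");
```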
Exported information -------------------- -Cockpit maintains some information in local browser storage that can -be used by Anaconda to learn things that it doesn't get from -blivet. This is mostly information from fstab and crypttab. +Cockpit maintains some information in the session storage that can be +used by Anaconda to learn things that it doesn't get from blivet. This +is mostly information from fstab. The "cockpit_mount_points" entry in local storage will have a JSON encoded object, for example: -``` +```json { "/dev/sda": { "type": "filesystem", - "dir": "/", + "dir": "/" }, "/dev/sdb": { "type": "swap" @@ -127,15 +113,14 @@ encoded object, for example: "content": { "type": "filesystem", "subvolumes": { - "home": { dir: "/home" } + "home": { "dir": "/home" } } } } } ``` -The keys are pathnames of device nodes in /dev, they are never -symlinks to device nodes. +The keys are pathnames of device nodes in /dev. Each value is an object with a "type" field. The type determines which other fields might be present, and what they mean. The following @@ -161,8 +146,18 @@ types might appear: An encrypted device. It has a "content" field with a value that is structured like a value for "cockpit_mount_points", i.e., a object with a "type" field and maybe a "dir" field if "type" is - "filesystem". This is also present when the crypto device is closed. + "filesystem". This might also be present when the crypto device is + closed. It might also have a "cleartext_device" field if the encrpyted device - is currently open. (Although this is something that blivet should be - able to tell.) + is currently open. + +Cockpit does some magic (via the "x-parent" options in fstab and +crypttab) to produce information also for locked LUKS devices, and +inactive logical volumes. + +Cockpit also remembers and exports encryption passphrases in session +storage, in the "cockpit_passphrases" entry. This is a map from device +names to cleartext passphrases. This is only done when Cockpit runs in +a "secure context", see +https://developer.mozilla.org/en-US/docs/Web/Security/Secure_Contexts diff --git a/doc/authentication.md b/doc/authentication.md index 9f91d7c0bd1..976fbd00480 100644 --- a/doc/authentication.md +++ b/doc/authentication.md @@ -97,19 +97,20 @@ Remote machines Cockpit also supports logging directly into remote machines. The remote machine to connect to is provided by using a application name that begins with `cockpit+=`. -The default command used for this is cockpit-ssh. +The default command used for this is `python3 -m cockpit.beiboot`, which +invokes `ssh`. -The section `SSH-Login` defines the options for all ssh commands. The section +The section `Ssh-Login` defines the options for all ssh commands. The section has the same options as the other authentication sections with the following additions. - * `host` The default host to log into. Defaults to 127.0.0.1. + * `host` The default host to log into. Defaults to 127.0.0.1. That host's key + will not be checked/validated. * `connectToUnknownHosts`. By default cockpit will refuse to connect to any machines that - are not already present in ssh's global `known_hosts` file (usually - `/etc/ssh/ssh_known_hosts`). Set this to `true` is to allow those connections - to proceed. + are not already present in ssh's global `known_hosts` file (usually + `/etc/ssh/ssh_known_hosts`). Set this to `true` is to allow those connections + to proceed. -This uses the [cockpit-ssh](https://github.com/cockpit-project/cockpit/tree/main/src/ssh) -bridge. 
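As a hedged sketch, a corresponding section in cockpit.conf using the options above could look like this (the address is purely illustrative):

    [Ssh-Login]
    host = 10.0.0.1
    connectToUnknownHosts = true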
After the user authentication with the `"*"` challenge, if the remote +After the user authentication with the `"*"` challenge, if the remote host is not already present in any local `known_hosts` file, this will send an `"x-host-key"` challenge: @@ -159,7 +160,7 @@ Actions Setting an action can modify the behavior for an auth scheme. Currently two actions are supported. - * **remote-login-ssh** Use the `SSH-Login` section instead. + * **remote-login-ssh** Use the `Ssh-Login` section instead. * **none** Disable this auth scheme. To configure an action add the `action` option. For example to disable basic authentication, @@ -191,7 +192,7 @@ The following environment variables are set by cockpit-ws when spawning an auth * **COCKPIT_REMOTE_PEER** Set to the ip address of the connecting user. -The following environment variables are used to set options for the `cockpit-ssh` process: +The following environment variables are used to set options for SSH connections: * **COCKPIT_SSH_CONNECT_TO_UNKNOWN_HOSTS** Set to `1` to allow connecting to hosts that are not present in the current `known_hosts` files. If not set, @@ -201,6 +202,3 @@ The following environment variables are used to set options for the `cockpit-ssh * **COCKPIT_SSH_KNOWN_HOSTS_FILE** Path to knownhost files. Defaults to `PACKAGE_SYSCONF_DIR/ssh/ssh_known_hosts` - - * **COCKPIT_SSH_BRIDGE_COMMAND** Command to launch after a ssh connection is - established. Defaults to `cockpit-bridge` if not provided. diff --git a/doc/branding.md b/doc/branding.md index d8a102606f0..45d2f3c0af6 100644 --- a/doc/branding.md +++ b/doc/branding.md @@ -10,7 +10,7 @@ system itself, and are incorporated into the branding. ## How Cockpit Selects Branding -In ```$prefix/share/cockpit/branding``` are multiple directories, each of which +In `$prefix/share/cockpit/branding` are multiple directories, each of which contain branding information. Branding files are served from the directories based in the order below, if a file is not present in the first directory on the list, the second will be consulted, and so on. @@ -20,8 +20,8 @@ the list, the second will be consulted, and so on. $prefix/share/cockpit/branding/default $prefix/share/cockpit/static -The ```$ID``` and ```$VARIANT_ID``` variables are those listed in ```/etc/os-release```, -and ```$prefix``` is usually ```/usr```. +The `$ID` and `$VARIANT_ID` variables are those listed in `/etc/os-release`, +and `$prefix` is usually `/usr`. All of the files served from these directories are available over HTTP without authentication. This is required since these resources will be used @@ -43,39 +43,34 @@ on the system. ## Branding Styles -The Cockpit login screen and navigation area loads a ```branding.css``` file +The Cockpit login screen and navigation area loads a `branding.css` file from the above directories. 
The branding.css file should override the following areas of the login screen: - /* Background of the login prompt */ - body.login-pf { - background: url("my-background-image.jpg"); - background-size: cover; - } - - /* Upper right logo of login screen */ - #badge { - width: 225px; - height: 80px; - background-image: url("logo.png"); - background-size: contain; - background-repeat: no-repeat; - } - - /* The brand text above the login fields */ - #brand { - font-size: 18pt; - text-transform: uppercase; - content: "${NAME} ${VARIANT}"; - } - -The ```branding.css``` file should override the following areas of the navigation bar: - - /* The text in the upper left corner of logged in Cockpit - #index-brand { - content: "${NAME} ${VARIANT}"; - } - -Notice how we can use variables from ```/etc/os-release``` in the branding. +```css +/* Background of the login prompt */ +body.login-pf { + background: url("my-background-image.jpg"); + background-size: cover; +} + +/* Upper right logo of login screen */ +#badge { + width: 225px; + height: 80px; + background-image: url("logo.png"); + background-size: contain; + background-repeat: no-repeat; +} + +/* The brand text above the login fields */ +#brand { + font-size: 18pt; + text-transform: uppercase; + content: "${NAME} ${VARIANT}"; +} +``` + +Notice how we can use variables from `/etc/os-release` in the branding. The value for these variables come from the machine that cockpit is logged into. diff --git a/doc/guide/authentication.xml b/doc/guide/authentication.xml index e3c8d6aa176..400d921db6c 100644 --- a/doc/guide/authentication.xml +++ b/doc/guide/authentication.xml @@ -61,11 +61,35 @@ +
+ Directly logging into a secondary server without a primary session + + It is also possible to log into a secondary server without + opening a session on the primary server. This is useful if you + are not actually interested in the primary server and would only + use it because you do not have direct network access to the + secondary server. + + In this case, cockpit-ws still runs on + the primary server, but the credentials from the login screen are + directly used with SSH to log into the secondary server given in + the "Connect To" field of the login screen. + + Thus, the PAM configuration and accounts on the primary + server don't matter at all. Often, the only purpose of the primary + server is to sit on the boundary of your network and forward + connections to internal machines. + + In this case, the login page will prompt you to verify + unknown SSH keys. Accepted keys will be remembered in the local + storage of your browser. +
+
Logging into a secondary server from the primary session - Once you have a session on the primary server you will be - able to connect to additional servers by using the host switching + Once you have a session on the primary server, it is possible + connect to additional servers by using the host switching UI of the Cockpit Shell. This is useful if you have direct network access to the primary server, but not to the secondary server. @@ -75,8 +99,22 @@ of running a interactive shell there, however, it starts a cockpit-bridge process. - Thus, these servers will need to be running an SSH server on - port 22 and be configured to support one of the following + Warning: Unlike with SSH on the command line + though, this will also load and use the Cockpit pages (i.e. JavaScript) + from the remote machine, which means that the remote machine can execute + arbitrary code on your primary and all other connected secondary machines. + Hence, only connect to machines which you trust. + + Due to this security risk, this host switcher functionality + is disabled by default, except on long-term stable Linux + distributions (Red Hat Enterprise Linux 9, Debian 12, and Ubuntu + 22.04/24.04 LTS). If you are comfortable with the security + implications, you can enable it manually with the + AllowMultiHost option in + cockpit.conf. + + These servers will need to be running an SSH server + and be configured to support one of the following authentication methods.
@@ -121,28 +159,4 @@
-
- Directly logging into a secondary server without a primary session - - It is also possible to log into a secondary server without - opening a session on the primary server. This is useful if you - are not actually interested in the primary server and would only - use it because you do not have direct network access to the - secondary server. - - In this case, cockpit-ws still runs on - the primary server, but the credentials from the login screen are - directly used with SSH to log into the secondary server given in - the "Connect To" field of the login screen. - - Thus, the PAM configuration and accounts on the primary - server don't matter at all. Often, the only purpose of the primary - server is to sit on the boundary of your network and forward - connections to internal machines. - - In this case, the login page will prompt you to verify - unknown SSH keys. Accepted keys will be remembered in the local - storage of your browser. -
- diff --git a/doc/guide/cockpit-channel.xml b/doc/guide/cockpit-channel.xml index 314d4adb15f..b25b88834c7 100644 --- a/doc/guide/cockpit-channel.xml +++ b/doc/guide/cockpit-channel.xml @@ -242,12 +242,10 @@ cockpit.transport.close([problem]) cockpit.transport.filter() -cockpit.transport.filter((message, channelid, control) => { ... }, [out]) +cockpit.transport.filter((message, channelid, control) => { ... }) Add a filter to the underlying channel transport. All incoming messages will be - passed to each of the filter callbacks that are registered. If the out - argument is equal to true then the filter will receive outgoing messages - that being sent on the underlying channel transport. + passed to each of the filter callbacks that are registered. This function is rarely used. Filter callbacks are called in the order they are registered. If a filter callback returns false then the message will not be dispatched @@ -299,53 +297,4 @@ data = cockpit.base64_decode(string, [constructor]) does not contain bytes that would be invalid for a string. - - cockpit.utf8_encoder() - -encoder = cockpit.utf8_encoder([constructor]) - - Create an encoder for encoding a string into a UTF8 sequence of bytes. - You can pass Uint8Array, Array or String - as an alternate constructor if you want the decoded data in an - alternate form. The default is to return an Array. - - - - encoder.encode() - -data = encoder.encode(string) - - Encode a string into a UTF8 sequence of bytes. - The resulting data is an array of bytes, but it's type may be - modified by passing an alternate constructor to - cockpit.utf8_encoder(). - - - - cockpit.utf8_decoder() - -decoder = cockpit.utf8_decoder([fatal]) - - Creates a decoder to decode a UTF8 sequence of bytes data into a string. - If the fatal is set to true then the decoder - will throw an exception when it encounters invalid UTF8 data. By default invalid data - will be substituted with special UTF8 characters. - - - - decoder.decode() - -string = decoder.decode(data, [options]) - - Decode an array of UTF8 bytes into a string. The data - argument may be an Array, a Uint8Array or a string containing - binary data. - If options is passed it should be a plain javascript object. If - options has a stream property equal to true, - then multiple invocations of this function can be made with parts of the UTF8 sequence - of bytes. Any trailing bytes that don't yet build a complete unicode character, will be - cached until the next invocation. To drain the last data, call this function without - the stream property set. - - diff --git a/doc/guide/cockpit-session.xml b/doc/guide/cockpit-session.xml index 467d185c563..80ce5f51214 100644 --- a/doc/guide/cockpit-session.xml +++ b/doc/guide/cockpit-session.xml @@ -30,6 +30,10 @@ promise.then(user => { ... }); "id" This is unix user id. + + "gid" + This is unix user group id. + "name" This is the unix user name like "root". @@ -40,7 +44,8 @@ promise.then(user => { ... }); "groups" - This is an array of group names to which the user belongs. + This is an array of group names to which the user belongs. Since + version 318, the first item in this list is the primary group. 
"home" diff --git a/doc/guide/cockpit-util.xml b/doc/guide/cockpit-util.xml index 9bff53e62e4..60af1b82357 100644 --- a/doc/guide/cockpit-util.xml +++ b/doc/guide/cockpit-util.xml @@ -44,75 +44,44 @@ string = cockpit.format_number(number, [precision]) cockpit.format_bytes() -string = cockpit.format_bytes(number, [factor]) -array = cockpit.format_bytes(number, [factor, options]) +string = cockpit.format_bytes(number, [options]) Formats number into a displayable string with a suffix, such as - KB or MB. Returns an array of the - formatted number and the suffix if options.separate is set to true. + kB or MB. - If specifying 1000 or 1024 is specified as a factor then an appropriate suffix - will be chosen. By default the factor is 1000. You can pass a string suffix as a - factor in which case the resulting number will be formatted with the same suffix. - - If the number is less than the factor or an unknown factor - was passed in, then the formatted number is returned without a suffix. If options.separate - is true, returns an array of [formatted_number, suffix] or - [formatted_number] if returned without a suffix. + By default, SI units are used. IEC units (1024-based) can be requested by including + base2: true in options. By default, non-integer numbers will be formatted with 3 digits of precision. This can be changed with options.precision. - If number is null or undefined an empty string or - an array without a suffix will be returned. + If number is null or undefined an empty string will be + returned. cockpit.format_bytes_per_sec() - string = cockpit.format_bytes_per_sec(number, [factor]) - array = cockpit.format_bytes_per_sec(number, [factor, options]) +string = cockpit.format_bytes_per_sec(number, [options]) Format number of bytes into a displayable speed string. - If specifying 1000 or 1024 is specified as a factor then an appropriate suffix - will be chosen. By default the factor is 1000. You can pass a string suffix as a - factor in which case the resulting number will be formatted with the same suffix. - - If the number is less than the factor or an unknown factor - was passed in, then the formatted number is returned without a suffix. If options.separate - is true, returns an array of [formatted_number, suffix] or - [formatted_number] if returned without a suffix. - - By default, non-integer numbers will be formatted with 3 digits of precision. This can be changed - with options.precision. - - If number is null or undefined an empty string or array - will be returned. + This function is mostly equivalent to cockpit.format_bytes() but the returned + value contains a unit like kB/s or MB/s. cockpit.format_bits_per_sec() - string = cockpit.format_bits_per_sec(number, [factor]) - array = cockpit.format_bytes_per_sec(number, [factor, options]) +string = cockpit.format_bits_per_sec(number, [options]) Format number of bits into a displayable speed string. - If specifying 1000 or 1024 is specified as a factor then an appropriate suffix - will be chosen. By default the factor is 1000. You can pass a string suffix as a - factor in which case the resulting number will be formatted with the same suffix. - - If the number is less than the factor or an unknown factor - was passed in, then the formatted number is returned without a suffix. If options.separate - is true, returns an array of [formatted_number, suffix] or - [formatted_number] if returned without a suffix. - - By default, non-integer numbers will be formatted with 3 digits of precision. This can be changed - with options.precision. 
+ This function is mostly equivalent to cockpit.format_bytes() but the returned + value contains a unit like kbps or Mbps. - If number is null or undefined an empty string or array - will be returned. + This function does not support IEC units. base2 may not be passed as part of + options. diff --git a/doc/guide/feature-machines.xml b/doc/guide/feature-machines.xml index 0f733aa762c..c9d1f88239e 100644 --- a/doc/guide/feature-machines.xml +++ b/doc/guide/feature-machines.xml @@ -12,9 +12,6 @@ authenticated with the logged in user's password and/or SSH keys. - Using SSH keys is only supported when the system has the - necessary APIs in libssh. - SSH host keys are stored in /etc/ssh/ssh_known_hosts. diff --git a/doc/guide/feature-pcp.xml b/doc/guide/feature-pcp.xml index 559709d27bf..ee36f3ccb6a 100644 --- a/doc/guide/feature-pcp.xml +++ b/doc/guide/feature-pcp.xml @@ -32,7 +32,7 @@ $ pmstat These metrics can also be exposed to other machines on a TCP port with pmproxy - and Redis: + and Redis or Valkey: systemctl enable --now redis pmproxy diff --git a/doc/guide/https.xml b/doc/guide/https.xml index 5cf427bd7f2..7b6146ac950 100644 --- a/doc/guide/https.xml +++ b/doc/guide/https.xml @@ -15,8 +15,8 @@ connection to HTTPS. There are some exceptions: - If an HTTP connection comes from 127.0.0.0/8, then - Cockpit will allow communication without redirecting to HTTPS. + If an HTTP connection comes from localhost (127.0.0.1 or + ::1, then Cockpit will allow communication without redirecting to HTTPS. Certain URLs, like /ping are not required to use HTTPS. @@ -68,7 +68,7 @@ getcert request -f /etc/cockpit/ws-certs.d/50-certmonger.cert \ -D myhostname.example.com \ [--ca=...] - This will not work on Red Hat Enterprise Linux/CentOS 8 by default. Adjust the SELinux type of the certificate directory to cert_t to allow certmonger to write its certificates there: + This will not work on Red Hat Enterprise Linux 8 by default. Adjust the SELinux type of the certificate directory to cert_t to allow certmonger to write its certificates there: semanage fcontext -a -t cert_t '/etc/cockpit/ws-certs\.d(/.*)?' diff --git a/doc/guide/static/gtk-doc.css b/doc/guide/static/gtk-doc.css index 56189268e93..d6e49e01f7a 100644 --- a/doc/guide/static/gtk-doc.css +++ b/doc/guide/static/gtk-doc.css @@ -5,6 +5,7 @@ border: solid 1px #d3d7cf; padding: 0.5em; } + .programlisting { /* tango:sky blue 0/1 */ @@ -12,11 +13,13 @@ border: solid 1px #729fcf; padding: 0.5em; } + .variablelist { padding: 4px; - margin-left: 3em; + margin-inline-start: 3em; } + .variablelist td:first-child { vertical-align: top; @@ -26,27 +29,26 @@ sup a.footnote { position: relative; - top: 0em ! important; + inset-block-start: 0 ! 
important; } /* this is needed so that the local anchors are displayed below the naviagtion */ div.footnote a[name], div.refnamediv a[name], div.refsect1 a[name], div.refsect2 a[name], div.index a[name], div.glossary a[name], div.sect1 a[name] { display: inline-block; position: relative; - top:-5em; + inset-block-start:-5em; } /* this seems to be a bug in the xsl style sheets when generating indexes */ div.index div.index { - top: 0em; + inset-block-start: 0; } /* make space for the fixed navigation bar and add space at the bottom so that * link targets appear somewhat close to top */ body { - padding-top: 3.2em; - padding-bottom: 20em; + padding-block: 3.2em 20em; } /* style and size the navigation bar */ table.navigation#top @@ -55,23 +57,25 @@ /* tango:scarlet red 0/1 */ background: #ffe6e6; border: solid 1px #ef2929; - margin-top: 0; - margin-bottom: 0; - top: 0; - left: 0; - height: 3em; + margin-block: 0; + inset-block-start: 0; + inset-inline-start: 0; + block-size: 3em; z-index: 10; } + .navigation a, .navigation a:visited { /* tango:scarlet red 3 */ color: #a40000; } + .navigation a:hover { /* tango:scarlet red 1 */ color: #ef2929; } + td.shortcuts { /* tango:scarlet red 1 */ @@ -85,17 +89,17 @@ visibility: collapse; display: none; } + div.titlepage table.navigation { visibility: visible; display: table; /* tango:scarlet red 0/1 */ background: #ffe6e6; border: solid 1px #ef2929; - margin-top: 0; - margin-bottom: 0; - top: 0; - left: 0; - height: 3em; + margin-block: 0; + inset-block-start: 0; + inset-inline-start: 0; + block-size: 3em; } } @@ -106,13 +110,15 @@ div.gallery-float { - float: left; + float: inline-start; padding: 10px; } + div.gallery-float img { border-style: none; } + div.gallery-spacer { clear: both; @@ -124,6 +130,7 @@ a, a:visited /* tango:sky blue 2 */ color: #3465a4; } + a:hover { text-decoration: underline; @@ -134,7 +141,7 @@ a:hover div.table table { border-collapse: collapse; - border-spacing: 0px; + border-spacing: 0; /* tango:aluminium 3 */ border: solid 1px #babdb6; } @@ -158,14 +165,14 @@ hr /* tango:aluminium 3 */ color: #babdb6; background: #babdb6; - border: none 0px; - height: 1px; + border: none 0; + block-size: 1px; clear: both; } .footer { - padding-top: 3.5em; + padding-block-start: 3.5em; /* tango:aluminium 3 */ color: #babdb6; text-align: center; @@ -178,31 +185,35 @@ hr background: #ffeed9; border-color: #ffb04f; } + .note { /* tango:chameleon 0/0.5 */ background: #d8ffb2; border-color: #abf562; } + .note, .warning { padding: 0.5em; border-width: 1px; border-style: solid; } + .note h3, .warning h3 { - margin-top: 0.0em + margin-block-start: 0.0 } + .note p, .warning p { - margin-bottom: 0.0em + margin-block-end: 0.0 } /* blob links */ h2 .extralinks, h3 .extralinks { - float: right; + float: inline-end; /* tango:aluminium 3 */ color: #babdb6; font-size: 80%; @@ -221,44 +232,47 @@ h2 .extralinks, h3 .extralinks .listing_code .programlisting .cbracket { color: #a40000; } /* tango: scarlet red 3 */ .listing_code .programlisting .comment { color: #a1a39d; } /* tango: aluminium 4 */ -.listing_code .programlisting .function { color: #000000; font-weight: bold; } +.listing_code .programlisting .function { color: #000; font-weight: bold; } .listing_code .programlisting .function a { color: #11326b; font-weight: bold; } /* tango: sky blue 4 */ .listing_code .programlisting .keyword { color: #4e9a06; } /* tango: chameleon 3 */ .listing_code .programlisting .linenum { color: #babdb6; } /* tango: aluminium 3 */ -.listing_code .programlisting .normal { 
color: #000000; } +.listing_code .programlisting .normal { color: #000; } .listing_code .programlisting .number { color: #75507b; } /* tango: plum 2 */ .listing_code .programlisting .preproc { color: #204a87; } /* tango: sky blue 3 */ .listing_code .programlisting .string { color: #c17d11; } /* tango: chocolate 2 */ -.listing_code .programlisting .type { color: #000000; } +.listing_code .programlisting .type { color: #000; } .listing_code .programlisting .type a { color: #11326b; } /* tango: sky blue 4 */ .listing_code .programlisting .symbol { color: #ce5c00; } /* tango: orange 3 */ .listing_frame { /* tango:sky blue 1 */ border: solid 1px #729fcf; - padding: 0px; + padding: 0; } .listing_lines, .listing_code { - margin-top: 0px; - margin-bottom: 0px; + margin-block: 0; padding: 0.5em; } + .listing_lines { /* tango:sky blue 0.5 */ background: #a6c5e3; /* tango:aluminium 6 */ color: #2e3436; } + .listing_code { /* tango:sky blue 0 */ background: #e6f3ff; } + .listing_code .programlisting { /* override from previous */ - border: none 0px; - padding: 0px; + border: none 0; + padding: 0; } + .listing_lines pre, .listing_code pre { - margin: 0px; + margin: 0; } diff --git a/doc/guide/static/style.css b/doc/guide/static/style.css index 292efcd2fb2..adb22352574 100644 --- a/doc/guide/static/style.css +++ b/doc/guide/static/style.css @@ -1,71 +1,71 @@ -@import url("gtk-doc.css"); +@import "gtk-doc.css"; /* keep this in sync with node_modules/@redhat/redhat-font/webfonts/red-hat-font.css */ @font-face { - font-family: "RedHatText"; + font-family: RedHatText; src: url("./RedHatText-Regular.woff2") format("woff2"); /* Modern Browsers */ font-style: normal; font-weight: 400; - text-rendering: optimizeLegibility; + text-rendering: optimizelegibility; } @font-face { - font-family: "RedHatText"; + font-family: RedHatText; src: url("./RedHatText-Medium.woff2") format("woff2"); /* Modern Browsers */ font-style: normal; font-weight: 700; - text-rendering: optimizeLegibility; + text-rendering: optimizelegibility; } -H1.guides { +h1.guides { background-color: #238b49; color: white; font-family: Georgia, "Times New Roman", Times, serif; font-size: 30pt; - padding: 5px 0px 5px 20px; + padding-block: 5px; + padding-inline: 20px 0; } -H2.guides { - margin-left: 20px; +h2.guides { + margin-inline-start: 20px; } -DIV.guides { - margin-left: 20px; - margin-right: 20px; - font-family: 'RedHatText', Verdana, Arial, 'Bitstream Vera Sans', Helvetica, sans-serif; +div.guides { + margin-inline: 20px; + font-family: RedHatText, Verdana, Arial, 'Bitstream Vera Sans', Helvetica, sans-serif; font-size: 11pt; } -DIV.guides > p { - margin-left: 20px; +div.guides > p { + margin-inline-start: 20px; } -DIV.guides > A { - margin-left: 40px; - margin-right: 20px; +div.guides > a { + margin-inline: 40px 20px; font-size: 12px; line-height: 200%; display: block; } -TABLE.navigation { +table.navigation { background-color: #238b49 !important; border-width: 0 !important; color: white; font-family: Georgia, "Times New Roman", Times, serif; } -TABLE.navigation TH { +table.navigation th { font-size: 30pt !important; font-weight: normal; - text-align: left !important; - padding-left: 10pt; + text-align: start !important; + padding-inline-start: 10pt; } -TABLE.navigation TH:first-child { - padding: 5px 0px 5px 20px; +table.navigation th:first-child { + padding-block: 5px; + padding-inline: 20px 0; } .shortcuts { @@ -74,157 +74,155 @@ TABLE.navigation TH:first-child { .shortcuts a { color: white !important; - font-family: 'RedHatText', 
Verdana, Arial, 'Bitstream Vera Sans', Helvetica, sans-serif; + font-family: RedHatText, Verdana, Arial, 'Bitstream Vera Sans', Helvetica, sans-serif; } -P.title { +p.title { font-size: 30pt !important; } -DIV.warning { +div.warning { padding: 15px; } + .warning .title { font-weight: bold !important; - left: 0px; + inset-inline-start: 0; } -BODY { - padding-top: 5.5em !important; - margin: 0px; +body { + padding-block-start: 5.5em !important; + margin: 0; } /* Target all Firefox, since firefox has bug wrt TABLE + position: fixed */ +/* stylelint-disable-next-line at-rule-no-vendor-prefix */ @-moz-document url-prefix() { - TABLE.navigation { position: static !important; } - BODY { padding-top: 0 !important; } + table.navigation { position: static !important; } + body { padding-block-start: 0 !important; } } -P.releaseinfo { +p.releaseinfo { margin: 1em; } -DIV.toc { +div.toc { margin: 1em; } -DIV.book, -DIV.refentry, -DIV.chapter, -DIV.article, -DIV.reference, -DIV.index, -DIV.footer, -DIV.section, -DIV.part { - font-family: 'RedHatText', Verdana, Arial, 'Bitstream Vera Sans', Helvetica, sans-serif; +div.book, +div.refentry, +div.chapter, +div.article, +div.reference, +div.index, +div.footer, +div.section, +div.part { + font-family: RedHatText, Verdana, Arial, 'Bitstream Vera Sans', Helvetica, sans-serif; font-size: 11pt; line-height: 160%; } -CODE { +code { font-size: 10.5pt; } -BODY > DIV.footer, -BODY > DIV.part { - margin-left: 1em; - margin-right: 1em; +body > div.footer, +body > div.part { + margin-inline: 1em; } -BODY > DIV.refentry, -BODY > DIV.chapter, -BODY > DIV.article, -BODY > DIV.reference, -BODY > DIV.index, -BODY > DIV.section, -BODY > DIV.part, -BODY > DIV.book > DIV.toc { - padding-top: 50px; - max-width: 800px; - padding-left: 40px; - padding-right: 20px; - margin-left: auto; - margin-right: auto; +body > div.refentry, +body > div.chapter, +body > div.article, +body > div.reference, +body > div.index, +body > div.section, +body > div.part, +body > div.book > div.toc { + padding-block-start: 50px; + max-inline-size: 800px; + padding-inline: 40px 20px; + margin-inline: auto; /* margin-left: 3em; margin-right: 1em; */ } -BODY > DIV.book > DIV.toc { - padding-top: 20px; +body > div.book > div.toc { + padding-block-start: 20px; } -DIV.section { - margin-top: 3em; +div.section { + margin-block-start: 3em; } -PRE.programlisting { +pre.programlisting { font-size: 9.5pt; line-height: 130%; - padding-left: 10px; + padding-inline-start: 10px; } -DIV.variablelist TABLE { +div.variablelist table { font-size: 13pt; line-height: 150%; - margin-left: 0px; + margin-inline-start: 0; } -DIV.variablelist TABLE SPAN.term { - padding-right: 1em; +div.variablelist table span.term { + padding-inline-end: 1em; } -DIV.variablelist { - margin-left: 0px; +div.variablelist { + margin-inline-start: 0; } -DIV.refsect1, -DIV.refsect2, -DIV.refsection, -DIV.refnamediv, -DIV.refsynopsisdiv { - margin-bottom: 2em !important; +div.refsect1, +div.refsect2, +div.refsection, +div.refnamediv, +div.refsynopsisdiv { + margin-block-end: 2em !important; } -DIV.refsection > H3 { - margin-top: 1.5em !important; +div.refsection > h3 { + margin-block-start: 1.5em !important; } -DIV.abstract > .title { +div.abstract > .title { margin: 0.3em; visibility: hidden; } -H1 { +h1 { font-weight: normal !important; } -H2 { +h2 { position: relative; - left: -1em; + inset-inline-start: -1em; font-weight: normal !important; } -H3 { +h3 { position: relative; - left: -1em; + inset-inline-start: -1em; font-weight: normal !important; } 
-DD > DL { - margin-top: 0.3em; - margin-bottom: 0.3em; +dd > dl { + margin-block: 0.3em; } -PRE.screen { +pre.screen { border: solid 1px #729fcf; padding: 0.5em; background: #e6f3ff; } -CODE.option { +code.option { white-space: nowrap; } diff --git a/doc/login.md b/doc/login.md index 00cdbfd4c14..46cb10fbe95 100644 --- a/doc/login.md +++ b/doc/login.md @@ -10,12 +10,12 @@ header is built like this. Basic base64(user:password) ``` -A successful response is a 200 http code with a json body that contains a ```user``` field with the user +A successful response is a 200 http code with a json body that contains a `user` field with the user name of the user that was just logged in. Additional fields may be present Other http codes are considered errors. Generally these are 401 or 403 http status codes. In most cases the error can detrived from the status text. Examples are - ```authentication-failed```, ```authentication-unavailable``` or ```access-denied``` +`authentication-failed`, `authentication-unavailable` or `access-denied` In some cases additional error messages may be included. In some authentication setups additional steps are required. When this happens cockpit will diff --git a/doc/man/cockpit-bridge.xml b/doc/man/cockpit-bridge.xml index 0d31f1d3098..0006b0777ad 100644 --- a/doc/man/cockpit-bridge.xml +++ b/doc/man/cockpit-bridge.xml @@ -16,7 +16,7 @@ Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License - along with Cockpit; If not, see . + along with Cockpit; If not, see . --> diff --git a/doc/man/cockpit-desktop.xml b/doc/man/cockpit-desktop.xml index eb3b4abc58a..a5a10a04ada 100644 --- a/doc/man/cockpit-desktop.xml +++ b/doc/man/cockpit-desktop.xml @@ -16,7 +16,7 @@ Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License - along with Cockpit; If not, see . + along with Cockpit; If not, see . --> diff --git a/doc/man/cockpit-tls.xml b/doc/man/cockpit-tls.xml index b1b19d61d10..f40e8afef5e 100644 --- a/doc/man/cockpit-tls.xml +++ b/doc/man/cockpit-tls.xml @@ -16,7 +16,7 @@ Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License - along with Cockpit; If not, see . + along with Cockpit; If not, see . --> diff --git a/doc/man/cockpit-ws.xml b/doc/man/cockpit-ws.xml index 0031d2dff76..543f62e97cb 100644 --- a/doc/man/cockpit-ws.xml +++ b/doc/man/cockpit-ws.xml @@ -16,7 +16,7 @@ Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License - along with Cockpit; If not, see . + along with Cockpit; If not, see . --> diff --git a/doc/man/cockpit.conf.xml b/doc/man/cockpit.conf.xml index f27257190cf..baf2c7d6f43 100644 --- a/doc/man/cockpit.conf.xml +++ b/doc/man/cockpit.conf.xml @@ -16,7 +16,7 @@ Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License - along with Cockpit; If not, see . + along with Cockpit; If not, see . --> @@ -108,7 +108,8 @@ ForwardedForHeader = X-Forwarded-For on the login screen is visible and allows logging into another server. When set to false, direct remote logins are disallowed. If this option is not specified then it will be automatically detected based on whether the - cockpit-ssh process is available or not. + cockpit-bridge package is installed and the ssh + program is available. 
If cockpit-ws is exposed to the public internet, and also has access to a private internal network, it is recommended to explicitly set LoginTo=false. This prevents @@ -123,6 +124,23 @@ ForwardedForHeader = X-Forwarded-For Connect to option to specify the host to log into. + + + + + When set to true, cockpit will allow + users to connect to multiple hosts in one session. The + default is OS specific. + + + When connecting to multiple servers, JavaScript runs + without isolation. All systems will be vulnerable to + potential attacks from other connected hosts. Enable this + option only when all hosts are + trusted. + + + Same as the sshd configuration option by the same name. diff --git a/doc/man/cockpit.xml b/doc/man/cockpit.xml index d615e227b71..8fd2d6d9e98 100644 --- a/doc/man/cockpit.xml +++ b/doc/man/cockpit.xml @@ -16,7 +16,7 @@ Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License - along with Cockpit; If not, see . + along with Cockpit; If not, see . --> diff --git a/doc/man/pam_ssh_add.xml b/doc/man/pam_ssh_add.xml index 665043a92fd..934d8fabbdc 100644 --- a/doc/man/pam_ssh_add.xml +++ b/doc/man/pam_ssh_add.xml @@ -16,7 +16,7 @@ Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License - along with Cockpit; If not, see . + along with Cockpit; If not, see . --> diff --git a/doc/protocol.md b/doc/protocol.md index 91d23d0bc9b..e63ecd90240 100644 --- a/doc/protocol.md +++ b/doc/protocol.md @@ -55,11 +55,13 @@ Control messages are always sent in the control channel. They always have an empty channel. The payload of a control message is always a json object. There is always a "command" field: - { - "command": , - "channel": , - ... - } +``` +{ + "command": , + "channel": , + ... +} +``` If a control message pertains to a specific channel it has a "channel" field containing the id of the channel. It is invalid to have a present but empty @@ -112,6 +114,8 @@ The following fields are defined: * "capabilities": Optional, array of capability strings required from the bridge * "session": Optional, set to "private" or "shared". Defaults to "shared" * "flow-control": Optional boolean whether the channel should throttle itself via flow control. + * "send-acks": Set to "bytes" to send "ack" messages after processing each data frame + If "binary" is set to "raw" then this channel transfers binary messages. @@ -123,12 +127,14 @@ The channel id must not already be in use by another channel. An example of an open: - { - "command": "open", - "channel": "a4", - "payload": "stream", - "host": "localhost" - } +```json +{ + "command": "open", + "channel": "a4", + "payload": "stream", + "host": "localhost" +} +``` This message is sent to the cockpit-bridge. @@ -162,36 +168,45 @@ channels in the "fence" group are closed before resuming. The "flow-control" option controls whether a channel should attempt to throttle itself via flow control when sending or receiving large amounts of data. The current default (when this option is not provided) is to not do flow control. -However, this default will likely change in the future. +However, this default will likely change in the future. This only impacts data +sent by the bridge to the browser. + +If "send-acks" is set to "bytes" then the bridge will send acknowledgement +messages detailing the number of payload bytes that it has received and +processed. 
This mechanism is provided for senders (ie: in the browser) who +wish to throttle the data that they're sending to the bridge. **Host values** Because the host parameter is how cockpit maps url requests to the correct bridge, cockpit may need to additional information to route the message correctly. -For example you want to connect to a container ```my-container``` running +For example you want to connect to a container `my-container` running on "my.host". To allow this the host parameter can encode a key/value pair that will be expanded in the open command json. The format is host+key+value. For example - { - "command": "open", - "channel": "a4", - "payload": "stream", - "host": "my.host+container+my-container" - } +```json +{ + "command": "open", + "channel": "a4", + "payload": "stream", + "host": "my.host+container+my-container" +} +``` will be expanded to - { - "command": "open", - "channel": "a4", - "payload": "stream", - "host": "my.host", - "host-container": "my-container", - "host": "my.host" - } - +```json +{ + "command": "open", + "channel": "a4", + "payload": "stream", + "host": "my.host", + "host-container": "my-container", + "host": "my.host" +} +``` Command: close -------------- @@ -205,11 +220,13 @@ The following fields are defined: The channel id must be set. An example of a close: - { - "command": "close", - "channel" : "5x", - "problem": "access-denied" - } +```json +{ + "command": "close", + "channel" : "5x", + "problem": "access-denied" +} +``` Any protocol participant can send this message. The cockpit-bridge and cockpit-ws backends will send this message when a channel closes whether because of an @@ -228,9 +245,11 @@ The "auth-method-results" object contains a key for each method that cockpit-ws is able to attempt authentication with as well as the result of the attempt. For example: - { - "password": "denied" - } +```json +{ + "password": "denied" +} +``` This possible "result" values are: @@ -296,9 +315,11 @@ is set this "ping" will be forwarded. Otherwise it be limited to a single hop. An example of a ping: - { - "command": "ping", - } +```json +{ +"command": "ping" +} +``` Any protocol participant can send a "ping". It is responded to by sending a "pong" with identical options as a reply. If a "ping" is sent with a @@ -323,27 +344,33 @@ For challenge/response authentication, the following fields are defined: Example authorize challenge and response messages: - { - "command": "authorize", - "cookie": "555", - "challenge": "crypt1:74657374:$6$2rcph,noe92ot..." - } - - { - "command": "authorize", - "cookie": "555", - "response": "crypt1:$6$r0oetn2039ntoen..." - } +```json +{ + "command": "authorize", + "cookie": "555", + "challenge": "crypt1:74657374:$6$2rcph,noe92ot..." +} +``` + +```json +{ + "command": "authorize", + "cookie": "555", + "response": "crypt1:$6$r0oetn2039ntoen..." +} +``` Authorize messages are used during authentication by authentication -commands (ei: cockpit-session, cockpit-ssh) to obtain the users credentials +commands like `cockpit-session` to obtain the users credentials from cockpit-ws. An authentication command can send a authorize message with a response but no cookie. For example - { - "command": "authorize", - "response": "Basic ..." - } +```json +{ + "command": "authorize", + "response": "Basic ..." +} +``` In that case cockpit-ws will store the response and use it in a reply to a subsequent challenge. @@ -430,10 +457,12 @@ is received, then the channel is closed with a "protocol-error". 
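Browser code normally does not construct these messages by hand; the cockpit.js D-Bus client generates them. As a rough, hedged sketch (the bus name, path and property below are only illustrative), a call like the following produces the "call"/"reply" exchange described next:

```js
// Hedged sketch: the cockpit.js D-Bus client emits the raw "call" message
// described below and resolves with the contents of the matching "reply".
const client = cockpit.dbus("org.freedesktop.hostname1");
client.call("/org/freedesktop/hostname1",
            "org.freedesktop.DBus.Properties", "Get",
            ["org.freedesktop.hostname1", "Hostname"])
    .then(reply => console.log("reply:", reply))
    .catch(error => console.log("error:", error));
```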
Method calls are a JSON object with a "call" field, whose value is an array, with parameters in this order: path, interface, method, in arguments. - { - "call": [ "/path", "org.Interface", "Method", [ "arg0", 1, "arg2" ] ], - "id": "cookie" - } +```json +{ + "call": [ "/path", "org.Interface", "Method", [ "arg0", 1, "arg2" ] ], + "id": "cookie" +} +``` All the various parameters must be valid for their use. arguments may be null if no DBus method call body is expected. If a "type" field is specified @@ -460,26 +489,30 @@ Method reply messages are JSON objects with a "reply" field whose value is an array, the array contains another array of out arguments, or null if the DBus reply had no body. - { - "reply": [ [ "arg0", 1, 2 ] ], - "id": "cookie" - } +```json +{ + "reply": [ [ "arg0", 1, 2 ] ], + "id": "cookie" +} +``` If the call had a "type" field, then the reply will have one too containing the DBus type signature of the arguments. If a "flags" field was present on the call, then "flags" will also be present on the reply. Valid out flags are: - * ">": Big endian message - * "<": Little endian message + * `>`: Big endian message + * `<`: Little endian message An error message is JSON object with an "error" field whose value is an array. The array contains: error name, error arguments - { - "error": [ "org.Error", [ "Usually a message" ] ] - "id": "cookie" - } +```json +{ + "error": [ "org.Error", [ "Usually a message" ] ], + "id": "cookie" +} +``` To receive signals you must subscribe to them. This is done by sending a "add-match" message. It contains various fields to match on. If a field @@ -498,15 +531,17 @@ given string as their first argument. If any of the values are not valid according to the dbus specification, the channel will close with a "protocol-error". - { - "add-match": { - "name": "org.the.Name", - "path": "/the/path", - "interface": "org.Interface", - "member": "SignalName", - "arg0": "first argument", - } +```json +{ + "add-match": { + "name": "org.the.Name", + "path": "/the/path", + "interface": "org.Interface", + "member": "SignalName", + "arg0": "first argument" } +} +``` If the "name" field is omitted, it will be populated from the "open" message. If no "name" was specified in the "open" message, then DBus messages from any @@ -518,31 +553,37 @@ times before the signals are actually unsubscribed. The form of "remove-match" is identical to "add-match". - { - "remove-match": { - "name": "org.the.Name", - "path": "/the/path", - "interface": "org.Interface", - "member": "SignalName", - "arg0": "first argument", - } +```json +{ + "remove-match": { + "name": "org.the.Name", + "path": "/the/path", + "interface": "org.Interface", + "member": "SignalName", + "arg0": "first argument" } +} +``` Signals are sent in JSON objects that have a "signal" field, which is an array of parameters: path, interface, signal name, and arguments. arguments may be null if the DBus signal had no body. - { - "signal": [ "/the/path", "org.Interface", "SignalName", [ "arg0", 1, 2 ] ] - } +```json +{ + "signal": [ "/the/path", "org.Interface", "SignalName", [ "arg0", 1, 2 ] ] +} +``` If a signal message is sent to the bridge, the signal will be emitted. In addition a "destination" field may be present to indicate whether the signal should be broadcast or not. 
- { - "signal": [ "/the/path", "org.Interface", "SignalName", [ "arg0", 1, 2 ] ] - } +```json +{ + "signal": [ "/the/path", "org.Interface", "SignalName", [ "arg0", 1, 2 ] ] +} +``` If the bus name of the sender of the signal does not match the "name" field of the "open" message, then a "name" field will be included with the "signal" message. @@ -555,23 +596,27 @@ interfaces, otherwise DBus introspection is used. The "id" field is optional, if present a "reply" will be sent with this same "id" when the watch has sent "notify" messages about the things being watched. - { - "watch": { - "name": "org.the.Name", - "path": "/the/path/to/watch", - "interface": org.Interface - } - "id": 5 - } +```json +{ + "watch": { + "name": "org.the.Name", + "path": "/the/path/to/watch", + "interface": "org.Interface" + }, + "id": 5 +} +``` To remove a watch, pass the identical parameters with an "unwatch" request. - { - "unwatch": { - "path": "/the/path/to/watch" - } +```json +{ + "unwatch": { + "path": "/the/path/to/watch" } +} +``` If the "name" field is omitted, it will be populated from the "open" message. Either a "name" field must be specified here or in the "open" message. @@ -581,20 +626,22 @@ addition of interfaces without properties, which will be an empty interface object, or interfaces removed, which will be null. Only the changes since the last "notify" message will be sent. - { - "notify": { - "/a/path": { - "org.Interface1": { - "Prop1": x, - "Prop2": y - }, - "org.Interface2": { } +```json +{ + "notify": { + "/a/path": { + "org.Interface1": { + "Prop1": "x", + "Prop2": 1 }, - "/another/path": { - "org.Removed": null - } + "org.Interface2": { } + }, + "/another/path": { + "org.Removed": null } } +} +``` If the bus name of the sender of the signal does not match the "name" field of the "open" message, then a "name" field will be included with the "notify" message. @@ -604,20 +651,22 @@ first time an interface is sent using a "notify" message, a "meta" will be sent with that interface introspection info. Additional fields will be defined here, but this is it for now. - { - "meta": { - "org.Interface": { - "methods": { - "Method1": { }, - "Method2": { } - }, - "properties": { - "Prop1": { "flags": "rw" }, - "Prop2": { "flags": "r" } - } +```json +{ + "meta": { + "org.Interface": { + "methods": { + "Method1": { }, + "Method2": { } + }, + "properties": { + "Prop1": { "flags": "rw" }, + "Prop2": { "flags": "r" } } } } +} +``` If the bus name of the sender of the signal does not match the "name" field of the "open" message, then a "name" field will be included with the "meta" message. @@ -628,9 +677,11 @@ When the owner of the DBus "name" (specified in the open message) changes an "ow message is sent. The owner value will be the id of the owner or null if the name is unowned. - { - "owner": "1:" - } +```json +{ + "owner": "1:" +} +``` A "publish" message can be used to export DBus interfaces on the bus. The bridge will then send "call" messages back to the frontend for each method invocation @@ -639,17 +690,21 @@ with DBus meta information. If a cookie is specified then a reply will be sent when the interface is published. If the interface is already published at the given path, it will be replaced. - { - "publish": [ "/a/path", "org.Interface" ], - "id": "cookie" - } +```json +{ + "publish": [ "/a/path", "org.Interface" ], + "id": "cookie" +} +``` An "unpublish" message will unexport a DBus interface on the bus. It is not an error if no such interface has been published. 
- { - "unpublish": [ "/a/path", "org.Interface" ], - } +```json +{ + "unpublish": [ "/a/path", "org.Interface" ] +} +``` DBus types are encoded in various places in these messages, such as the arguments. These types are encoded as follows: @@ -666,10 +721,12 @@ arguments. These types are encoded as follows: * variant: encoded as a JSON object with a "v" field containing a value and a "t" field containing a DBus type signature. - { - "v": "value", - "t": "s" - } +```json +{ + "v": "value", + "t": "s" +} +``` Payload: http-stream2 --------------------- @@ -981,36 +1038,6 @@ fields: * "type": If the event was created this contains the type of the new file. Will be one of: file, directory, link, special or unknown. -In case of an error, the channel will be closed. In addition to the -usual "problem" field, the "close" control message sent by the server -might have the following additional fields: - - * "message": A string in the current locale describing the error. - -Payload: fslist1 ---------------- - -A channel of this type lists the files in a directory and will watch -for further changes. - -The following options can be specified in the "open" control message: - - * "path": The path name of the directory to watch. This should be an - absolute path. - * "watch": Boolean, when true the directory will be watched and signal - on changes. Defaults to "true" - -The channel will send a number of JSON messages that list the current -content of the directory. These messages have a "event" field with -value "present", a "path" field that holds the (relative) name of -the file, "owner", "group", "size" and "modified" (timestamp) fields with -some basic file information, and a "type" field. Type will be one of: -file, directory, link, special or unknown. After all files have been listed the -"ready" control message will be sent. - -Other messages on the stream signal changes to the directory, in the -same format as used by the "fswatch1" payload type. - In case of an error, the channel will be closed. In addition to the usual "problem" field, the "close" control message sent by the server might have the following additional fields: @@ -1061,6 +1088,13 @@ The following options can be specified in the "open" control message: * "path": The path name of the file to replace. + * "size": The expected size of the file content. If set, the file is + allocated immediately, and the channel "open" request will fail if + insufficient space is available. If this option is set, it is not + possible to delete the file (ie: sending immediate EOF will result in + a 0 byte file) This option should always be provided if possible, to + avoid fragmentation, but is particularly important for large files. + * "tag": The expected transaction tag of the file. When the actual transaction tag of the file is different, the write will fail. If you don't set this field, the actual tag will not be checked. To @@ -1069,9 +1103,15 @@ The following options can be specified in the "open" control message: You should write the new content to the channel as one or more messages. To indicate the end of the content, send a "done" message. -If you don't send any content messages before sending "done", the file -will be removed. To create an empty file, send at least one content -message of length zero. +If you don't send any content messages before sending "done", and no +"size" was given, the file will be removed. To create an empty file, +send at least one content message of length zero, or set the "size" to +0. 
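For illustration, a minimal "fsreplace1" exchange might begin with an "open" control
message along these lines; the channel id, path, size and tag values are hypothetical,
and the "command"/"channel"/"payload" framing is the general control message format
described earlier in this document:

```json
{
    "command": "open",
    "channel": "c17",
    "payload": "fsreplace1",
    "path": "/etc/motd",
    "size": 14,
    "tag": "1:12345-67890"
}
```

The client would then send 14 bytes of content on that channel, follow them with a
"done" message, and close without a problem code to commit the replacement.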
+ +If "size" is given, and less data is actually sent, then the file will +be truncated down to the size of the data that was actually sent. If +more data is sent, the file will grow (subject to additional +fragmentation and potential ENOSPC errors). When the file does not have the expected tag, the channel will be closed with a "change-conflict" problem code. @@ -1081,6 +1121,10 @@ content will be replaced with a "rename" syscall when the channel is closed without problem code. If the channel is closed with a problem code (by either client or server), the file will be left untouched. +If `tag` is given, file owner and mode are preserved (copied from the +original file). Other attributes (like ACLs or locally modified SELinux +context) are never copied. + In addition to the usual "problem" field, the "close" control message sent by the server might have the following additional fields: @@ -1117,7 +1161,7 @@ The general open options are: * "direct": PCP metrics from plugins that are loaded into the Cockpit bridge directly. Use this when in doubt. - * "pcmd": PCP metrics from the local PCP daemon. + * "pmcd": PCP metrics from the local PCP daemon. * A string starting with "/": PCP metrics from one or more archives. @@ -1163,12 +1207,16 @@ The general open options are: You specify the desired metrics as an array of objects, where each object describes one metric. For example: - [ { name: "kernel.all.cpu.user", - units: "millisec", - derive: "rate" - }, - ... - ] +```json +[ + { + "name": "kernel.all.cpu.user", + "units": "millisec", + "derive": "rate" + }, + ... +] +``` A metric description can contain the following fields: @@ -1231,6 +1279,7 @@ message, and more fields might be present in the objects of the The 'data' messages are nested arrays in this shape: +``` [ // first point in time [ // first metric (instanced, with two instances) @@ -1250,10 +1299,11 @@ The 'data' messages are nested arrays in this shape: // same shape again as for the first point in time ] ] +``` Thus, a 'data' message contains data for one or more points in time where samples have been taken. A point in time is always one -"interval" later than the previous point in time, even when they are +`interval` later than the previous point in time, even when they are reported in the same 'data' message. For real time monitoring, you will generally only receive one point in @@ -1262,12 +1312,12 @@ might report multiple points in time in one message, to improve efficiency. For each point in time, there is an array with samples for each -metric, in the same order as the "metrics" option used when opening +metric, in the same order as the `metrics` option used when opening the channel. For non-instanced metrics, the array contains the value of the metric. For instanced metrics, the array contains another array with samples -for each instance, in the same order as reported in the "instances" +for each instance, in the same order as reported in the `instances` field of the most recent 'meta' message. In order to gain efficiency, 'data' messages are usually compressed. @@ -1275,8 +1325,8 @@ This is done by only transmitting the differences from one point in time to the next. If a value for a metric or a instance is the same as at the previous -point in time, the channel transmits a "null" value instead. -Additionally, "null" values at the end of an array are suppressed by +point in time, the channel transmits a `null` value instead. 
+Additionally, `null` values at the end of an array are suppressed by transmitting a shorter array. For example, say the samples for three points in time are @@ -1295,8 +1345,8 @@ This compression only happens when the last and current value belong to the same instance of the same metric. Thus, the client does not need to track layout changes when decompressing data messages. -Instead of a number of "null", a data message can also contain -"false". This indicates an error of some kind, or an unavailable +Instead of a number of `null`, a data message can also contain +`false`. This indicates an error of some kind, or an unavailable value. **PCP metric source** @@ -1311,11 +1361,11 @@ The format of the "units" member is the same as the one used by The metric information objects in the 'meta' messages for PCP sources also contain these fields: - * "semantics" (string): The semantics of this metric, one of - "counter", "instant", or "discrete". + * `semantics` (string): The semantics of this metric, one of + `counter`, `instant`, or `discrete`. Only numeric metrics are currently supported. Non-numeric metrics -have all their samples set to "false". +have all their samples set to `false`. Problem codes ------------- @@ -1323,15 +1373,15 @@ Problem codes These are problem codes for errors that cockpit-web responds to. They should be self explanatory. It's totally not interesting to arbitrarily invent new codes. Instead the web needs to be ready to react to these problems. When in -doubt use "internal-error". - - * "internal-error" - * "no-cockpit" - * "no-session" - * "access-denied" - * "authentication-failed" - * "not-found" - * "terminated" - * "timeout" - * "unknown-hostkey" - * "no-forwarding" +doubt use `internal-error`. + + * `internal-error` + * `no-cockpit` + * `no-session` + * `access-denied` + * `authentication-failed` + * `not-found` + * `terminated` + * `timeout` + * `unknown-hostkey` + * `no-forwarding` diff --git a/doc/urls.md b/doc/urls.md index 914f82a4e01..a53ecf9c81d 100644 --- a/doc/urls.md +++ b/doc/urls.md @@ -7,41 +7,41 @@ and their characteristics. This doesn't apply to the visible URLs shown in the main Cockpit address bar. Cockpit URLs are based on an application. A valid application name is -either the word ```cockpit``` or a string that begins with ```cockpit+``` -for example ```cockpit+application-name```. Each of the following URLs -are valid for any application, just replace ```/cockpit/``` with ```/cockpit+application-name/``` +either the word `cockpit` or a string that begins with `cockpit+` +for example `cockpit+application-name`. Each of the following URLs +are valid for any application, just replace `/cockpit/` with `/cockpit+application-name/` - * ```/cockpit/static``` static files available without authentication. Files + * `/cockpit/static` static files available without authentication. Files are cached for as long as possible, and names *must* change when the contents of the file changes. The exception to this is when the application refers to a different machine. In that case the user must be authenticated to serve those files and the cache varies on cookie. - * ```/cockpit/login``` authenticates a user and sets cookie based on application + * `/cockpit/login` authenticates a user and sets cookie based on application name. - * ```/cockpit/$xxxxxxxxxxxxxxx/package/path/to/file.ext``` are files which + * `/cockpit/$xxxxxxxxxxxxxxx/package/path/to/file.ext` are files which are cached by packages for as long as possible. 
The checksum changes when any of the packages on a system change. Only available after authentication. - * ```/cockpit/$xxxxxxxxxxxxxxx/*/path/to/file.ext``` are concatenated files from + * `/cockpit/$xxxxxxxxxxxxxxx/*/path/to/file.ext` are concatenated files from packages, as seen above. - * ```/cockpit/@host/package/path/to/file.ext``` are files from packages (on + * `/cockpit/@host/package/path/to/file.ext` are files from packages (on specific hosts) that are not cached. Only available after authentication. - * ```/cockpit/@host/*/path/to/file.ext``` are concatenated files from + * `/cockpit/@host/*/path/to/file.ext` are concatenated files from packages (on specific hosts). They are served uncompressed. - * ```/cockpit/@host/manifests.json``` includes a summary of all the manifest + * `/cockpit/@host/manifests.json` includes a summary of all the manifest files from all the packages - * ```/cockpit/@host/manifests.js``` includes a summary of all the manifest + * `/cockpit/@host/manifests.js` includes a summary of all the manifest files from all the packages, as an AMD loadable module - * ```/cockpit/socket``` The main web socket + * `/cockpit/socket` The main web socket - * ```/cockpit/channel/csrftoken?query``` External channel URLs + * `/cockpit/channel/csrftoken?query` External channel URLs These are commonly used to wrap/proxy port access for Cockpit pages, as their JavaScript cannot talk to them directly due to Cockpit's strict @@ -53,27 +53,27 @@ are valid for any application, just replace ```/cockpit/``` with ```/cockpit+app When loading through cockpit-ws any URL that does not begin with an application will be handled by the shell (shell/index.html by default) -using the default application ```cockpit```. +using the default application `cockpit`. Direct to machine urls ====================== Cockpit-ws supports logging in directly to a remote machine, without first -authenticating on the machine that cockpit-ws is running on. A cockpit-ssh +authenticating on the machine that cockpit-ws is running on. A `cockpit-beiboot` processes is spawned that connects via SSH to the remote machine and all requests are proxied via that connection. -To use this feature the application name MUST begin with an ```=``` for -example ```/cockpit+=machine/socket``` will attempt to open a socket on -```machine``` ```/cockpit+machine/socket``` will attempt to open a socket +To use this feature the application name MUST begin with an `=` for +example `/cockpit+=machine/socket` will attempt to open a socket on +`machine` `/cockpit+machine/socket` will attempt to open a socket on localhost. When loading through cockpit-ws any URL that does not begin with an application will be handled by the shell (shell/index.html by default) -using the default application ```cockpit```. +using the default application `cockpit`. -In addition any url that begins with ```/=``` will attempt to load +In addition any url that begins with `/=` will attempt to load the shell from the specified machine. For example a URL of -```/=machine/system``` will attempt to load ```shell/index.html``` -from ```machine``` using the application ```cockpit+machine```. +`/=machine/system` will attempt to load `shell/index.html` +from `machine` using the application `cockpit+machine`. 
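To make the address forms above concrete, a few illustrative URLs follow; the host
name, application name and file name are hypothetical placeholders:

```
/cockpit/static/example.css       ->  unauthenticated static file, default application
/cockpit+myapp/login              ->  login endpoint for the application "myapp"
/cockpit+=db1.example.com/socket  ->  web socket proxied over SSH to db1.example.com
/=db1.example.com/system          ->  shell loaded from db1.example.com
```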
diff --git a/examples/kubernetes/README.md b/examples/kubernetes/README.md deleted file mode 100644 index f246c1cac30..00000000000 --- a/examples/kubernetes/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Some Kubernetes examples - -If you're using this with the openshift image in bots/images then you'll -need to run the following to access services: - - $ sudo ip route add 172.30.0.0/16 via 10.111.112.101 - -This image is preloaded with the docker images needed to run the cockpit -integration tests. diff --git a/examples/kubernetes/build-manifest.js b/examples/kubernetes/build-manifest.js deleted file mode 100644 index b911b0c2766..00000000000 --- a/examples/kubernetes/build-manifest.js +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/node - -const fs = require('fs'); - -let input; - -function fatal(message, code) { - console.log((input || "build-manifest") + ": " + message); - process.exit(code || 1); -} - -if (process.argv.length < 3) { - console.log("usage: build-manifest file ..."); - process.exit(2); -} - -const files = process.argv.slice(2); - -const manifest = { - kind: "List", - apiVersion: "v1beta3", - items: [] -}; - -function step() { - if (files.length == 0) { - input = null; - process.stdout.write(JSON.stringify(manifest, null, 4)); - process.exit(0); - } - - input = files.shift(); - - fs.readFile(input, { encoding: "utf-8" }, function(err, data) { - if (err) - fatal(err.message); - let item; - try { - item = JSON.parse(data); - } catch (ex) { - fatal(ex.message); - } - manifest.items.push(item); - step(); - }); -} - -step(); diff --git a/examples/kubernetes/default_namespace.json b/examples/kubernetes/default_namespace.json deleted file mode 100644 index bc7d2a1658f..00000000000 --- a/examples/kubernetes/default_namespace.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "apiVersion" : "v1", - "kind" : "Namespace", - "metadata" : { - "name": "default" - } -} - diff --git a/examples/kubernetes/empty-service.json b/examples/kubernetes/empty-service.json deleted file mode 100644 index 269a8a778af..00000000000 --- a/examples/kubernetes/empty-service.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "kind": "Service", - "apiVersion": "v1", - - "metadata": { - "name": "myservice", - "namespace": "example", - "labels": { - "test": "myservice" - } - }, - "spec": { - "ports": [{ - "name": "", - "port": 7777, - "protocol": "TCP", - "targetPort": 7777 - }], - "sessionAffinity": "None" - } -} diff --git a/examples/kubernetes/k8s-guestbook-app.json b/examples/kubernetes/k8s-guestbook-app.json deleted file mode 100644 index 889bb22c042..00000000000 --- a/examples/kubernetes/k8s-guestbook-app.json +++ /dev/null @@ -1,178 +0,0 @@ -{ - "kind": "List", - "apiVersion": "v1", - "items": [ - - { - "kind": "Pod", - "apiVersion": "v1", - "metadata": { - "name": "redis-master", - "labels": { - "name": "redis-master" - } - }, - "spec": { - "containers": [ - { - "name": "master", - "image": "stefwalter/redis:latest", - "ports": [ - { - "containerPort": 6379, - "protocol": "TCP" - } - ] - } - ] - } - }, - - { - "kind": "Service", - "apiVersion": "v1", - "metadata": { - "name": "redis-master", - "labels": { - "name": "redis-master" - } - }, - "spec": { - "ports": [ - { - "port": 6379, - "targetPort": 6379, - "protocol": "TCP" - } - ], - "selector":{ - "name": "redis-master" - } - } - }, - - { - "kind": "ReplicationController", - "apiVersion": "v1", - "metadata": { - "name": "redis-slave", - "labels": { - "name": "redis-slave" - } - }, - "spec": { - "replicas": 2, - "selector": { - "name": "redis-slave" - }, - "template": { - 
"metadata": { - "labels": { - "name": "redis-slave" - } - }, - "spec": { - "containers": [ - { - "name": "slave", - "image": "stefwalter/redis-slave:v2", - "ports": [ - { - "containerPort": 6379, - "protocol": "TCP" - } - ] - } - ] - } - } - } - }, - - { - "kind": "Service", - "apiVersion": "v1", - "metadata": { - "name": "redis-slave", - "labels": { - "name": "redis-slave" - } - }, - "spec": { - "ports": [ - { - "port": 6379, - "targetPort": 6379, - "protocol": "TCP" - } - ], - "selector": { - "name": "redis-slave" - } - } - }, - - { - "kind": "ReplicationController", - "apiVersion": "v1", - "metadata": { - "name": "frontend", - "labels": { - "name": "frontend" - } - }, - "spec": { - "replicas": 3, - "selector": { - "name": "frontend" - }, - "template": { - "metadata": { - "labels": { - "name":"frontend" - } - }, - "spec": { - "containers": [ - { - "name": "php-redis", - "image": "stefwalter/example-guestbook-php-redis:v2", - "ports": [ - { - "containerPort": 80, - "protocol": "TCP" - } - ] - } - ] - } - } - } - }, - - { - "kind": "Service", - "apiVersion": "v1", - "metadata": { - "name": "frontend", - "labels": { - "name": "frontend" - } - }, - "spec": { - "ports": [ - { - "port":80, - "targetPort":80, - "protocol":"TCP" - } - ], - "selector":{ - "name":"frontend" - } - } - } - - ] -} - diff --git a/examples/kubernetes/k8s-sample-app.json b/examples/kubernetes/k8s-sample-app.json deleted file mode 100644 index ae654af58b2..00000000000 --- a/examples/kubernetes/k8s-sample-app.json +++ /dev/null @@ -1,167 +0,0 @@ -{ - "kind":"List", - "apiVersion":"v1", - "items":[ - { - "kind":"ReplicationController", - "apiVersion":"v1", - "metadata":{ - "name":"database", - "labels":{ - "name":"database", - "template":"ruby-helloworld-sample" - } - }, - "spec":{ - "replicas":1, - "selector":{ - "name":"database", - "template":"ruby-helloworld-sample" - }, - "template":{ - "metadata":{ - "labels":{ - "name":"database", - "template":"ruby-helloworld-sample" - } - }, - "spec":{ - "containers":[ - { - "name":"ruby-helloworld-database", - "image":"mysql", - "ports":[ - { - "containerPort":3306, - "protocol":"TCP" - } - ], - "env":[ - { - "name":"MYSQL_ROOT_PASSWORD", - "key":"MYSQL_ROOT_PASSWORD", - "value":"rQHfVnTo" - }, - { - "name":"MYSQL_DATABASE", - "key":"MYSQL_DATABASE", - "value":"root" - } - ] - } - ] - } - } - } - }, - { - "kind":"ReplicationController", - "apiVersion":"v1", - "metadata":{ - "name":"frontend", - "labels":{ - "name":"frontend", - "template":"ruby-helloworld-sample" - } - }, - "spec":{ - "replicas":1, - "selector":{ - "name":"frontend", - "template":"ruby-helloworld-sample" - }, - "template":{ - "metadata":{ - "labels":{ - "name":"frontend", - "template":"ruby-helloworld-sample" - } - }, - "spec":{ - "containers":[ - { - "name":"ruby-helloworld", - "image":"openshift/ruby-hello-world", - "ports":[ - { - "containerPort":8080, - "protocol":"TCP" - } - ], - "env":[ - { - "name":"ADMIN_USERNAME", - "key":"ADMIN_USERNAME", - "value":"admin6TM" - }, - { - "name":"ADMIN_PASSWORD", - "key":"ADMIN_PASSWORD", - "value":"xImx1tHR" - }, - { - "name":"MYSQL_ROOT_PASSWORD", - "key":"MYSQL_ROOT_PASSWORD", - "value":"rQHfVnTo" - }, - { - "name":"MYSQL_DATABASE", - "key":"MYSQL_DATABASE", - "value":"root" - } - ], - "capabilities":{ - - } - } - ] - } - } - } - }, - { - "kind":"Service", - "apiVersion":"v1", - "metadata":{ - "name":"database", - "labels":{ - "name":"database", - "template":"ruby-helloworld-sample" - } - }, - "spec":{ - "ports": [{ - "name": "", - "protocol": "TCP", - "port": 5434, - 
"targetPort": 3306 - }], - "selector":{ - "name":"database" - } - } - }, - { - "kind":"Service", - "apiVersion":"v1", - "metadata":{ - "name":"frontend", - "labels":{ - "name":"frontend", - "template":"ruby-helloworld-sample" - } - }, - "spec":{ - "ports": [{ - "name": "", - "protocol": "TCP", - "port": 80, - "targetPort": 8080 - }], - "selector":{ - "name":"frontend" - } - } - } - ] -} diff --git a/examples/kubernetes/k8s-sample-multipod.json b/examples/kubernetes/k8s-sample-multipod.json deleted file mode 100644 index 0f5dd0ed2ba..00000000000 --- a/examples/kubernetes/k8s-sample-multipod.json +++ /dev/null @@ -1,177 +0,0 @@ -{ - "kind":"List", - "apiVersion":"v1", - "items":[ - { - "kind":"ReplicationController", - "apiVersion":"v1", - "metadata":{ - "name":"database", - "labels":{ - "name":"database", - "template":"ruby-helloworld-sample" - } - }, - "spec":{ - "replicas":1, - "selector":{ - "name":"database", - "template":"ruby-helloworld-sample" - }, - "template":{ - "metadata":{ - "labels":{ - "name":"database", - "template":"ruby-helloworld-sample" - } - }, - "spec":{ - "containers":[ - { - "name":"ruby-helloworld-database", - "image":"mysql", - "ports":[ - { - "containerPort":3306, - "protocol":"TCP" - } - ], - "env":[ - { - "name":"MYSQL_ROOT_PASSWORD", - "key":"MYSQL_ROOT_PASSWORD", - "value":"rQHfVnTo" - }, - { - "name":"MYSQL_DATABASE", - "key":"MYSQL_DATABASE", - "value":"root" - } - ] - } - ] - } - } - } - }, - { - "kind":"ReplicationController", - "apiVersion":"v1", - "metadata":{ - "name":"frontend", - "labels":{ - "name":"frontend", - "template":"ruby-helloworld-sample" - } - }, - "spec":{ - "replicas":1, - "selector":{ - "name":"frontend", - "template":"ruby-helloworld-sample" - }, - "template":{ - "metadata":{ - "labels":{ - "name":"frontend", - "template":"ruby-helloworld-sample" - } - }, - "spec":{ - "containers":[ - { - "name":"ruby-helloworld", - "image":"openshift/ruby-hello-world", - "ports":[ - { - "containerPort":80, - "protocol":"TCP" - } - ], - "env" : [ - { - "name":"ADMIN_USERNAME", - "key":"ADMIN_USERNAME", - "value":"admin6TM" - }, - { - "name":"ADMIN_PASSWORD", - "key":"ADMIN_PASSWORD", - "value":"xImx1tHR" - }, - { - "name":"MYSQL_ROOT_PASSWORD", - "key":"MYSQL_ROOT_PASSWORD", - "value":"rQHfVnTo" - }, - { - "name":"MYSQL_DATABASE", - "key":"MYSQL_DATABASE", - "value":"root" - } - ], - "capabilities" : { } - }, - { - "name" : "apache-unused", - "image" : "fedora/apache", - "ports" : [ - { - "containerPort":8080, - "protocol":"TCP" - } - ], - "env" : [ ], - "capabilities" : { } - } - ] - } - } - } - }, - { - "kind":"Service", - "apiVersion":"v1", - "metadata":{ - "name":"database", - "labels":{ - "name":"database", - "template":"ruby-helloworld-sample" - } - }, - "spec":{ - "ports": [{ - "name": "", - "protocol": "TCP", - "port": 80, - "targetPort": 80 - }], - "selector":{ - "name":"database" - } - } - }, - { - "kind":"Service", - "apiVersion":"v1", - "metadata":{ - "name":"frontend", - "labels":{ - "name":"frontend", - "template":"ruby-helloworld-sample" - } - }, - "spec":{ - "ports": [{ - "name": "", - "protocol": "TCP", - "port": 5432, - "targetPort": 8080 - }], - "selector":{ - "name":"frontend" - } - } - } - ] -} diff --git a/examples/kubernetes/kube2sky.json b/examples/kubernetes/kube2sky.json deleted file mode 100644 index f7e4f2b6353..00000000000 --- a/examples/kubernetes/kube2sky.json +++ /dev/null @@ -1,157 +0,0 @@ -{ - "kind": "List", - "apiVersion": "v1", - "items": [ - - { - "kind": "Secret", - "metadata": { - "name": "kubeconfig" - }, - 
"data": { - "kubeconfig": "ewoJImFwaVZlcnNpb24iOiAidjEiLAoJImtpbmQiOiAiQ29uZmlnIiwKCSJjbHVzdGVycyI6IFsKCQl7CgkJCSJuYW1lIjogImt1YmVybmV0ZXMiLAoJCQkiY2x1c3RlciI6IHsKImluc2VjdXJlLXNraXAtdGxzLXZlcmlmeSI6IHRydWUsCgkJCQkic2VydmVyIjogImh0dHA6Ly9rdWJlcm5ldGVzOjgwODAiCgkJCX0KCQl9CgldLAoJImNvbnRleHRzIjogWwoJCXsKCQkJIm5hbWUiOiAia3ViZXJuZXRlcyIsCgkJCSJjb250ZXh0IjogewoJCQkJImNsdXN0ZXIiOiAia3ViZXJuZXRlcyIsCgkJCQkibmFtZXNwYWNlIjogImRlZmF1bHQiCgkJCX0KCQl9CgldLAoJImN1cnJlbnQtY29udGV4dCI6ICJrdWJlcm5ldGVzIgp9Cg==" - } -}, - { - "kind": "Service", - "metadata": { - "name": "kubernetes-dns", - "labels": { - "k8s-app": "kube-dns", - "kubernetes.io/cluster-service": "true", - "kubernetes.io/name": "KubeDNS" - }, - "annotations": { - "kubernetes.io/cluster-domain": "cluster.local" - } - }, - "spec": { - "ports": [ - { - "name": "dns", - "protocol": "UDP", - "port": 53, - "targetPort": 53, - "nodePort": 0 - }, - { - "name": "dns-tcp", - "protocol": "TCP", - "port": 53, - "targetPort": 53, - "nodePort": 0 - } - ], - "selector": { - "k8s-app": "kube-dns" - }, - "type": "ClusterIP", - "sessionAffinity": "None" - }, - "status": { - "loadBalancer": {} - } - }, - - - - { - "kind": "ReplicationController", - "metadata": { - "name": "kubernetes-dns", - "labels": { - "k8s-app": "kube-dns-v3", - "kubernetes.io/cluster-service": "true" - } - }, - "spec": { - "replicas": 1, - "selector": { - "k8s-app": "kube-dns", - "version": "v3" - }, - "template": { - "metadata": { - "creationTimestamp": null, - "labels": { - "k8s-app": "kube-dns", - "kubernetes.io/cluster-service": "true", - "version": "v3" - } - }, - "spec": { - "containers": [ - { - "name": "etcd", - "image": "gcr.io/google_containers/etcd:2.0.9", - "command": [ - "/usr/local/bin/etcd", - "-listen-client-urls", - "http://127.0.0.1:2379,http://127.0.0.1:4001", - "-advertise-client-urls", - "http://127.0.0.1:2379,http://127.0.0.1:4001", - "-initial-cluster-token", - "skydns-etcd" - ] - }, - { - "name": "kube2sky", - "image": "gcr.io/google_containers/kube2sky:1.9", - "args": [ - "-domain=cluster.local", - "-kube_master_url=https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT", - "-kubecfg_file=/var/run/kubernetes/kubeconfig" - ], - "volumeMounts": [{ - "mountPath": "/var/run/kubernetes", - "name": "kubeconfig", - "readOnly": true - }] - }, - { - "name": "skydns", - "image": "gcr.io/google_containers/skydns:2015-03-11-001", - "args": [ - "-machines=http://localhost:4001", - "-addr=0.0.0.0:53", - "-domain=cluster.local" - ], - "ports": [ - { - "name": "dns", - "containerPort": 53, - "protocol": "UDP" - }, - { - "name": "dns-tcp", - "containerPort": 53, - "protocol": "TCP" - } - ], - "livenessProbe": { - "exec": { - "command": [ - "/bin/sh", - "-c", - "nslookup kubernetes.default.cluster.local localhost \u003e/dev/null" - ] - }, - "initialDelaySeconds": 30, - "timeoutSeconds": 5 - } - } - ], - "restartPolicy": "Always", - "dnsPolicy": "Default", - "volumes": [{ - "name": "kubeconfig", - "secret": { - "secretName": "kubeconfig" - } - }] - } - } - } - } - -]} diff --git a/examples/kubernetes/nginx-pod.json b/examples/kubernetes/nginx-pod.json deleted file mode 100644 index 2ee222c4b73..00000000000 --- a/examples/kubernetes/nginx-pod.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "apiVersion": "v1", - "kind": "Pod", - "metadata": { - "name": "tiny-nginx", - "labels": { - "name": "tiny-nginx" - } - }, - "spec": { - "containers": [ - { - "name": "tiny-nginx", - "image": "fedora/nginx", - "ports": [ - { - "containerPort": 80, - "protocol": "TCP" - } - ] - } - ] - } -} - diff --git 
a/examples/kubernetes/nginx-rc.json b/examples/kubernetes/nginx-rc.json deleted file mode 100644 index d9ba8baaad9..00000000000 --- a/examples/kubernetes/nginx-rc.json +++ /dev/null @@ -1,35 +0,0 @@ -{ - "kind": "ReplicationController", - "apiVersion": "v1", - "metadata": { - "name": "tiny-nginx" - }, - "spec": { - "replicas": 2, - "selector": { - "name": "tiny-nginx" - }, - "template": { - "metadata": { - "labels": { - "name": "tiny-nginx" - } - }, - "spec": { - "containers": [ - { - "name": "tiny-nginx", - "image": "fedora/nginx", - "ports": [ - { - "containerPort": 80, - "protocol": "TCP" - } - ] - } - ] - } - } - } -} - diff --git a/examples/kubernetes/nginx-service.json b/examples/kubernetes/nginx-service.json deleted file mode 100644 index 1b7ef03d571..00000000000 --- a/examples/kubernetes/nginx-service.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "kind":"Service", - "apiVersion":"v1", - "metadata":{ - "name":"tiny-nginx" - }, - "spec":{ - "ports": [{ - "name": "", - "protocol": "TCP", - "port": 80, - "targetPort": 80 - }], - "selector":{ - "name":"tiny-nginx" - } - } -} diff --git a/examples/kubernetes/node.json b/examples/kubernetes/node.json deleted file mode 100644 index 90cadbb5f05..00000000000 --- a/examples/kubernetes/node.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "kind": "Node", - "apiVersion": "v1", - "metadata": { - "name": "192.168.124.4" - }, - "status": { - "capacity": { - "cpu": "3", - "memory": "1Gi" - } - } -} diff --git a/examples/long-running-process/long-running.css b/examples/long-running-process/long-running.css index dc751e3a0d9..0ffa543b424 100644 --- a/examples/long-running-process/long-running.css +++ b/examples/long-running-process/long-running.css @@ -1,10 +1,10 @@ pre { - background-color: #EEEEEE; - min-height: 5em; - max-height: 50em; + background-color: #EEE; + min-block-size: 5em; + max-block-size: 50em; overflow-y: scroll; } #command { - min-width: 50ex; + min-inline-size: 50ex; } diff --git a/files.js b/files.js index 8a5d234fd5c..d839c5252c9 100644 --- a/files.js +++ b/files.js @@ -1,4 +1,5 @@ -import path from 'path'; +import path from 'node:path'; +import process from 'node:process'; const info = { entries: [ @@ -24,6 +25,7 @@ const info = { "playground/preloaded.js", "playground/notifications-receiver.js", "playground/journal.jsx", + "playground/remote.tsx", "selinux/selinux.js", "shell/shell.js", @@ -42,44 +44,49 @@ const info = { ], tests: [ - "base1/test-base64", - "base1/test-browser-storage", - "base1/test-cache", - "base1/test-chan", - "base1/test-dbus-address", - "base1/test-dbus-framed", - "base1/test-dbus", - "base1/test-echo", - "base1/test-events", - "base1/test-external", - "base1/test-file", - "base1/test-format", - "base1/test-framed-cache", - "base1/test-framed", - "base1/test-http", - "base1/test-journal-renderer", - "base1/test-locale", - "base1/test-location", - "base1/test-metrics", - "base1/test-no-jquery", - "base1/test-permissions", - "base1/test-promise", - "base1/test-protocol", - "base1/test-series", - "base1/test-spawn-proc", - "base1/test-spawn", - "base1/test-stream", - "base1/test-user", - "base1/test-utf8", - "base1/test-websocket", - - "kdump/test-config-client", - - "networkmanager/test-utils", - - "shell/machines/test-machines", - - "storaged/test-util", + "base1/test-base64.js", + "base1/test-browser-storage.js", + "base1/test-cache.js", + "base1/test-chan.js", + "base1/test-channel.ts", + "base1/test-dbus-address.js", + "base1/test-dbus-framed.js", + "base1/test-dbus.js", + "base1/test-echo.js", + 
"base1/test-events.js", + "base1/test-external.js", + "base1/test-file.js", + "base1/test-format.ts", + "base1/test-framed-cache.js", + "base1/test-framed.js", + "base1/test-fsinfo.ts", + "base1/test-http.js", + "base1/test-journal-renderer.js", + "base1/test-locale.js", + "base1/test-location.js", + "base1/test-metrics.js", + "base1/test-no-jquery.js", + "base1/test-permissions.js", + "base1/test-promise.ts", + "base1/test-protocol.js", + "base1/test-series.js", + "base1/test-spawn-proc.js", + "base1/test-spawn.js", + "base1/test-stream.js", + "base1/test-timeformat.ts", + "base1/test-types.ts", + "base1/test-user.js", + "base1/test-websocket.js", + + "lib/test-path.ts", + + "kdump/test-config-client.js", + + "networkmanager/test-utils.js", + + "shell/machines/test-machines.js", + + "storaged/test-util.js", ], files: [ @@ -109,6 +116,7 @@ const info = { "playground/preloaded.html", "playground/notifications-receiver.html", "playground/journal.html", + "playground/remote.html", "selinux/index.html", diff --git a/node_modules b/node_modules index 585c5855a6f..8856ddc35a5 160000 --- a/node_modules +++ b/node_modules @@ -1 +1 @@ -Subproject commit 585c5855a6f02a5d0af106ffccf6cf1da1481421 +Subproject commit 8856ddc35a5df949cdf1cb816f87b683b6af8e52 diff --git a/package.json b/package.json index 2a290330272..37590810c5f 100644 --- a/package.json +++ b/package.json @@ -4,57 +4,61 @@ "type": "module", "private": true, "dependencies": { - "@patternfly/patternfly": "5.1.0", - "@patternfly/react-core": "5.1.2", - "@patternfly/react-icons": "5.1.2", - "@patternfly/react-styles": "5.1.2", - "@patternfly/react-table": "5.1.2", - "@patternfly/react-tokens": "5.1.2", - "deep-equal": "2.2.3", - "date-fns": "3.2.0", - "js-sha1": "0.6.0", - "js-sha256": "0.10.1", + "@patternfly/patternfly": "5.4.0", + "@patternfly/react-core": "5.4.0", + "@patternfly/react-icons": "5.4.0", + "@patternfly/react-styles": "5.4.0", + "@patternfly/react-table": "5.4.0", + "@patternfly/react-tokens": "5.4.0", + "dequal": "2.0.3", + "js-sha1": "0.7.0", + "js-sha256": "0.11.0", "json-stable-stringify-without-jsonify": "1.0.1", "prop-types": "15.8.1", - "react": "18.2.0", - "react-dom": "18.2.0", + "react": "18.3.1", + "react-dom": "18.3.1", "remarkable": "2.0.1", - "throttle-debounce": "5.0.0", - "uuid": "9.0.1", - "xterm": "5.3.0", - "xterm-addon-canvas": "0.5.0" + "throttle-debounce": "5.0.2", + "uuid": "10.0.0", + "@xterm/xterm": "5.5.0", + "@xterm/addon-canvas": "0.7.0" }, "devDependencies": { + "@types/qunit": "^2.19.10", + "@types/react": "18.3.10", + "@types/react-dom": "18.3.0", + "@typescript-eslint/eslint-plugin": "8.7.0", "argparse": "2.0.1", - "axe-core": "4.8.3", - "chrome-remote-interface": "0.33.0", - "esbuild": "0.19.11", + "esbuild": "0.24.0", "esbuild-plugin-copy": "2.1.1", "esbuild-plugin-replace": "1.4.0", - "esbuild-sass-plugin": "2.16.1", - "esbuild-wasm": "0.19.11", - "eslint": "8.56.0", + "esbuild-sass-plugin": "3.3.1", + "esbuild-wasm": "0.24.0", + "eslint": "8.57.1", "eslint-config-standard": "17.1.0", "eslint-config-standard-jsx": "11.0.0", "eslint-config-standard-react": "13.0.0", - "eslint-plugin-import": "2.29.1", - "eslint-plugin-jsx-a11y": "6.8.0", + "eslint-plugin-import": "2.30.0", + "eslint-plugin-jsx-a11y": "6.10.0", "eslint-plugin-node": "11.1.0", - "eslint-plugin-promise": "6.1.1", - "eslint-plugin-react": "7.33.2", - "eslint-plugin-react-hooks": "4.6.0", - "gettext-parser": "7.0.1", + "eslint-plugin-promise": "6.6.0", + "eslint-plugin-react": "7.37.0", + "eslint-plugin-react-hooks": "4.6.2", + 
"gettext-parser": "8.0.0", + "glob": "11.0.0", "htmlparser": "1.7.7", "jed": "1.1.1", - "qunit": "2.20.0", + "qunit": "2.22.0", "qunit-tap": "1.5.1", - "sass": "1.70.0", + "sass": "1.79.4", "sizzle": "2.3.10", - "stylelint": "15.11.0", - "stylelint-config-standard": "34.0.0", - "stylelint-config-standard-scss": "11.1.0", - "stylelint-formatter-pretty": "3.2.1", - "stylelint-use-logical-spec": "5.0.0" + "stylelint": "16.9.0", + "stylelint-config-recommended-scss": "14.0.0", + "stylelint-config-standard": "36.0.1", + "stylelint-config-standard-scss": "13.1.0", + "stylelint-formatter-pretty": "4.0.1", + "stylelint-use-logical-spec": "5.0.1", + "typescript": "^5.3.3" }, "scripts": { "eslint": "eslint --ext .js --ext .jsx pkg/ test/common/", diff --git a/packit.yaml b/packit.yaml index 8f77cd7cab3..c95b93350aa 100644 --- a/packit.yaml +++ b/packit.yaml @@ -27,13 +27,13 @@ jobs: identifier: self trigger: pull_request targets: - - fedora-38 - - fedora-39 - - fedora-latest-aarch64 - - fedora-development - - centos-stream-8-x86_64 + - fedora-40 + - fedora-41 + - fedora-latest-stable-aarch64 + - fedora-rawhide - centos-stream-9-x86_64 - centos-stream-9-aarch64 + - centos-stream-10 # current Fedora runs reverse dependency testing against https://copr.fedorainfracloud.org/coprs/g/cockpit/main-builds/ - job: tests @@ -54,8 +54,6 @@ jobs: - job: copr_build trigger: pull_request targets: - # 32 bit - - fedora-development-i386 # big-endian - fedora-development-s390x @@ -90,19 +88,19 @@ jobs: trigger: release dist_git_branches: - fedora-development - - fedora-38 - - fedora-39 + - fedora-40 + - fedora-41 - job: koji_build trigger: commit dist_git_branches: - fedora-development - - fedora-38 - - fedora-39 + - fedora-40 + - fedora-41 - job: bodhi_update trigger: commit dist_git_branches: # rawhide updates are created automatically - - fedora-38 - - fedora-39 + - fedora-40 + - fedora-41 diff --git a/pkg/Makefile.am b/pkg/Makefile.am index fd0bb675767..47ca35ea1c3 100644 --- a/pkg/Makefile.am +++ b/pkg/Makefile.am @@ -16,11 +16,6 @@ EXTRA_DIST += \ $(pixmaps_DATA) \ $(NULL) -if ENABLE_PCP -pcpmanifestdir = $(datadir)/cockpit/pcp -dist_pcpmanifest_DATA = pkg/pcp/manifest.json -endif - # one built file in dist/ which we use as dependency DIST_STAMP = $(srcdir)/dist/static/manifest.json @@ -32,7 +27,7 @@ V_BUNDLE = $(V_BUNDLE_$(V)) V_BUNDLE_ = $(V_BUNDLE_$(AM_DEFAULT_VERBOSITY)) V_BUNDLE_0 = @echo " BUNDLE dist"; -# delete the stamp first; neither webpack nor esbuild touch it if the contents didn't change, +# delete the stamp first; esbuild does not touch it if the contents didn't change, # but this is just a representative for all of dist/* $(DIST_STAMP): $(srcdir)/package-lock.json $(PKG_INPUTS) @rm -f $(DIST_STAMP) @@ -40,13 +35,6 @@ $(DIST_STAMP): $(srcdir)/package-lock.json $(PKG_INPUTS) EXTRA_DIST += build.js files.js package.json package-lock.json -# This is how the qunit tests get included. We need to prevent automake from -# seeing them during ./autogen.sh, but need make to find them at compile time. -# We don't run them in the pybridge case since they're part of `pytest`. 
-if WITH_OLD_BRIDGE --include $(wildcard pkg/Makefile.qunit*) -endif - INSTALL_DATA_LOCAL_TARGETS += install-bundles install-bundles: cd $(srcdir)/dist; find */* -type f -exec install -D -m 644 '{}' '$(abspath $(DESTDIR)$(datadir))/cockpit/{}' \; diff --git a/pkg/Makefile.qunit b/pkg/Makefile.qunit deleted file mode 100644 index 794c571c005..00000000000 --- a/pkg/Makefile.qunit +++ /dev/null @@ -1,2 +0,0 @@ -# We need to avoid automake seeing this, but want it evaluated at runtime -TESTS += $(wildcard $(srcdir)/qunit/*/test-*.html) diff --git a/pkg/apps/application-list.jsx b/pkg/apps/application-list.jsx index 35ee25353ce..0f33a31b9d4 100644 --- a/pkg/apps/application-list.jsx +++ b/pkg/apps/application-list.jsx @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import cockpit from "cockpit"; @@ -29,13 +29,15 @@ import { Stack, StackItem } from "@patternfly/react-core/dist/esm/layouts/Stack/ import { RebootingIcon } from "@patternfly/react-icons"; -import { check_uninstalled_packages } from "packagekit.js"; +import { check_uninstalled_packages } from "packagekit"; +import { get_manifest_config_matchlist } from "utils"; +import { read_os_release } from "os-release"; +import { EmptyStatePanel } from "cockpit-components-empty-state.jsx"; +import { useInit } from "hooks"; + import * as PackageKit from "./packagekit.js"; -import { read_os_release } from "os-release.js"; import { icon_url, show_error, launch, ProgressBar, CancelButton } from "./utils.jsx"; import { ActionButton } from "./application.jsx"; -import { EmptyStatePanel } from "cockpit-components-empty-state.jsx"; -import { useInit } from "../lib/hooks.js"; const _ = cockpit.gettext; @@ -103,25 +105,6 @@ export const ApplicationList = ({ metainfo_db, appProgress, appProgressTitle, ac comps.push(metainfo_db.components[id]); comps.sort((a, b) => a.name.localeCompare(b.name)); - function get_config(name, os_release, def) { - // ID is a single value, ID_LIKE is a list - const os_list = [os_release?.ID || "", ...(os_release?.ID_LIKE || "").split(/\s+/)]; - - if (cockpit.manifests.apps && cockpit.manifests.apps.config) { - const val = cockpit.manifests.apps.config[name]; - if (typeof val === 'object' && val !== null && !Array.isArray(val)) { - for (const os of os_list) { - if (val[os]) - return val[os]; - } - return def; - } - return val !== undefined ? 
val : def; - } else { - return def; - } - } - async function check_missing_data(packages) { try { const missing = await check_uninstalled_packages(packages); @@ -131,26 +114,33 @@ export const ApplicationList = ({ metainfo_db, appProgress, appProgressTitle, ac } } - useInit(async () => { + async function get_packages() { const os_release = await read_os_release(); - const configPackages = get_config('appstream_config_packages', os_release, []); - const dataPackages = get_config('appstream_data_packages', os_release, []); - await check_missing_data([...dataPackages, ...configPackages]); + // ID is a single value, ID_LIKE is a list + const os_list = [os_release?.ID, ...(os_release?.ID_LIKE || "").split(/\s+/)]; + const configPackages = get_manifest_config_matchlist('apps', 'appstream_config_packages', [], os_list); + const dataPackages = get_manifest_config_matchlist('apps', 'appstream_data_packages', [], os_list); + return [configPackages, dataPackages]; + } + + useInit(async () => { + const [config, data] = await get_packages(); + await check_missing_data([...config, ...data]); }); - function refresh() { - read_os_release().then(os_release => { - const configPackages = get_config('appstream_config_packages', os_release, []); - const dataPackages = get_config('appstream_data_packages', os_release, []); - PackageKit.refresh(metainfo_db.origin_files, - configPackages, - dataPackages, - setProgress) - .finally(async () => { - await check_missing_data([...dataPackages, ...configPackages]); - setProgress(false); - }).catch(show_error); - }); + async function refresh() { + const [configPackages, dataPackages] = await get_packages(); + try { + await PackageKit.refresh(metainfo_db.origin_files, + configPackages, + dataPackages, + setProgress); + } catch (e) { + show_error(e); + } finally { + await check_missing_data([...dataPackages, ...configPackages]); + setProgress(false); + } } let refresh_progress, refresh_button, tbody; diff --git a/pkg/apps/application.jsx b/pkg/apps/application.jsx index 6d11e859950..38fb8de60af 100644 --- a/pkg/apps/application.jsx +++ b/pkg/apps/application.jsx @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import cockpit from "cockpit"; diff --git a/pkg/apps/apps.jsx b/pkg/apps/apps.jsx index 873e0066462..f1e2536c89e 100644 --- a/pkg/apps/apps.jsx +++ b/pkg/apps/apps.jsx @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import '../lib/patternfly/patternfly-5-cockpit.scss'; diff --git a/pkg/apps/appstream.js b/pkg/apps/appstream.js index ad023208d93..afdf92ed6e9 100644 --- a/pkg/apps/appstream.js +++ b/pkg/apps/appstream.js @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import cockpit from "cockpit"; diff --git a/pkg/apps/index.html b/pkg/apps/index.html index 2c14ed33de5..ee2a49ebef6 100644 --- a/pkg/apps/index.html +++ b/pkg/apps/index.html @@ -15,7 +15,7 @@ Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License -along with Cockpit; If not, see . +along with Cockpit; If not, see . 
--> diff --git a/pkg/apps/manifest.json b/pkg/apps/manifest.json index 56039e57f53..ee732194072 100644 --- a/pkg/apps/manifest.json +++ b/pkg/apps/manifest.json @@ -10,8 +10,6 @@ } }, - "content-security-policy": "img-src *", - "config": { "appstream_config_packages": { "debian": ["appstream"] diff --git a/pkg/apps/packagekit.js b/pkg/apps/packagekit.js index 059233c8845..1171dbc93df 100644 --- a/pkg/apps/packagekit.js +++ b/pkg/apps/packagekit.js @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import cockpit from "cockpit"; diff --git a/pkg/apps/utils.jsx b/pkg/apps/utils.jsx index 53cef7846c5..a3ab209886f 100644 --- a/pkg/apps/utils.jsx +++ b/pkg/apps/utils.jsx @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import cockpit from "cockpit"; diff --git a/pkg/apps/watch-appstream.py b/pkg/apps/watch-appstream.py index 0b0ad243f39..1fcaa5f5af1 100644 --- a/pkg/apps/watch-appstream.py +++ b/pkg/apps/watch-appstream.py @@ -356,10 +356,15 @@ def installed_callback(path): def available_callback(path): process_file(path, lambda path, xml: db.notice_available(path, xml)) - watcher.watch_directory('/usr/share/metainfo', installed_callback) + # https://www.freedesktop.org/software/appstream/docs/chap-CatalogData.html watcher.watch_directory('/usr/share/swcatalog/xml', available_callback) + watcher.watch_directory('/var/cache/swcatalog/xml', available_callback) + watcher.watch_directory('/var/lib/swcatalog/xml', available_callback) + # legacy paths watcher.watch_directory('/usr/share/app-info/xmls', available_callback) watcher.watch_directory('/var/cache/app-info/xmls', available_callback) + # installed packages + watcher.watch_directory('/usr/share/metainfo', installed_callback) db.start_dumping() watcher.run() diff --git a/pkg/base1/cockpit.js b/pkg/base1/cockpit.js index d09ec61aa35..853417ede1a 100644 --- a/pkg/base1/cockpit.js +++ b/pkg/base1/cockpit.js @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . 
*/ // this registers itself as global on window.cockpit diff --git a/pkg/base1/test-chan.js b/pkg/base1/test-chan.js index 143ef96ff0a..58b48a2e2da 100644 --- a/pkg/base1/test-chan.js +++ b/pkg/base1/test-chan.js @@ -719,42 +719,6 @@ QUnit.test("filter message in", function (assert) { channel.send("three"); }); -QUnit.test("filter message out", function (assert) { - const done = assert.async(); - assert.expect(10); - - let filtered = 0; - let filtering = true; - cockpit.transport.filter(function(message, channelid, control) { - if (!filtering) - return true; - if (message[0] == '\n') { - assert.strictEqual(channelid, "", "control message channel"); - assert.equal(typeof control, "object", "control is a JSON object"); - assert.equal(typeof control.command, "string", "control has a command"); - } else { - assert.strictEqual(channelid, channel.id, "cockpit channel id"); - assert.equal(control, undefined, "control is undefined"); - filtered += 1; - - if (filtered != 1) { - channel.close(); - filtering = false; - done(); - return false; - } - - return true; - } - return false; - }, true); - - const channel = cockpit.channel({ payload: "null" }); - channel.send("one"); - channel.send("two"); - channel.send("three"); -}); - QUnit.test("inject message out", function (assert) { const done = assert.async(); assert.expect(4); diff --git a/pkg/base1/test-channel.ts b/pkg/base1/test-channel.ts new file mode 100644 index 00000000000..4262854fcc7 --- /dev/null +++ b/pkg/base1/test-channel.ts @@ -0,0 +1,534 @@ +import QUnit from "qunit-tests"; + +import { transport_globals } from 'cockpit/_internal/transport'; +import { Channel, ChannelControlMessage, ChannelOptions, ChannelPayload } from 'cockpit/channel'; + +// simple function to 'use' values (to avoid 'unused' warnings) +function use(..._args: unknown[]) { +} + +// Many of the functions in this file are not designed to run, but to be +// typechecked. Here's some assertions for the types we'll check. +function is_str(_x: string): void { +} + +function is_bytes(_x: Uint8Array): void { +} + +// verify that type assertions are fundamentally working the way we think +function verify_type_assertions() { + const bytes = new Uint8Array(); + const str = ""; + + // These are positive assertions + is_str(str); + is_bytes(bytes); + + // These are typing fails. tsc will verify that they fail. + // @ts-expect-error Cannot pass bytes to is_str() + is_str(bytes); + + // @ts-expect-error Cannot pass string to is_bytes() + is_bytes(str); +} + +function is_str_channel(x: Channel): void { + x.on('data', msg => is_str(msg)); + + // @ts-expect-error: of course string channels don't get binary data... + x.on('data', msg => is_bytes(msg)); +} + +function is_bytes_channel(x: Channel): void { + x.on('data', msg => is_bytes(msg)); + + // @ts-expect-error: of course binary channels don't get string data... + x.on('data', msg => is_str(msg)); +} + +// tsc seems to have inconsistent treatment of `void` argument types when +// things get complicated. If this ever gets fixed, search for 'VOID BUG' +// and adjust each test + +// https://github.com/microsoft/TypeScript/issues/29131 + +// This is equivalent to 'void', but tsc treats it weirdly +type AlwaysVoid

<P> = P extends string ? void : void;
+function simple_void(_x: void): void { }
+function weird_void<P>(_x: AlwaysVoid<P>): void { }
+class VoidCtor<P> { constructor(x: AlwaysVoid<P>
) { use(x) } } +class VoidCtorSubclass extends VoidCtor { } +function test_void_weirdness() { + const v = (() => {})(); // get a value of type 'void' + + // This works just fine + simple_void(); + // @ts-expect-error But this doesn't work + weird_void(); + weird_void(v); // but we can demonstrate that `void` is indeed accepted here + + // @ts-expect-error This doesn't work + use(new VoidCtor()); + use(new VoidCtor(v)); // ... even though 'void' is the argument type + + // ...but for some reason this works, even though it's the very same function + use(new VoidCtorSubclass()); +} + +function test_channel_api_types() { + // @ts-expect-error: It's not valid to create a channel with no args + const no_args_channel = new Channel(); + use(no_args_channel); + + // @ts-expect-error: It's not valid to create a channel with no payload + const no_payload_channel = new Channel({}); + use(no_payload_channel); + + // @ts-expect-error: It's not valid to create a channel with no payload + const text_no_payload_channel = new Channel({ binary: false }); + use(text_no_payload_channel); + + // @ts-expect-error: It's not valid to create a channel with no payload + const bytes_no_payload_channel = new Channel({ binary: true }); + use(bytes_no_payload_channel); + + // This how to create a valid text channel + const text_channel = new Channel({ payload: 'echo' }); + is_str_channel(text_channel); + + // This is also fine, if you like to be explicit + const explicit_type_text_channel = new Channel({ payload: 'echo' }); + is_str_channel(explicit_type_text_channel); + + // ...another way to be explicit + const explicit_opt_text_channel = new Channel({ payload: 'echo', binary: false }); + is_str_channel(explicit_opt_text_channel); + + // Or why not both at once? + const very_explicit_text_channel = new Channel({ payload: 'echo', binary: false }); + is_str_channel(very_explicit_text_channel); + + // Binary channels need to specify both the type and the flag + const binary_channel = new Channel({ payload: 'echo', binary: true }); + is_bytes_channel(binary_channel); + + // Unfortunately we can't detect Channel based on `binary: true` + // without engaging a seriously advanced level of typing gymnastics (which + // would introduce other drawbacks). + // @ts-expect-error It would be nice if this were possible... + const autodetect_binary_channel = new Channel({ payload: 'echo', binary: true }); + use(autodetect_binary_channel); + + // But, directly using the new channel in a typed context should hint the + // correct type without explicitly specifying it, which will be the usual case. 
+ is_str_channel(new Channel({ payload: 'echo' })); + is_str_channel(new Channel({ payload: 'echo', binary: false })); + is_bytes_channel(new Channel({ payload: 'echo', binary: true })); + + // The opposite should all be impossible + // @ts-expect-error should not be able to convince a text channel that it's binary + is_bytes_channel(new Channel({ payload: 'echo' })); + // @ts-expect-error should not be able to convince a text channel that it's binary + is_bytes_channel(new Channel({ payload: 'echo', binary: false })); + // @ts-expect-error should not be able to convince a binary channel that it's text + is_str_channel(new Channel({ payload: 'echo', binary: true })); + + // Explicitly giving the wrong type parameter should also be forbidden + // @ts-expect-error should not be able to convince a text channel that it's binary + const not_text_channel = new Channel({ payload: 'echo' }); + use(not_text_channel); + + // @ts-expect-error should not be able to convince a text channel that it's binary + const not_explicit_text_channel = new Channel({ payload: 'echo', binary: false }); + use(not_explicit_text_channel); + + // @ts-expect-error should not be able to convince a binary channel that it's text + const not_binary_channel = new Channel({ payload: 'echo', binary: true }); + use(not_binary_channel); +} + +// This is how it looks to create a wrapper API that can open a channel in +// either text or binary mode. +function open_echo

<P extends ChannelPayload = string>(options: ChannelOptions<P>): Channel<P>
{ + return new Channel({ ...options, payload: 'echo' }); +} + +// And this is how you use that API: +function test_open_echo_types() { + const string_echo = open_echo({}); + is_str_channel(string_echo); + + // @ts-expect-error This should be possible because `void` is valid for + // options but it oddly doesn't work (VOID BUG) + const string_echo_void = open_echo(); + is_str_channel(string_echo_void); + + const binary_echo = open_echo({ binary: true }); + is_bytes_channel(binary_echo); +} + +// Demonstrate how to properly do typing on a derived channel type which can be +// opened in either text or binary mode +class EchoChannel

<P extends ChannelPayload = string> extends Channel<P> {
+    constructor(options: ChannelOptions<P>
) { + super({ ...options, payload: 'echo' }); + } +} + +function test_echo_channel_types() { + const text_echo_channel = new EchoChannel({}); + is_str_channel(text_echo_channel); + + // @ts-expect-error This should also be possible because options can be void + // but is currently not working (VOID BUG) + const void_text_echo_channel = new EchoChannel(); + is_str_channel(void_text_echo_channel); + + const binary_echo_channel = new EchoChannel({ binary: true }); + is_bytes_channel(binary_echo_channel); +} + +// Various subclasses that further derive from EchoChannel to add a specific mode +export class TextEchoChannel extends EchoChannel { + constructor() { + super({ binary: false }); + } +} + +export class TextEchoChannelDefaultArg extends EchoChannel { + constructor() { + super({ }); + } +} + +export class TextEchoChannelTrivial extends EchoChannel { + // eslint-disable-next-line no-useless-constructor + constructor() { + super(); + } +} + +export class TextEchoChannelNoConstructor extends EchoChannel { + // this is fine, will be checked at instantiation +} + +export class BinaryEchoChannel extends EchoChannel { + constructor() { + super({ binary: true }); + } +} + +export class BinaryEchoChannelNoConstructor extends EchoChannel { + // this is fine, but users will be required to pass `{ binary: true } for themselves +} + +function test_fixed_type_echo_channels_types() { + const text_echo_channel = new TextEchoChannel(); + is_str_channel(text_echo_channel); + + const text_echo_channel_default_arg = new TextEchoChannelDefaultArg(); + is_str_channel(text_echo_channel_default_arg); + + const text_echo_channel_trivial = new TextEchoChannelTrivial(); + is_str_channel(text_echo_channel_trivial); + + // This is really the same as the other cases of passing no arguments (and + // indeed, we're directly calling the constructor on EchoChannel, but this + // time it works for some reason. VOID BUG doesn't affect this case? + const text_echo_channel_no_constructor = new TextEchoChannelNoConstructor(); + is_str_channel(text_echo_channel_no_constructor); + + // @ts-expect-error Of course this is invalid... + const text_binary_channel = new TextEchoChannelNoConstructor({ binary: true }); + is_str_channel(text_binary_channel); + + const binary_echo_channel = new BinaryEchoChannel(); + is_bytes_channel(binary_echo_channel); + + // We need to pass the `binary: true` flag ourselves since there's no + // constructor in this class to provide it for us. 
+ const binary_echo_no_ctor = new BinaryEchoChannelNoConstructor({ binary: true }); + is_bytes_channel(binary_echo_no_ctor); + + // @ts-expect-error If we forget to pass the flag, it's an error + const binary_echo_no_arg = new BinaryEchoChannelNoConstructor(); + is_bytes_channel(binary_echo_no_arg); + + // @ts-expect-error If we pass the wrong flag, it's an error + const binary_echo_wrong_arg = new BinaryEchoChannelNoConstructor({ binary: false }); + is_bytes_channel(binary_echo_wrong_arg); +} + +// These three subclasses are incorrectly implemented and will trigger errors +export class BrokenTextEchoChannelWrongConstructor extends EchoChannel { + constructor() { + // @ts-expect-error must specify binary: false for string channel + super({ binary: true }); + } +} + +export class BrokenBinaryEchoChannelWrongConstructor extends EchoChannel { + constructor() { + // @ts-expect-error must specify binary: true for Uint8Array channel + super({ binary: false }); + } +} + +export class BrokenBinaryEchoChannelTrivialConstructor extends EchoChannel { + // eslint-disable-next-line no-useless-constructor + constructor() { + // @ts-expect-error must specify binary: true for Uint8Array channel + super(); + } +} + +// Demonstrate typing on a function that can open a channel in either mode +function open_echo_subclass

(options: ChannelOptions

): EchoChannel

{
+    return new EchoChannel(options);
+}
+
+function is_echo_channel

(_channel: EchoChannel

) { +} + +function test_open_echo_subclass_type() { + // @ts-expect-error VOID BUG is back again in this case... + const no_arg = open_echo_subclass(); + is_echo_channel(no_arg); + + // legit calls + const empty_arg = open_echo_subclass({}); + is_echo_channel(empty_arg); + + const false_arg = open_echo_subclass({ binary: false }); + is_echo_channel(false_arg); + + const true_arg = open_echo_subclass({ binary: true }); + is_echo_channel(true_arg); + + // inferred type from context + is_echo_channel(open_echo_subclass({})); + is_echo_channel(open_echo_subclass({ binary: false })); + is_echo_channel(open_echo_subclass({ binary: true })); + + // and the 'wrong' versions + // @ts-expect-error Can't have a binary channel with no args + is_echo_channel(open_echo_subclass({})); + // @ts-expect-error Can't have a binary channel with binary: false + is_echo_channel(open_echo_subclass({ binary: false })); + // @ts-expect-error Can't have a text channel with binary: true + is_echo_channel(open_echo_subclass({ binary: true })); +} + +// Demonstrate usage of above classes and functions +function test_payload_types() { + const text_channel = new Channel({ payload: 'echo' }); + text_channel.on('data', msg => { + is_str(msg); + + // @ts-expect-error This should be a string + is_bytes(msg); + }); + text_channel.send_data(''); + // @ts-expect-error Can't send binary data on text channels + text_channel.send_data(new Uint8Array()); + + const binary_channel = new Channel({ payload: 'echo', binary: true }); + binary_channel.on('data', msg => { + is_bytes(msg); + + // @ts-expect-error This should be a Uint8Array + is_str(msg); + }); + binary_channel.send_data(new Uint8Array()); + // @ts-expect-error Can't send text data on binary channels + binary_channel.send_data(''); +} + +export function test_unknown_payload(binary: boolean) { + // The type of a channel with unknown payload is Channel + const unknown0 = new Channel({ payload: 'echo', binary }); + + // Upcasting to the unknown type is always valid + const unknown1: Channel = + new Channel({ payload: 'echo' }); + use(unknown1); + const unknown2: Channel = + new Channel({ payload: 'echo', binary: true }); + use(unknown2); + + // @ts-expect-error Downcasting is not valid + const known1: Channel = unknown0; + use(known1); + // @ts-expect-error Downcasting is not valid + const known2: Channel = unknown0; + use(known2); + + // It's possible to directly construct the unknown type with any binary flag + const unknown3 = new Channel({ payload: 'echo' }); + use(unknown3); + const unknown4 = new Channel({ payload: 'echo', binary: true }); + use(unknown4); + const unknown5 = new Channel({ payload: 'echo', binary: false }); + use(unknown5); + + unknown0.on('data', msg => { + // @ts-expect-error msg on unknown payload channel is not str + is_str(msg); + // @ts-expect-error msg on unknown payload channel is not bytes + is_bytes(msg); + }); + + // Unfortunately, because of how the types work, either of these works. + // Fortunately, it's unproblematic at the protocol implementation level. 
+ unknown0.send_data(new Uint8Array()); + unknown0.send_data(''); +} + +// This is all just to verify various properties of the static typing +// Nothing will run this, so we export it to avoid an 'unused' warning +export function test_typing() { + verify_type_assertions(); + test_void_weirdness(); + test_channel_api_types(); + test_open_echo_types(); + test_echo_channel_types(); + test_fixed_type_echo_channels_types(); + test_open_echo_subclass_type(); + test_payload_types(); +} + +// Actual dynamic tests start here +// This test must be the first test — it tests global transport startup +QUnit.test("test startup queue", async assert => { + assert.equal(transport_globals.default_transport, null, 'no transport yet'); + + const echo = new Channel({ payload: 'echo' }); + // queue up a bunch of data before our first `await` + echo.send_data('a'); + echo.send_control({ command: 'x' }); + echo.send_data('b'); + echo.send_control({ command: 'y' }); + echo.send_data('c'); + echo.send_control({ command: 'z' }); + echo.done(); + + const incoming_messages: (string | ChannelControlMessage)[] = []; + echo.on('data', msg => incoming_messages.push(msg)); + echo.on('control', msg => incoming_messages.push(msg)); + echo.on('done', msg => incoming_messages.push(msg)); + echo.on('close', msg => incoming_messages.push(msg)); + + // now we await. the Transport will come online here and our outgoing + // queue will drain then we'll receive our results. + assert.false(transport_globals.default_transport?.ready, 'transport not ready'); + assert.true(echo.toString().includes('waiting for transport'), 'waiting for transport'); + assert.equal(echo.id, null, 'null ID'); + + await echo.wait(); + + assert.true(transport_globals.default_transport?.ready, 'transport now ready'); + assert.false(echo.toString().includes('waiting'), 'no longer waiting for anything'); + assert.notEqual(echo.id, null, 'non-null ID'); + + // echo channel echos only data, not control + // but we will get done/close anyway + const expected_incoming_messages = [ + 'a', + 'b', + 'c', + { command: 'done', channel: echo.id }, + { command: 'close', channel: echo.id }, + ]; + + // wait until we have all of our expected messages, up to 1s + for (let i = 0; i < 100; i++) { + if (incoming_messages.length === expected_incoming_messages.length) + break; + await new Promise(resolve => window.setTimeout(resolve, 10)); + } + + assert.deepEqual(incoming_messages, expected_incoming_messages, 'received echoed messages'); + assert.true(echo.toString().includes('closed'), 'channel closed'); +}); + +QUnit.test("simple channel api", async assert => { + const binary_foo = new TextEncoder().encode('foo'); + + const echo = new Channel({ payload: 'echo' }); + assert.true(echo.toString().includes('waiting for open'), 'waiting for open'); + await echo.wait(); + assert.true(echo.toString().includes('opened'), 'opened'); + echo.send_data('foo'); + const unknown_echo: Channel = echo; + unknown_echo.send_data(binary_foo); + // make sure it comes back as a string + assert.equal(await new Promise(resolve => echo.on('data', resolve)), 'foo'); + echo.done(); + await new Promise(resolve => echo.on('done', resolve)); + await new Promise(resolve => echo.on('close', resolve)); + assert.true(echo.toString().includes('closed'), 'opened'); + + const binary_echo = new Channel({ payload: 'echo', binary: true }); + await echo.wait(); + const unknown_binary_echo: Channel = binary_echo; + unknown_binary_echo.send_data('foo'); // send a string on a binary channel + // make sure it comes back 
as binary + assert.deepEqual(await new Promise(resolve => binary_echo.on('data', resolve)), binary_foo); + binary_echo.done(); + await new Promise(resolve => binary_echo.on('done', resolve)); + await new Promise(resolve => binary_echo.on('close', resolve)); +}); + +QUnit.test("unsupported channel", async assert => { + const not_supported = new Channel({ payload: 'nonesuch' }); + let err = null; + try { + await not_supported.wait(); + } catch (exc) { + err = exc as ChannelControlMessage; // yaya + } + assert.equal(err?.problem, 'not-supported'); + assert.true(not_supported.toString().includes('error not-supported'), 'got error'); +}); + +QUnit.test("close with error", async assert => { + const channel = new Channel({ payload: 'nonesuch' }); + + let closed = null; + channel.on('close', msg => { closed = msg }); + channel.close('xyz', { extra: 55 }); + assert.deepEqual(closed, { command: 'close', problem: 'xyz', extra: 55 }); + + let err = null; + try { + await channel.wait(); + } catch (exc) { + err = exc as ChannelControlMessage; // yaya; + } + assert.equal(err?.problem, 'xyz'); +}); + +QUnit.test("no signals after manual close", async assert => { + let saw_data = false; + + const channel = new Channel({ payload: 'echo' }); + channel.on('data', () => { saw_data = true }); + channel.send_data('a'); + channel.send_data('b'); + channel.send_data('c'); + channel.close(); + + let err = null; + try { + await channel.wait(); + } catch (exc) { + err = exc; + } + assert.deepEqual(err, { command: 'close' }); + + // wait for any extra signals + await new Promise(resolve => window.setTimeout(resolve, 100)); + assert.false(saw_data, 'no data callbacks after close'); +}); + +QUnit.start(); diff --git a/pkg/base1/test-dbus-common.js b/pkg/base1/test-dbus-common.js index 96b735132d6..24839b397ce 100644 --- a/pkg/base1/test-dbus-common.js +++ b/pkg/base1/test-dbus-common.js @@ -14,11 +14,11 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . 
*/ import cockpit from "cockpit"; -import QUnit from "qunit-tests"; +import QUnit, { skipWithPybridge } from "qunit-tests"; function deep_update(target, data) { for (const prop in data) { @@ -104,7 +104,7 @@ export function common_dbus_tests(channel_options, bus_name) { // eslint-disable ], "round trip"); }); - QUnit.test.skipWithPybridge("integer bounds", async assert => { + skipWithPybridge("integer bounds", async assert => { const dbus = cockpit.dbus(bus_name, channel_options); async function testNumber(type, value, valid) { @@ -163,7 +163,7 @@ export function common_dbus_tests(channel_options, bus_name) { // eslint-disable ], "round trip"); }); - QUnit.test.skipWithPybridge("variants", async assert => { + skipWithPybridge("variants", async assert => { const dbus = cockpit.dbus(bus_name, channel_options); const reply = await dbus.call( "/otree/frobber", "com.redhat.Cockpit.DBusTests.Frobber", @@ -179,7 +179,7 @@ export function common_dbus_tests(channel_options, bus_name) { // eslint-disable ], "round trip"); }); - QUnit.test.skipWithPybridge("bad variants", async assert => { + skipWithPybridge("bad variants", async assert => { const dbus = cockpit.dbus(bus_name, channel_options); try { await dbus.call( @@ -236,7 +236,7 @@ export function common_dbus_tests(channel_options, bus_name) { // eslint-disable } }); - QUnit.test.skipWithPybridge("call bad base64", async assert => { + skipWithPybridge("call bad base64", async assert => { const dbus = cockpit.dbus(bus_name, channel_options); try { await dbus.call( @@ -322,7 +322,7 @@ export function common_dbus_tests(channel_options, bus_name) { // eslint-disable }); }); - QUnit.test.skipWithPybridge("empty base64", assert => { + skipWithPybridge("empty base64", assert => { const done = assert.async(); assert.expect(3); @@ -340,7 +340,7 @@ export function common_dbus_tests(channel_options, bus_name) { // eslint-disable }); }); - QUnit.test.skipWithPybridge("bad object path", async assert => { + skipWithPybridge("bad object path", async assert => { const dbus = cockpit.dbus(bus_name, channel_options); try { await dbus.call("invalid/path", "borkety.Bork", "Echo", [1]); @@ -351,7 +351,7 @@ export function common_dbus_tests(channel_options, bus_name) { // eslint-disable } }); - QUnit.test.skipWithPybridge("bad interface name", async assert => { + skipWithPybridge("bad interface name", async assert => { const dbus = cockpit.dbus(bus_name, channel_options); try { await dbus.call("/path", "!invalid!interface!", "Echo", [1]); @@ -362,7 +362,7 @@ export function common_dbus_tests(channel_options, bus_name) { // eslint-disable } }); - QUnit.test.skipWithPybridge("bad method name", async assert => { + skipWithPybridge("bad method name", async assert => { const dbus = cockpit.dbus(bus_name, channel_options); try { await dbus.call("/path", "borkety.Bork", "!Invalid!Method!", [1]); @@ -373,7 +373,7 @@ export function common_dbus_tests(channel_options, bus_name) { // eslint-disable } }); - QUnit.test.skipWithPybridge("bad flags", async assert => { + skipWithPybridge("bad flags", async assert => { const dbus = cockpit.dbus(bus_name, channel_options); try { await dbus.call("/path", "borkety.Bork", "Method", [1], { flags: 5 }); @@ -384,7 +384,7 @@ export function common_dbus_tests(channel_options, bus_name) { // eslint-disable } }); - QUnit.test.skipWithPybridge("bad types", async assert => { + skipWithPybridge("bad types", async assert => { const dbus = cockpit.dbus(bus_name, channel_options); try { await dbus.call("/bork", "borkety.Bork", "Echo", [1], { type: 
"!!%%" }); @@ -395,7 +395,7 @@ export function common_dbus_tests(channel_options, bus_name) { // eslint-disable } }); - QUnit.test.skipWithPybridge("bad type invalid", async assert => { + skipWithPybridge("bad type invalid", async assert => { const dbus = cockpit.dbus(bus_name, channel_options); try { await dbus.call("/bork", "borkety.Bork", "Echo", [1], { type: 5 }); // invalid @@ -406,7 +406,7 @@ export function common_dbus_tests(channel_options, bus_name) { // eslint-disable } }); - QUnit.test.skipWithPybridge("bad dict type", async assert => { + skipWithPybridge("bad dict type", async assert => { const dbus = cockpit.dbus(bus_name, channel_options); try { await dbus.call("/otree/frobber", "com.redhat.Cockpit.DBusTests.Frobber", "Nobody", @@ -418,7 +418,7 @@ export function common_dbus_tests(channel_options, bus_name) { // eslint-disable } }); - QUnit.test.skipWithPybridge("bad object path", async assert => { + skipWithPybridge("bad object path", async assert => { const dbus = cockpit.dbus(bus_name, channel_options); try { await dbus.call("/otree/frobber", "com.redhat.Cockpit.DBusTests.Frobber", "Nobody", @@ -430,7 +430,7 @@ export function common_dbus_tests(channel_options, bus_name) { // eslint-disable } }); - QUnit.test.skipWithPybridge("bad signature", async assert => { + skipWithPybridge("bad signature", async assert => { const dbus = cockpit.dbus(bus_name, channel_options); try { await dbus.call("/otree/frobber", "com.redhat.Cockpit.DBusTests.Frobber", "Nobody", ["bad signature"], { type: "g" }); @@ -710,7 +710,7 @@ export function common_dbus_tests(channel_options, bus_name) { // eslint-disable dbus.removeEventListener("notify", onnotify); }); - QUnit.test.skipWithPybridge("path loop", async assert => { + skipWithPybridge("path loop", async assert => { const name = "yo" + new Date().getTime(); const cache = { }; @@ -733,7 +733,7 @@ export function common_dbus_tests(channel_options, bus_name) { // eslint-disable dbus.removeEventListener("notify", onnotify); }); - QUnit.test.skipWithPybridge("path signal", async assert => { + skipWithPybridge("path signal", async assert => { const name = "yo" + new Date().getTime(); const cache = { }; @@ -940,7 +940,7 @@ export function dbus_track_tests(channel_options, bus_name) { assert.equal(gone, false, "is not gone"); }); - QUnit.test.skipWithPybridge("receive readable fd", async assert => { + skipWithPybridge("receive readable fd", async assert => { const done = assert.async(); assert.expect(3); @@ -959,7 +959,7 @@ export function dbus_track_tests(channel_options, bus_name) { }; }); - QUnit.test.skipWithPybridge("receive readable fd and ensure opening more than once fails", async assert => { + skipWithPybridge("receive readable fd and ensure opening more than once fails", async assert => { const done = assert.async(); assert.expect(6); @@ -981,7 +981,7 @@ export function dbus_track_tests(channel_options, bus_name) { }; }); - QUnit.test.skipWithPybridge("receive readable fd and ensure writing fails", async assert => { + skipWithPybridge("receive readable fd and ensure writing fails", async assert => { const done = assert.async(); assert.expect(5); @@ -1002,7 +1002,7 @@ export function dbus_track_tests(channel_options, bus_name) { }; }); - QUnit.test.skipWithPybridge("receive writable fd", async assert => { + skipWithPybridge("receive writable fd", async assert => { const dbus = cockpit.dbus("com.redhat.Cockpit.DBusTests.Test", channel_options); const [fd] = await dbus.call("/otree/frobber", "com.redhat.Cockpit.DBusTests.Frobber", "MakeTestFd", 
["writable"]); diff --git a/pkg/base1/test-dbus.js b/pkg/base1/test-dbus.js index a4f4f05d179..0a75ba1d4b4 100644 --- a/pkg/base1/test-dbus.js +++ b/pkg/base1/test-dbus.js @@ -1,5 +1,5 @@ import cockpit from "cockpit"; -import QUnit from "qunit-tests"; +import QUnit, { mock_info, skipWithPybridge } from "qunit-tests"; import { common_dbus_tests, dbus_track_tests } from "./test-dbus-common.js"; @@ -142,7 +142,7 @@ QUnit.test("owned message for absent service", assert => { }); }); -QUnit.test.skipWithPybridge("bad dbus address", function (assert) { +skipWithPybridge("bad dbus address", function (assert) { const done = assert.async(); assert.expect(1); @@ -153,7 +153,7 @@ QUnit.test.skipWithPybridge("bad dbus address", function (assert) { }); }); -QUnit.test.skipWithPybridge("bad dbus bus", function (assert) { +skipWithPybridge("bad dbus bus", function (assert) { const done = assert.async(); assert.expect(1); @@ -194,7 +194,7 @@ QUnit.test("wait fail", function (assert) { }); }); -QUnit.test.skipWithPybridge("no default name", function (assert) { +skipWithPybridge("no default name", function (assert) { const done = assert.async(); assert.expect(1); @@ -211,7 +211,7 @@ QUnit.test.skipWithPybridge("no default name", function (assert) { }); }); -QUnit.test.skipWithPybridge("no default name bad", function (assert) { +skipWithPybridge("no default name bad", function (assert) { const done = assert.async(); assert.expect(2); @@ -229,7 +229,7 @@ QUnit.test.skipWithPybridge("no default name bad", function (assert) { }); }); -QUnit.test.skipWithPybridge("no default name invalid", function (assert) { +skipWithPybridge("no default name invalid", function (assert) { const done = assert.async(); assert.expect(2); @@ -247,7 +247,7 @@ QUnit.test.skipWithPybridge("no default name invalid", function (assert) { }); }); -QUnit.test.skipWithPybridge("no default name missing", function (assert) { +skipWithPybridge("no default name missing", function (assert) { const done = assert.async(); assert.expect(2); @@ -265,7 +265,7 @@ QUnit.test.skipWithPybridge("no default name missing", function (assert) { }); }); -QUnit.test.skipWithPybridge("no default name second", function (assert) { +skipWithPybridge("no default name second", function (assert) { const done = assert.async(); assert.expect(2); @@ -290,7 +290,7 @@ QUnit.test.skipWithPybridge("no default name second", function (assert) { }); }); -QUnit.test.skipWithPybridge("override default name", function (assert) { +skipWithPybridge("override default name", function (assert) { const done = assert.async(); assert.expect(2); @@ -314,7 +314,7 @@ QUnit.test.skipWithPybridge("override default name", function (assert) { }); }); -QUnit.test.skipWithPybridge("watch no default name", function (assert) { +skipWithPybridge("watch no default name", function (assert) { const done = assert.async(); assert.expect(1); @@ -337,7 +337,7 @@ QUnit.test.skipWithPybridge("watch no default name", function (assert) { }); }); -QUnit.test.skipWithPybridge("watch missing name", function (assert) { +skipWithPybridge("watch missing name", function (assert) { const done = assert.async(); assert.expect(2); @@ -355,7 +355,7 @@ QUnit.test.skipWithPybridge("watch missing name", function (assert) { }); }); -QUnit.test.skipWithPybridge("shared client", function (assert) { +skipWithPybridge("shared client", function (assert) { const done = assert.async(); assert.expect(2); @@ -395,7 +395,7 @@ QUnit.test("not shared option", function (assert) { dbus2.close(); }); -QUnit.test.skipWithPybridge("emit signal 
type", function (assert) { +skipWithPybridge("emit signal type", function (assert) { const done = assert.async(); assert.expect(4); @@ -425,7 +425,7 @@ QUnit.test.skipWithPybridge("emit signal type", function (assert) { }); }); -QUnit.test.skipWithPybridge("emit signal no meta", function (assert) { +skipWithPybridge("emit signal no meta", function (assert) { const done = assert.async(); assert.expect(2); @@ -451,13 +451,13 @@ async function internal_test(assert, options) { QUnit.test("internal dbus", async assert => internal_test(assert, { bus: "internal" })); -QUnit.test.skipWithPybridge("internal dbus bus none", - async assert => internal_test(assert, { bus: "none" })); +skipWithPybridge("internal dbus bus none", + async assert => internal_test(assert, { bus: "none" })); -QUnit.test.skipWithPybridge("internal dbus bus none with address", - async assert => internal_test(assert, { bus: "none", address: "internal" })); +skipWithPybridge("internal dbus bus none with address", + async assert => internal_test(assert, { bus: "none", address: "internal" })); -QUnit.test.skipWithPybridge("separate dbus connections for channel groups", function (assert) { +skipWithPybridge("separate dbus connections for channel groups", function (assert) { const done = assert.async(); assert.expect(4); @@ -533,7 +533,7 @@ QUnit.test("nonexisting address", async assert => { await dbus.call("/org/freedesktop/DBus", "org.freedesktop.DBus", "Hello", []); assert.ok(false, "should not be reached"); } catch (ex) { - if (await QUnit.mock_info("pybridge")) { + if (await mock_info("pybridge")) { assert.equal(ex.problem, "protocol-error", "got right close code"); assert.equal(ex.message, "failed to connect to none bus: [Errno 2] sd_bus_start: No such file or directory", "error message"); diff --git a/pkg/base1/test-echo.js b/pkg/base1/test-echo.js index d876bb20c7a..b025e16ea45 100644 --- a/pkg/base1/test-echo.js +++ b/pkg/base1/test-echo.js @@ -1,5 +1,5 @@ import cockpit from "cockpit"; -import QUnit from "qunit-tests"; +import QUnit, { mock_info } from "qunit-tests"; QUnit.test("basic", function (assert) { const done = assert.async(); @@ -83,7 +83,7 @@ QUnit.test("fence", async assert => { const done = assert.async(); // This is implemented in the C bridge, but not in Python. 
- if (await QUnit.mock_info("pybridge")) { + if (await mock_info("pybridge")) { assert.ok(true, "skipping on python bridge, not implemented"); done(); return; diff --git a/pkg/base1/test-file.js b/pkg/base1/test-file.js index eab74cc5068..cbbad9e7fc1 100644 --- a/pkg/base1/test-file.js +++ b/pkg/base1/test-file.js @@ -67,6 +67,12 @@ QUnit.test("simple replace", async assert => { assert.equal(res, "4321\n", "correct content"); }); +QUnit.test("empty replace", async assert => { + await cockpit.file(`${dir}/bar`).replace(""); + const res = await cockpit.spawn(["cat", `${dir}/bar`]); + assert.equal(res, "", "correct content"); +}); + QUnit.test("stringify replace", async assert => { await cockpit.file(dir + "/bar", { syntax: JSON }).replace({ foo: 4321 }); const res = await cockpit.spawn(["cat", dir + "/bar"]); @@ -193,6 +199,28 @@ QUnit.test("modify", async assert => { assert.equal(n, 1, "callback called once"); assert.equal(await cockpit.spawn(["cat", dir + "/quux"]), "dcba\n", "correct content"); + + // make sure that writing "" results in an empty file, not a deleted one + n = 0; + await file.modify(old => { + n += 1; + assert.equal(old, "dcba\n", "correct old content"); + return ""; + }); + assert.equal(n, 1, "callback called once"); + + assert.equal(await cockpit.spawn(["cat", dir + "/quux"]), "", "correct content"); + + // make sure that writing null deletes the file + n = 0; + await file.modify(old => { + n += 1; + assert.equal(old, "", "correct old content"); + return null; + }); + assert.equal(n, 1, "callback called once"); + + assert.rejects(cockpit.spawn(["cat", dir + "/quux"]), /No such file or directory/, "file deleted"); }); QUnit.test("modify with conflict", async assert => { @@ -326,6 +354,22 @@ QUnit.test("watching without reading", assert => { }, { read: false }); }); +QUnit.test("watching without reading pre-created", async assert => { + const done = assert.async(); + assert.expect(3); + + // Pre-create fsinfo test file + const file = cockpit.file(dir + "/fsinfo"); + await file.replace("1234"); + const watch = file.watch((content, tag) => { + assert.equal(content, null, "non-existant because read is false"); + assert.notEqual(tag, null, "non empty tag"); + assert.equal(tag.startsWith("1:"), true, "tag always starts with 1:"); + watch.remove(); + done(); + }, { read: false }); +}); + QUnit.test("watching directory", assert => { const done = assert.async(); assert.expect(20); @@ -378,6 +422,38 @@ QUnit.test("watching directory", assert => { cockpit.spawn(["sh", "-c", `echo hello > ${dir}/world.txt`]); }); +QUnit.test("watching error", async assert => { + const dir = await cockpit.spawn([ + 'sh', '-c', + ` + cd "$(mktemp -d)" + echo -n "$(pwd)" + + mkdir dir + echo dir file > dir/dir-file.txt + echo do not read this > dir-file.xtx + chmod 0 dir + ` + ]); + + const file = cockpit.file(`${dir}/dir/file`); + + try { + const [content, tag, error] = await new Promise(resolve => { + file.watch((content, tag, error) => { + resolve([content, tag, error]); + }); + }); + assert.equal(content, null); + assert.equal(tag, null); + assert.equal(error.problem, 'access-denied'); + } finally { + file.close(); + await cockpit.spawn(["chmod", "-R", "u+rwX", dir]); + await cockpit.spawn(["rm", "-rf", dir]); + } +}); + QUnit.test("closing", assert => { const done = assert.async(); assert.expect(2); diff --git a/pkg/base1/test-format.js b/pkg/base1/test-format.ts similarity index 71% rename from pkg/base1/test-format.js rename to pkg/base1/test-format.ts index 0294fcd6c80..681c3d84390 100644 --- 
a/pkg/base1/test-format.js +++ b/pkg/base1/test-format.ts @@ -1,5 +1,5 @@ import cockpit from "cockpit"; -import QUnit from "qunit-tests"; +import QUnit, { f } from "qunit-tests"; QUnit.test("format", function (assert) { assert.equal(cockpit.format("My $adj message with ${amount} of things", { adj: "special", amount: "lots" }), @@ -42,7 +42,7 @@ QUnit.test("format_number", function (assert) { [-123.01, "-123", "-123"], [null, "", ""], [undefined, "", ""], - ]; + ] as const; const saved_language = cockpit.language; @@ -51,19 +51,22 @@ QUnit.test("format_number", function (assert) { cockpit.language = 'en'; for (let i = 0; i < checks.length; i++) { assert.strictEqual(cockpit.format_number(checks[i][0]), checks[i][1], - "format_number@en(" + checks[i][0] + ") = " + checks[i][1]); + f`format_number@en(${checks[i][0]})` + ); } cockpit.language = 'de'; for (let i = 0; i < checks.length; i++) { assert.strictEqual(cockpit.format_number(checks[i][0]), checks[i][2], - "format_number@de(" + checks[i][0] + ") = " + checks[i][2]); + f`format_number@de(${checks[i][0]})` + ); } cockpit.language = 'pt_BR'; for (let i = 0; i < checks.length; i++) { assert.strictEqual(cockpit.format_number(checks[i][0]), checks[i][2], - "format_number@pt_BR(" + checks[i][0] + ") = " + checks[i][2]); + f`format_number@pt_BR(${checks[i][0]})` + ); } /* restore this as not to break the other tests */ @@ -81,11 +84,18 @@ QUnit.test("format_number", function (assert) { QUnit.test("format_bytes", function (assert) { const checks = [ - [999, 1000, "999"], - [1934, undefined, "1.93 KB"], - [1934, 1000, "1.93 KB"], + [0, undefined, "0 B"], + [0, 1000, "0 B"], + [0, 1024, "0 B"], + [5, 1000, "5 B"], + [5, 1024, "5 B"], + [999, 1000, "999 B"], + [999, 1024, "999 B"], + [1023, 1024, "1023 B"], + [1934, undefined, "1.93 kB"], + [1934, 1000, "1.93 kB"], [2000, 1024, "1.95 KiB"], - [1999, 1000, "2.00 KB"], + [1999, 1000, "2.00 kB"], [1999, 1024, "1.95 KiB"], [1000000, 1000, "1 MB"], [1000001, 1000, "1.00 MB"], @@ -95,23 +105,36 @@ QUnit.test("format_bytes", function (assert) { [2000001, 1000, "2.00 MB"], [2000000, "MB", "2 MB"], [2000000, "MiB", "1.91 MiB"], - [2000000, "KB", "2000 KB"], + [2000000, "kB", "2000 kB"], [2000000, "KiB", "1953 KiB"], - [1, "KB", "0.001 KB"], - [0, "KB", "0 KB"], - [undefined, "KB", ""], - [null, "KB", ""], - ]; + [1, "kB", "0.001 kB"], + [0, "kB", "0 kB"], + [undefined, "kB", ""], + [null, "kB", ""], + ] as const; - assert.expect(checks.length * 2 + 2); + for (let i = 0; i < checks.length; i++) { + if (typeof checks[i][1] === 'string') { + // these tests are for backwards compatibility only + continue; + } + + const base2 = checks[i][1] == 1024; + assert.strictEqual(cockpit.format_bytes(checks[i][0], { base2 }), checks[i][2], + f`format_bytes(${checks[i][0]}, ${{ base2 }})`); + } + + // old API style (deprecated) for (let i = 0; i < checks.length; i++) { assert.strictEqual(cockpit.format_bytes(checks[i][0], checks[i][1]), checks[i][2], - "format_bytes(" + checks[i][0] + ", " + String(checks[i][1]) + ") = " + checks[i][2]); + f`format_bytes(${checks[i][0]}, ${checks[i][1]})` + ); } for (let i = 0; i < checks.length; i++) { const split = checks[i][2].split(" "); assert.deepEqual(cockpit.format_bytes(checks[i][0], checks[i][1], { separate: true }), split, - "format_bytes(" + checks[i][0] + ", " + String(checks[i][1]) + ", true) = " + split); + f`format_bytes(${checks[i][0]}, ${checks[i][1]}, ${{ separate: true }})` + ); } // backwards compatible API: format_bytes with a boolean options (used to be a single 
"separate" flag) @@ -119,47 +142,18 @@ QUnit.test("format_bytes", function (assert) { assert.deepEqual(cockpit.format_bytes(2500000, 1000, true), ["2.50", "MB"]); }); -QUnit.test("get_byte_units", function (assert) { - const mib = 1024 * 1024; - const gib = mib * 1024; - const tib = gib * 1024; - - const mib_unit = { factor: mib, name: "MiB" }; - const gib_unit = { factor: gib, name: "GiB" }; - const tib_unit = { factor: tib, name: "TiB" }; - - function selected(unit) { - return { factor: unit.factor, name: unit.name, selected: true }; - } - - const checks = [ - [0 * mib, 1024, [selected(mib_unit), gib_unit, tib_unit]], - [20 * mib, 1024, [selected(mib_unit), gib_unit, tib_unit]], - [200 * mib, 1024, [selected(mib_unit), gib_unit, tib_unit]], - [2000 * mib, 1024, [selected(mib_unit), gib_unit, tib_unit]], - [20000 * mib, 1024, [mib_unit, selected(gib_unit), tib_unit]], - [20 * gib, 1024, [mib_unit, selected(gib_unit), tib_unit]], - [200 * gib, 1024, [mib_unit, selected(gib_unit), tib_unit]], - [2000 * gib, 1024, [mib_unit, selected(gib_unit), tib_unit]], - [20000 * gib, 1024, [mib_unit, gib_unit, selected(tib_unit)]] - ]; - - assert.expect(checks.length); - for (let i = 0; i < checks.length; i++) { - assert.deepEqual(cockpit.get_byte_units(checks[i][0], checks[i][1]), checks[i][2], - "get_byte_units(" + checks[i][0] + ", " + checks[i][1] + ") = " + JSON.stringify(checks[i][2])); - } -}); - QUnit.test("format_bytes_per_sec", function (assert) { const checks = [ // default unit + [0, undefined, undefined, "0 B/s"], [5, undefined, undefined, "5 B/s"], [2555, undefined, undefined, "2.56 kB/s"], [12345678, undefined, undefined, "12.3 MB/s"], // explicit base-2 unit + [0, 1024, undefined, "0 B/s"], [2555, 1024, undefined, "2.50 KiB/s"], // explicit base-10 unit + [0, 1000, undefined, "0 B/s"], [2555, 1000, undefined, "2.56 kB/s"], [12345678, 1000, undefined, "12.3 MB/s"], // explicit unit @@ -171,18 +165,28 @@ QUnit.test("format_bytes_per_sec", function (assert) { // significant integer digits exceed custom precision [25555000, "kB/s", { precision: 2 }, "25555 kB/s"], [25555678, "kB/s", { precision: 2 }, "25556 kB/s"], - ]; + ] as const; - assert.expect(checks.length + 2); for (let i = 0; i < checks.length; i++) { - assert.strictEqual(cockpit.format_bytes_per_sec(checks[i][0], checks[i][1], checks[i][2]), checks[i][3], - `format_bytes_per_sec(${checks[i][0]}, ${checks[i][1]}, ${checks[i][2]}) = ${checks[i][3]}`); + if (typeof checks[i][1] === 'string') { + // these tests are for backwards compatibility only + continue; + } + + const base2 = checks[i][1] == 1024; + assert.strictEqual(cockpit.format_bytes_per_sec(checks[i][0], { base2, ...checks[i][2] }), checks[i][3], + f`format_bytes_per_sec(${checks[i][0]}, ${{ base2, ...checks[i][2] }})`); } - // separate unit + // old API style (deprecated) + for (let i = 0; i < checks.length; i++) { + assert.strictEqual(cockpit.format_bytes_per_sec(checks[i][0], checks[i][1], checks[i][2]), checks[i][3], + f`format_bytes_per_sec(${checks[i][0]}, ${checks[i][1]}, ${checks[i][2]})`); + } + // separate unit (very deprecated) assert.deepEqual(cockpit.format_bytes_per_sec(2555, 1024, { separate: true }), ["2.50", "KiB/s"]); - // backwards compatible API for separate flag + // backwards compatible API for separate flag (oh so very deprecated) assert.deepEqual(cockpit.format_bytes_per_sec(2555, 1024, true), ["2.50", "KiB/s"]); }); @@ -195,12 +199,12 @@ QUnit.test("format_bits_per_sec", function (assert) { [2555, "2.56 Kbps"], [2000, "2 Kbps"], [2003, "2.00 Kbps"] - 
]; + ] as const; assert.expect(checks.length); for (let i = 0; i < checks.length; i++) { assert.strictEqual(cockpit.format_bits_per_sec(checks[i][0]), checks[i][1], - "format_bits_per_sec(" + checks[i][0] + ") = " + checks[i][1]); + f`format_bits_per_sec(${checks[i][0]})`); } }); diff --git a/pkg/base1/test-fsinfo.ts b/pkg/base1/test-fsinfo.ts new file mode 100644 index 00000000000..88e86af5f15 --- /dev/null +++ b/pkg/base1/test-fsinfo.ts @@ -0,0 +1,160 @@ +import QUnit, { f } from "qunit-tests"; + +import cockpit from "cockpit"; +import { FsInfoClient, FsInfoState, fsinfo } from "cockpit/fsinfo"; + +function fsinfo_err(errno: "ENOENT" | "EPERM" | "EACCES" | "ENOTDIR" | "ELOOP") { + const problems = { + ENOENT: 'not-found', + EPERM: 'access-denied', + EACCES: 'access-denied', + ENOTDIR: 'not-directory', + ELOOP: 'internal-error', + }; + const strerr = { + ENOENT: 'No such file or directory', + EPERM: 'Access denied', + EACCES: 'Permission denied', + ENOTDIR: 'Not a directory', + ELOOP: 'Too many levels of symbolic links', + }; + + return { error: { errno, problem: problems[errno], message: strerr[errno] } }; +} + +QUnit.test("fsinfo trivial", async assert => { + // Make sure that it's not an error to open with no attributes requested + assert.deepEqual(await fsinfo("/", []), {}, "Yup, '/' is still there."); +}); + +QUnit.test("fsinfo errors", async assert => { + assert.timeout(5000); + + // Just test that errors are reported correctly in general. The unit test + // is more explicit about the specific error conditions that need to be + // checked for. + let thrown: string = ''; + try { + await fsinfo("rel", []); + assert.ok(false, "fsinfo() error checking"); + } catch (exc: any) { // eslint-disable-line @typescript-eslint/no-explicit-any + thrown = exc.problem; + } + assert.equal(thrown, 'protocol-error', "fsinfo() error checking"); +}); + +QUnit.test("FsInfoClient errors", async assert => { + const client = new FsInfoClient("rel", []); + try { + // We want to get 'close' with a problem code, and no 'change' + const expected = await new Promise((resolve, reject) => { + client.on('close', (msg) => resolve(msg.problem)); + client.on('change', reject); + }); + assert.equal(expected, 'protocol-error', "FsInfoClient error checking"); + } finally { + // Calling .close() after we got 'close' event is still OK. 
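        // A hedged usage sketch of the FsInfoClient watcher exercised in these
        // tests (illustrative only; assumes this file's imports, an absolute
        // example path, and that the 'change' event delivers an FsInfoState
        // whose `info` and `error` members are optional):
        //
        //     const watcher = new FsInfoClient("/etc", ["type", "entries"]);
        //     watcher.on("change", state => console.log(state.info ?? state.error));
        //     watcher.on("close", msg => console.log("watch closed", msg.problem));
        //     // ...and later, to detach the watch:
        //     watcher.close();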
+ client.close(); + } +}); + +QUnit.test("fsinfo cases", async assert => { + assert.timeout(5000); + + const dir = await cockpit.spawn([ + 'sh', '-c', + ` + cd "$(mktemp -d)" + echo -n "$(pwd)" + + # a normal directory + mkdir dir + echo dir file > dir/dir-file.txt + echo do not read this > dir-file.xtx + + # a directory without +x (search) + mkdir no-x-dir + echo file > no-x-dir/no-x-dir-file.txt + chmod 644 no-x-dir + + # a directory without +r (read) + mkdir no-r-dir + echo file > no-r-dir/no-r-dir-file.txt + chmod 311 no-r-dir + + # a normal file + echo normal file > file + + # a non-readable file + echo inaccessible file > no-r-file + chmod 0 no-r-file + + # a device + ln -sf /dev/null dev + + # a dangling symlink + ln -sf does-not-exist dangling + + # a symlink pointing to itself + ln -sf loopy loopy + ` + ]); + + const cases: Record = { + dir: { info: { type: 'dir', entries: { 'dir-file.txt': { type: 'reg' } } } }, + + // can't stat() the file + 'no-x-dir': { info: { type: "dir", entries: { "no-x-dir-file.txt": {} } } }, + + // can't read the directory, so no entries + 'no-r-dir': { info: { type: "dir" } }, + + // normal file, can read its contents + file: { info: { type: "reg" } }, + + // can't read file, so no contents + 'no-r-file': { info: { type: "reg" } }, + + // a device + dev: { info: { type: "chr" } }, + + // a dangling symlink + dangling: fsinfo_err('ENOENT'), + + // a link pointing at itself + loopy: fsinfo_err('ELOOP'), + } as const; + + try { + // Check the async one-shot non-watching API first + for (const [name, expected_state] of Object.entries(cases)) { + try { + const state = await fsinfo(`${dir}/${name}`, ['type', 'entries']); + assert.deepEqual(state, expected_state.info, f`fsinfo(${name})`); + } catch (exc) { + assert.deepEqual(exc, expected_state.error, f`fsinfo(${name})`); + } + } + + // Now test the client (watcher) + for (let [name, expected_state] of Object.entries(cases)) { + const client = new FsInfoClient(`${dir}/${name}`, ['type', 'entries']); + + // Watching requires read access to the directory + if (name.includes('no-r')) { + expected_state = fsinfo_err('EACCES'); + } + + // await the first state change: it's guaranteed to be the complete data + const value = await new Promise(resolve => { client.on('change', resolve) }); + assert.deepEqual(value, expected_state, f`FsInfoClient(${name})`); + + client.close(); + } + } finally { + await cockpit.spawn(["chmod", "-R", "u+rwX", dir]); + await cockpit.spawn(["rm", "-rf", dir]); + } +}); + +QUnit.start(); diff --git a/pkg/base1/test-http.js b/pkg/base1/test-http.js index c968eec3148..1f00c05cf47 100644 --- a/pkg/base1/test-http.js +++ b/pkg/base1/test-http.js @@ -1,5 +1,5 @@ import cockpit from "cockpit"; -import QUnit from "qunit-tests"; +import QUnit, { mock_info } from "qunit-tests"; const EXPECT_MOCK_STREAM = "0 1 2 3 4 5 6 7 8 9 "; @@ -65,6 +65,9 @@ QUnit.test("simple request", assert => { plot: { label: "Plots" }, + remote: { + label: "Remote channel" + }, service: { label: "Generic Service Monitor" }, @@ -131,6 +134,47 @@ QUnit.test("streaming", assert => { }); }); +QUnit.test("split UTF8 frames", assert => { + const done = assert.async(); + assert.expect(1); + + cockpit.http(test_server) + .get("/mock/split-utf8") + .then(resp => assert.equal(resp, "initialfirst half é second halffinal", "correct response")) + .finally(done); +}); + +QUnit.test("truncated UTF8 frame", assert => { + const done = assert.async(); + assert.expect(3); + let received = ""; + + cockpit.http(test_server) + 
.get("/mock/truncated-utf8") + .stream(block => { received += block }) + .then(() => assert.ok(false, "should not have succeeded")) + // does not include the first byte of é + .catch(ex => { + // does not include the first byte of é + assert.equal(received, "initialfirst half ", "received expected data"); + assert.equal(ex.problem, "protocol-error", "error code"); + assert.ok(ex.message.includes("unexpected end of data"), ex.message); + }) + .finally(done); +}); + +QUnit.test("binary data", async assert => { + const data = await cockpit.http({ ...test_server, binary: true }).get("/mock/binary-data"); + assert.deepEqual(data, new Uint8Array([255, 1, 255, 2])); +}); + +QUnit.test("invalid UTF-8", assert => { + assert.rejects( + cockpit.http(test_server).get("/mock/binary-data"), + ex => ex.problem == "protocol-error" && ex.message.includes("can't decode byte 0xff"), + "rejects non-UTF-8 data on text channel"); +}); + QUnit.test("close", assert => { const done = assert.async(); assert.expect(3); @@ -257,7 +301,7 @@ QUnit.test("http keep alive", async assert => { assert.expect(1); // connection sharing is not implemented in the pybridge - if (await QUnit.mock_info("pybridge")) { + if (await mock_info("pybridge")) { assert.rejects( cockpit.http({ port: test_server.port, connection: "one" }).get("/mock/connection"), ex => ex.problem == "protocol-error" && ex.status == undefined, @@ -278,7 +322,7 @@ QUnit.test("http connection different", async assert => { assert.expect(1); // connection sharing is not implemented in the pybridge - if (await QUnit.mock_info("pybridge")) { + if (await mock_info("pybridge")) { assert.ok(true); return; } @@ -296,7 +340,7 @@ QUnit.test("http connection without address", async assert => { assert.expect(1); // connection sharing is not implemented in the pybridge - if (await QUnit.mock_info("pybridge")) { + if (await mock_info("pybridge")) { assert.ok(true); return; } @@ -363,7 +407,7 @@ QUnit.test("wrong options", async assert => { "rejects request with both port and unix option"); // This is disallowed in the pybridge, but allowed in the C bridge - if (await QUnit.mock_info("pybridge")) { + if (await mock_info("pybridge")) { assert.rejects( cockpit.http({ unix: "/nonexisting/socket", tls: {} }).get("/"), ex => ex.problem == "protocol-error" && ex.status == undefined, @@ -375,7 +419,7 @@ QUnit.test("wrong options", async assert => { QUnit.test("parallel stress test", async assert => { // This is way too slow under valgrind - if (await QUnit.mock_info("skip_slow_tests")) { + if (await mock_info("skip_slow_tests")) { assert.ok(true, "skipping on python bridge, not implemented"); return; } diff --git a/pkg/base1/test-journal-renderer.js b/pkg/base1/test-journal-renderer.js index e39555ec47d..cd810a6c65d 100644 --- a/pkg/base1/test-journal-renderer.js +++ b/pkg/base1/test-journal-renderer.js @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . 
*/ import QUnit from "qunit-tests"; diff --git a/pkg/base1/test-promise.js b/pkg/base1/test-promise.ts similarity index 100% rename from pkg/base1/test-promise.js rename to pkg/base1/test-promise.ts diff --git a/pkg/base1/test-spawn-proc.js b/pkg/base1/test-spawn-proc.js index 965c0e53840..f0d5b1ce466 100644 --- a/pkg/base1/test-spawn-proc.js +++ b/pkg/base1/test-spawn-proc.js @@ -173,7 +173,11 @@ QUnit.test("pty", async assert => { }); QUnit.test("pty window size", async assert => { - const proc = cockpit.spawn(['tput', 'lines', 'cols'], { pty: true, window: { rows: 77, cols: 88 } }); + const proc = cockpit.spawn(['tput', 'lines', 'cols'], { + pty: true, + environ: ["TERM=vt100"], + window: { rows: 77, cols: 88 } + }); assert.equal(await proc, '77\r\n88\r\n', 'Correct rows and columns'); }); diff --git a/pkg/base1/test-stream.js b/pkg/base1/test-stream.js index f8d5a157b6e..d36c3548897 100644 --- a/pkg/base1/test-stream.js +++ b/pkg/base1/test-stream.js @@ -1,5 +1,5 @@ import cockpit from "cockpit"; -import QUnit from "qunit-tests"; +import QUnit, { mock_info } from "qunit-tests"; const QS_REQUEST = "HEAD /mock/qs HTTP/1.0\nHOST: localhost\n\n"; @@ -12,7 +12,7 @@ QUnit.test("TCP stream port without a service", async assert => { const done = assert.async(); assert.expect(2); - const is_pybridge = await QUnit.mock_info("pybridge"); + const is_pybridge = await mock_info("pybridge"); const channel = cockpit.channel({ payload: "stream", address: "127.0.0.99", port: 2222 }); diff --git a/pkg/base1/test-timeformat.ts b/pkg/base1/test-timeformat.ts new file mode 100644 index 00000000000..21c98f337a5 --- /dev/null +++ b/pkg/base1/test-timeformat.ts @@ -0,0 +1,127 @@ +import QUnit from 'qunit-tests'; + +import cockpit from "cockpit"; +import * as timeformat from "timeformat"; + +// this is a date for the current time zone +const d1 = new Date("2024-01-02 03:04:05"); +// same date in UTC +const d1_utc = 1704164645000; + +QUnit.test("absolute formatters, English", assert => { + cockpit.language = "en"; + assert.equal(timeformat.time(d1), "3:04 AM"); + assert.equal(timeformat.timeSeconds(d1), "3:04:05 AM"); + assert.equal(timeformat.date(d1), "January 2, 2024"); + assert.equal(timeformat.dateShort(d1), "1/2/2024"); + assert.equal(timeformat.dateTime(d1), "Jan 2, 2024, 3:04 AM"); + assert.equal(timeformat.dateTimeSeconds(d1), "Jan 2, 2024, 3:04:05 AM"); + assert.equal(timeformat.dateTimeNoYear(d1), "Jan 02, 03:04 AM"); + assert.equal(timeformat.weekdayDate(d1), "Tuesday, January 2, 2024"); + + const utc_offset = (new Date()).getTimezoneOffset(); + assert.equal(timeformat.dateTimeUTC(d1_utc + utc_offset), "Jan 2, 2024, 3:04 AM"); + + // all of these work with numbers as time argument + assert.equal(timeformat.dateTimeSeconds(d1.valueOf()), "Jan 2, 2024, 3:04:05 AM"); +}); + +QUnit.test("absolute formatters, German", assert => { + cockpit.language = "de"; + assert.equal(timeformat.time(d1), "03:04"); + assert.equal(timeformat.timeSeconds(d1), "03:04:05"); + assert.equal(timeformat.date(d1), "2. Januar 2024"); + assert.equal(timeformat.dateShort(d1), "2.1.2024"); + assert.equal(timeformat.dateTime(d1), "02.01.2024, 03:04"); + assert.equal(timeformat.dateTimeSeconds(d1), "02.01.2024, 03:04:05"); + assert.equal(timeformat.dateTimeNoYear(d1), "02. Jan., 03:04"); + assert.equal(timeformat.weekdayDate(d1), "Dienstag, 2. 
Januar 2024"); + + // all of these work with numbers as time argument + assert.equal(timeformat.dateTimeSeconds(d1.valueOf()), "02.01.2024, 03:04:05"); +}); + +QUnit.test("absolute formatters, per-country locale", assert => { + cockpit.language = "en_GB"; + assert.equal(timeformat.timeSeconds(d1), "03:04:05"); + assert.equal(timeformat.date(d1), "2 January 2024"); + assert.equal(timeformat.dateShort(d1), "02/01/2024"); + + cockpit.language = "pt"; + assert.equal(timeformat.date(d1), "2 de janeiro de 2024"); + + cockpit.language = "pt_BR"; + assert.equal(timeformat.date(d1), "2 de janeiro de 2024"); + + cockpit.language = "zh_CN"; + assert.equal(timeformat.weekdayDate(d1), "2024年1月2日星期二"); + + cockpit.language = "zh_TW"; + assert.equal(timeformat.weekdayDate(d1), "2024年1月2日 星期二"); +}); + +const SEC = 1000; +const MIN = 60 * SEC; +const HOUR = 60 * MIN; +const DAY = 24 * HOUR; + +QUnit.test("relative formatter, English", assert => { + const now = Date.now(); + + cockpit.language = "en"; + assert.equal(timeformat.distanceToNow(now + 4.5 * SEC), "in less than a minute"); + assert.equal(timeformat.distanceToNow(now - 4.5 * SEC), "less than a minute ago"); + + assert.equal(timeformat.distanceToNow(now + 4 * MIN - 5 * SEC), "in 4 minutes"); + assert.equal(timeformat.distanceToNow(now - 4 * MIN - 5 * SEC), "4 minutes ago"); + assert.equal(timeformat.distanceToNow(now - 4 * MIN + 5 * SEC), "4 minutes ago"); + + assert.equal(timeformat.distanceToNow(now - 32 * MIN), "32 minutes ago"); + assert.equal(timeformat.distanceToNow(now + 32 * MIN), "in 32 minutes"); + + assert.equal(timeformat.distanceToNow(now + 3 * HOUR + 57 * MIN), "in 4 hours"); + assert.equal(timeformat.distanceToNow(now - 3 * HOUR - 57 * MIN), "4 hours ago"); + + assert.equal(timeformat.distanceToNow(now + 25 * HOUR), "tomorrow"); + assert.equal(timeformat.distanceToNow(now - 25 * HOUR), "yesterday"); + + assert.equal(timeformat.distanceToNow(now + 4 * DAY - 2 * HOUR), "in 4 days"); + assert.equal(timeformat.distanceToNow(now + 4 * DAY + 2 * HOUR), "in 4 days"); + assert.equal(timeformat.distanceToNow(now - 4 * DAY - 2 * HOUR), "4 days ago"); + assert.equal(timeformat.distanceToNow(now - 4 * DAY + 2 * HOUR), "4 days ago"); + + assert.equal(timeformat.distanceToNow(now + 20 * DAY), "in 3 weeks"); + assert.equal(timeformat.distanceToNow(now + 21 * DAY), "in 3 weeks"); + assert.equal(timeformat.distanceToNow(now - 21 * DAY), "3 weeks ago"); + + assert.equal(timeformat.distanceToNow(now + 60 * DAY), "in 2 months"); + assert.equal(timeformat.distanceToNow(now - 60 * DAY), "2 months ago"); + + assert.equal(timeformat.distanceToNow(now + 1200 * DAY), "in 3 years"); + assert.equal(timeformat.distanceToNow(now - 1200 * DAY), "3 years ago"); +}); + +QUnit.test("relative formatter, German", assert => { + const now = Date.now(); + + // no need to be as thorough as with English, just spot check that it's translated + cockpit.language = "de"; + /* TODO: this first needs to be translated in po/de.po + assert.equal(timeformat.distanceToNow(now + 4.5 * SEC), "in weniger als 1 Minute"); + assert.equal(timeformat.distanceToNow(now - 4.5 * SEC), "vor weniger als 1 Minute"); + */ + + assert.equal(timeformat.distanceToNow(now + 25 * HOUR), "morgen"); + assert.equal(timeformat.distanceToNow(now - 25 * HOUR), "gestern"); + assert.equal(timeformat.distanceToNow(now - 4 * DAY), "vor 4 Tagen"); + assert.equal(timeformat.distanceToNow(now + 21 * DAY), "in 3 Wochen"); +}); + +QUnit.test("firstDayOfWeek", assert => { + cockpit.language = "en"; + 
assert.equal(timeformat.firstDayOfWeek(), 0); + cockpit.language = "de"; + assert.equal(timeformat.firstDayOfWeek(), 1); +}); + +QUnit.start(); diff --git a/pkg/base1/test-types.ts b/pkg/base1/test-types.ts new file mode 100644 index 00000000000..fe2f6d24c92 --- /dev/null +++ b/pkg/base1/test-types.ts @@ -0,0 +1,21 @@ +import cockpit from 'cockpit'; +import QUnit from 'qunit-tests'; + +function as_str(value: string | number): string { + cockpit.assert(typeof value === "string"); + return value; // only (statically) possible because of the assert +} + +QUnit.test("cockpit.assert success", function(assert) { + as_str("abc"); + assert.ok(true); +}); + +QUnit.test("cockpit.assert fail", function(assert) { + assert.throws(function() { + as_str(123); + }); + assert.ok(true); +}); + +QUnit.start(); diff --git a/pkg/base1/test-user.js b/pkg/base1/test-user.js index f38c485cbb2..8b1446dbf07 100644 --- a/pkg/base1/test-user.js +++ b/pkg/base1/test-user.js @@ -22,6 +22,7 @@ QUnit.test("user object", async assert => { assert.equal(typeof user.shell, "string", "user shell"); assert.equal(typeof user.home, "string", "user home"); assert.equal(typeof user.id, "number", "user id"); + assert.equal(typeof user.gid, "number", "group id"); assert.ok(Array.isArray(user.groups), "user groups"); }); diff --git a/pkg/base1/test-utf8.js b/pkg/base1/test-utf8.js deleted file mode 100644 index 6a09d82cfa6..00000000000 --- a/pkg/base1/test-utf8.js +++ /dev/null @@ -1,129 +0,0 @@ -import cockpit from "cockpit"; -import QUnit from "qunit-tests"; - -QUnit.test("utf8 basic", function (assert) { - const str = "Base 64 \u2014 Mozilla Developer Network"; - const expect = [66, 97, 115, 101, 32, 54, 52, 32, 226, 128, 148, 32, 77, - 111, 122, 105, 108, 108, 97, 32, 68, 101, 118, 101, 108, - 111, 112, 101, 114, 32, 78, 101, 116, 119, 111, 114, 107]; - - const encoded = cockpit.utf8_encoder().encode(str); - assert.deepEqual(encoded, expect, "encoded"); - - assert.equal(cockpit.utf8_decoder().decode(encoded), str, "decoded"); -}); - -// Copyright 2014 Joshua Bell. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0.html -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Inspired by: -// http://ecmanaut.blogspot.com/2006/07/encoding-decoding-utf8-in-javascript.html - -// Helpers for test_utf_roundtrip. - -QUnit.test("utf8 round trip", function (assert) { - const MIN_CODEPOINT = 0; - const MAX_CODEPOINT = 0x10FFFF; - const BLOCK_SIZE = 0x1000; - const SKIP_SIZE = 31; - const encoder = cockpit.utf8_encoder(); - const decoder = cockpit.utf8_decoder(); - - function cpname(n) { - if (n + 0 !== n) - return n.toString(); - const w = (n <= 0xFFFF) ? 
4 : 6; - return 'U+' + ('000000' + n.toString(16).toUpperCase()).slice(-w); - } - - function genblock(from, len, skip) { - const block = []; - for (let i = 0; i < len; i += skip) { - let cp = from + i; - if (cp >= 0xD800 && cp <= 0xDFFF) - continue; - if (cp < 0x10000) { - block.push(String.fromCharCode(cp)); - continue; - } - cp = cp - 0x10000; - block.push(String.fromCharCode(0xD800 + (cp >> 10))); - block.push(String.fromCharCode(0xDC00 + (cp & 0x3FF))); - } - return block.join(''); - } - - for (let i = MIN_CODEPOINT; i < MAX_CODEPOINT; i += BLOCK_SIZE) { - const block_tag = cpname(i) + " - " + cpname(i + BLOCK_SIZE - 1); - const block = genblock(i, BLOCK_SIZE, SKIP_SIZE); - const encoded = encoder.encode(block); - const decoded = decoder.decode(encoded); - - const length = block.length; - for (let j = 0; j < length; j++) { - if (block[j] != decoded[j]) - assert.deepEqual(block, decoded, "round trip " + block_tag); - } - } - - assert.ok(true, "round trip all code points"); -}); - -QUnit.test("utf8 samples", function (assert) { - // z, cent, CJK water, G-Clef, Private-use character - const sample = "z\xA2\u6C34\uD834\uDD1E\uDBFF\uDFFD"; - const expected = [0x7A, 0xC2, 0xA2, 0xE6, 0xB0, 0xB4, 0xF0, 0x9D, 0x84, 0x9E, 0xF4, 0x8F, 0xBF, 0xBD]; - - const encoded = cockpit.utf8_encoder().encode(sample); - assert.deepEqual(encoded, expected, "encoded"); - - const decoded = cockpit.utf8_decoder().decode(expected); - assert.deepEqual(decoded, sample, "decoded"); -}); - -QUnit.test("utf8 stream", function (assert) { - // z, cent, CJK water, G-Clef, Private-use character - const sample = "z\xA2\u6C34\uD834\uDD1E\uDBFF\uDFFD"; - const expected = [0x7A, 0xC2, 0xA2, 0xE6, 0xB0, 0xB4, 0xF0, 0x9D, 0x84, 0x9E, 0xF4, 0x8F, 0xBF, 0xBD]; - - const decoder = cockpit.utf8_decoder(); - let decoded = ""; - - for (let i = 0; i < expected.length; i += 2) - decoded += decoder.decode(expected.slice(i, i + 2), { stream: true }); - decoded += decoder.decode(); - - assert.deepEqual(decoded, sample, "decoded"); -}); - -QUnit.test("utf8 invalid", function (assert) { - const sample = "Base 64 \ufffd\ufffd Mozilla Developer Network"; - const data = [66, 97, 115, 101, 32, 54, 52, 32, 226, /* 128 */ 148, 32, 77, - 111, 122, 105, 108, 108, 97, 32, 68, 101, 118, 101, 108, - 111, 112, 101, 114, 32, 78, 101, 116, 119, 111, 114, 107]; - - const decoded = cockpit.utf8_decoder().decode(data); - - assert.deepEqual(decoded, sample, "decoded"); -}); - -QUnit.test("utf8 fatal", function (assert) { - const data = [66, 97, 115, 101, 32, 54, 52, 32, 226, /* 128 */ 148, 32, 77, - 111, 122, 105, 108, 108, 97, 32, 68, 101, 118, 101, 108, - 111, 112, 101, 114, 32, 78, 101, 116, 119, 111, 114, 107]; - - assert.throws(function() { cockpit.utf8_decoder(true).decode(data) }, "fatal throws error"); -}); - -QUnit.start(); diff --git a/pkg/kdump/config-client-suse.js b/pkg/kdump/config-client-suse.js index bbd550730df..fb565c603bd 100644 --- a/pkg/kdump/config-client-suse.js +++ b/pkg/kdump/config-client-suse.js @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import { ConfigFile } from './config-client.js'; diff --git a/pkg/kdump/config-client.js b/pkg/kdump/config-client.js index 066c8697eac..6894de2dda4 100644 --- a/pkg/kdump/config-client.js +++ b/pkg/kdump/config-client.js @@ -14,7 +14,7 @@ * Lesser General Public License for more details. 
* * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import cockpit from 'cockpit'; diff --git a/pkg/kdump/index.html b/pkg/kdump/index.html index e1d43dd9786..2ed087a8242 100644 --- a/pkg/kdump/index.html +++ b/pkg/kdump/index.html @@ -15,7 +15,7 @@ Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License -along with Cockpit; If not, see . +along with Cockpit; If not, see . --> diff --git a/pkg/kdump/kdump-client.js b/pkg/kdump/kdump-client.js index 71d1beeb635..d4c3d989a42 100644 --- a/pkg/kdump/kdump-client.js +++ b/pkg/kdump/kdump-client.js @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import cockpit from 'cockpit'; @@ -175,7 +175,7 @@ export class KdumpClient { return target; // copy first target - cockpit.extend(target, Object.values(settings.targets)[0]); + Object.assign(target, Object.values(settings.targets)[0]); target.multipleTargets = Object.keys(settings.targets).length > 1; return target; } diff --git a/pkg/kdump/kdump-view.jsx b/pkg/kdump/kdump-view.jsx index bd4172d6e80..51d2de71dd3 100644 --- a/pkg/kdump/kdump-view.jsx +++ b/pkg/kdump/kdump-view.jsx @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import '../lib/patternfly/patternfly-5-cockpit.scss'; @@ -483,20 +483,20 @@ ${enableCrashKernel} { _("Reading...") } ); - } else if (this.props.reservedMemory == 0) { + } else if (this.props.reservedMemory === 0) { // nothing reserved reservedMemory = {_("None")} ; - } else if (this.props.reservedMemory == "error") { - // error while reading - } else { - // assume we have a proper value + } else if (Number.isInteger(this.props.reservedMemory)) { // TODO: hint at using debug_mem_level to identify actual memory required? - reservedMemory = {this.props.reservedMemory}; + reservedMemory = {cockpit.format_bytes(this.props.reservedMemory, { base2: true })}; + } else { + // error while reading + reservedMemory = null; } - const serviceRunning = this.props.kdumpStatus && - this.props.kdumpStatus.installed && - this.props.kdumpStatus.state == "running"; + const serviceRunning = this.props.kdumpStatus?.target && + this.props.kdumpStatus?.installed && + this.props.kdumpStatus?.state === "running"; let testButton; if (serviceRunning) { diff --git a/pkg/kdump/kdump.js b/pkg/kdump/kdump.js index f2eab375c5f..df5fa54aa21 100644 --- a/pkg/kdump/kdump.js +++ b/pkg/kdump/kdump.js @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . 
*/ import '../lib/patternfly/patternfly-5-cockpit.scss'; @@ -99,11 +99,11 @@ const initStore = function(rootElement) { // https://access.redhat.com/solutions/59432 states limit to be 896MiB and the auto at 768MiB max // default unit is MiB if (value >= 1024 * 1024) - dataStore.kdumpMemory = cockpit.format_bytes(value, 1024); + dataStore.kdumpMemory = value; else if (value >= 1024) - dataStore.kdumpMemory = cockpit.format_bytes(value * 1024, 1024); + dataStore.kdumpMemory = value * 1024; else - dataStore.kdumpMemory = cockpit.format_bytes(value * 1024 * 1024, 1024); + dataStore.kdumpMemory = value * 1024 * 1024; } else { dataStore.kdumpMemory = content.trim(); } diff --git a/pkg/kdump/kdump.scss b/pkg/kdump/kdump.scss index 260c027fac8..077cb55f98f 100644 --- a/pkg/kdump/kdump.scss +++ b/pkg/kdump/kdump.scss @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ @use "page"; diff --git a/pkg/kdump/manifest.json b/pkg/kdump/manifest.json index cdc3b626f37..323d6ebc944 100644 --- a/pkg/kdump/manifest.json +++ b/pkg/kdump/manifest.json @@ -8,7 +8,7 @@ "docs": [ { "label": "Configuring kdump", - "url": "https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/managing_systems_using_the_rhel_8_web_console/configuring-kdump-in-the-web-console_system-management-using-the-rhel-8-web-console" + "url": "https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/9/html/managing_monitoring_and_updating_the_kernel/configuring-kdump-in-the-web-console_managing-monitoring-and-updating-the-kernel" } ], "keywords": [ diff --git a/pkg/kdump/test-config-client.js b/pkg/kdump/test-config-client.js index 7ff9a187865..e61a5fb228e 100644 --- a/pkg/kdump/test-config-client.js +++ b/pkg/kdump/test-config-client.js @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . 
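The kdump.js hunk above is the other half of that change: dataStore.kdumpMemory is now kept as a plain byte count instead of being formatted immediately, and the unit of the parsed crashkernel value is guessed from its magnitude (a bare number defaults to MiB). Restated as a standalone helper for clarity (name is illustrative):

// Mirrors the branches in the hunk: values >= 1024*1024 are taken as bytes,
// values >= 1024 as KiB, everything smaller as MiB (the crashkernel default unit).
function crashKernelValueToBytes(value) {
    if (value >= 1024 * 1024)
        return value;
    if (value >= 1024)
        return value * 1024;
    return value * 1024 * 1024;
}

console.log(crashKernelValueToBytes(512) === 512 * 1024 * 1024);  // "crashkernel=512M" -> 536870912 bytes
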
*/ import QUnit from "qunit-tests"; @@ -44,37 +44,52 @@ const changedConfig = [ "" ].join("\n"); -QUnit.test("config_update", function (assert) { - const done = assert.async(); - assert.expect(6); - const dataWasChanged = new Promise(resolve => { this.dataWasChangedResolve = resolve }); - let config; - const configChanged = (event, settings) => { - assert.equal(settings._internal.foo.value, "moo", "value changed correctly"); - assert.equal("key" in settings._internal, false, "setting with comment deleted correctly"); - assert.equal("will" in settings._internal, false, "setting without comment deleted correctly"); - assert.equal(settings._internal.hooray.value, "value", "value added correctly"); - assert.equal(config._rawContent, changedConfig, "raw text for changed config is correct"); - this.dataWasChangedResolve(); - }; +QUnit.module("kdump", hooks => { + let filename = ""; - const filename = "cockpit_config_read"; - const configFile = cockpit.file(filename); - configFile - .replace(basicConfig) - .then(() => { - assert.equal(configFile.path, filename, "file has correct path"); - config = new kdump.ConfigFile(filename); - config.wait().then(() => { - config.settings._internal.foo.value = "moo"; - delete config.settings._internal.key; - delete config.settings._internal.will; - config.settings._internal.hooray = { value: "value" }; - config.addEventListener('kdumpConfigChanged', configChanged); - config.write(config.settings) - .then(() => dataWasChanged.then(done)); + hooks.before(async () => { + filename = await cockpit.spawn(["/usr/bin/mktemp", "--suffix", "kdump-test"]); + filename = filename.trim(); + }); + + hooks.after(() => cockpit.spawn(["rm", "-f", filename])); + + QUnit.test("config_update", function(assert) { + const done = assert.async(); + assert.expect(6); + const dataWasChanged = new Promise(resolve => { this.dataWasChangedResolve = resolve }); + let config; + const configChanged = (event, settings) => { + assert.equal(settings._internal.foo.value, "moo", "value changed correctly"); + assert.equal("key" in settings._internal, false, "setting with comment deleted correctly"); + assert.equal("will" in settings._internal, false, "setting without comment deleted correctly"); + assert.equal(settings._internal.hooray.value, "value", "value added correctly"); + assert.equal(config._rawContent, changedConfig, "raw text for changed config is correct"); + this.dataWasChangedResolve(); + }; + + const configFile = cockpit.file(filename); + configFile + .replace(basicConfig) + .then(() => { + assert.equal(configFile.path, filename, "file has correct path"); + config = new kdump.ConfigFile(filename); + config.wait().then(() => { + config.settings._internal.foo.value = "moo"; + delete config.settings._internal.key; + delete config.settings._internal.will; + config.settings._internal.hooray = { value: "value" }; + config.addEventListener('kdumpConfigChanged', configChanged); + config.write(config.settings) + .then(() => { + // Close watch channel + config.removeEventListener('kdumpConfigChanged', configChanged); + config.close(); + dataWasChanged.then(done); + }); + }); }); - }); + }); }); window.setTimeout(function() { diff --git a/pkg/lib/README b/pkg/lib/README index bb0c6ef41c9..7677e539fd0 100644 --- a/pkg/lib/README +++ b/pkg/lib/README @@ -1,5 +1,5 @@ # Cockpit shared components -This directory contains React components, JavaScript modules, webpack/esbuild +This directory contains React components, JavaScript modules, esbuild plugins, and build tools which are shared between all 
Cockpit projects. External projects usually clone this directory into their own source tree. diff --git a/pkg/lib/cockpit-components-context-menu.jsx b/pkg/lib/cockpit-components-context-menu.tsx similarity index 55% rename from pkg/lib/cockpit-components-context-menu.jsx rename to pkg/lib/cockpit-components-context-menu.tsx index 55345a36a2e..dd728168b46 100644 --- a/pkg/lib/cockpit-components-context-menu.jsx +++ b/pkg/lib/cockpit-components-context-menu.tsx @@ -14,50 +14,44 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ -import cockpit from "cockpit"; import React from "react"; -import PropTypes from "prop-types"; -import { Menu, MenuContent, MenuList, MenuItem } from "@patternfly/react-core/dist/esm/components/Menu"; +import { Menu, MenuContent } from "@patternfly/react-core/dist/esm/components/Menu"; import "context-menu.scss"; -const _ = cockpit.gettext; - /* - * A context menu component that contains copy and paste fields. + * A context menu component * - * It requires three properties: - * - getText, method which is called when copy is clicked - * - setText, method which is called when paste is clicked - * - parentId, area in which it listens to left button clicks + * It has two properties: + * - parentId (required), area in which it listens to left button click + * - children (optional), a MenuList to be rendered in the context menu */ -export const ContextMenu = ({ parentId, getText, setText }) => { +export const ContextMenu = ({ parentId, children } : { + parentId: string, + children?: React.ReactNode, +}) => { const [visible, setVisible] = React.useState(false); - const [event, setEvent] = React.useState(null); - const root = React.useRef(null); + const [event, setEvent] = React.useState(null); + const root = React.useRef(null); React.useEffect(() => { - const _handleContextMenu = (event) => { + const _handleContextMenu = (event: MouseEvent) => { event.preventDefault(); setVisible(true); setEvent(event); }; - const _handleClick = (event) => { - if (event && event.button === 0) { - const wasOutside = !(event.target.contains === root.current); - - if (wasOutside) - setVisible(false); - } + const _handleClick = (event: MouseEvent) => { + if (event.button === 0) + setVisible(false); }; - const parent = document.getElementById(parentId); + const parent = document.getElementById(parentId)!; parent.addEventListener('contextmenu', _handleContextMenu); document.addEventListener('click', _handleClick); @@ -68,7 +62,7 @@ export const ContextMenu = ({ parentId, getText, setText }) => { }, [parentId]); React.useEffect(() => { - if (!event) + if (!event || !root.current) return; const clickX = event.clientX; @@ -103,22 +97,7 @@ export const ContextMenu = ({ parentId, getText, setText }) => { return visible &&

- - -
{ _("Copy") }
-
{ _("Ctrl+Insert") }
-
- -
{ _("Paste") }
-
{ _("Shift+Insert") }
-
-
+ {children}
; }; - -ContextMenu.propTypes = { - getText: PropTypes.func.isRequired, - setText: PropTypes.func.isRequired, - parentId: PropTypes.string.isRequired -}; diff --git a/pkg/lib/cockpit-components-dialog.jsx b/pkg/lib/cockpit-components-dialog.jsx index 041a5694195..90e0371ecf5 100644 --- a/pkg/lib/cockpit-components-dialog.jsx +++ b/pkg/lib/cockpit-components-dialog.jsx @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import cockpit from "cockpit"; @@ -213,11 +213,20 @@ DialogFooter.propTypes = { */ class Dialog extends React.Component { componentDidMount() { + // For the scenario that cockpit-storage is used inside anaconda Web UI + // We need to know if there is an open dialog in order to create the backdrop effect + // on the parent window + window.sessionStorage.setItem("cockpit_has_modal", true); + // if we used a button to open this, make sure it's not focused anymore if (document.activeElement) document.activeElement.blur(); } + componentWillUnmount() { + window.sessionStorage.setItem("cockpit_has_modal", false); + } + render() { let help = null; let footer = null; diff --git a/pkg/lib/cockpit-components-dropdown.tsx b/pkg/lib/cockpit-components-dropdown.tsx new file mode 100644 index 00000000000..4f12a3fbba2 --- /dev/null +++ b/pkg/lib/cockpit-components-dropdown.tsx @@ -0,0 +1,83 @@ +/* + * This file is part of Cockpit. + * + * Copyright (C) 2024 Red Hat, Inc. + * + * Cockpit is free software; you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation; either version 2.1 of the License, or + * (at your option) any later version. + * + * Cockpit is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with Cockpit; If not, see . + */ + +import React, { useState } from 'react'; +import PropTypes from "prop-types"; + +import { MenuToggle } from "@patternfly/react-core/dist/esm/components/MenuToggle"; +import { Dropdown, DropdownList, DropdownPopperProps } from "@patternfly/react-core/dist/esm/components/Dropdown"; + +import { EllipsisVIcon } from '@patternfly/react-icons'; + +/* + * A dropdown with a Kebab button, commonly used in Cockpit pages provided as + * component so not all pages have to re-invent the wheel. + * + * isOpen/setIsOpen are optional -- you need to handle the state externally if you render the KebabDropdown in an + * "unstable" environment such as a dynamic list. When not given, the dropdown will manage its own state. + * + * This component expects a list of (non-deprecated!) DropdownItem's, if you + * require a separator between DropdownItem's use PatternFly's Divivder + * component. + */ +export const KebabDropdown = ({ dropdownItems, position = "end", isDisabled = false, toggleButtonId, isOpen, setIsOpen } : { + dropdownItems: React.ReactNode, + position?: DropdownPopperProps['position'], + isDisabled?: boolean, + toggleButtonId?: string; + isOpen?: boolean, setIsOpen?: React.Dispatch>, +}) => { + const [isKebabOpenInternal, setKebabOpenInternal] = useState(false); + const isKebabOpen = isOpen ?? 
isKebabOpenInternal; + const setKebabOpen = setIsOpen ?? setKebabOpenInternal; + + return ( + setKebabOpen(isOpen)} + onSelect={() => setKebabOpen(false)} + toggle={(toggleRef) => ( + setKebabOpen(!isKebabOpen)} + isExpanded={isKebabOpen} + > + + + )} + isOpen={isKebabOpen} + popperProps={{ position }} + > + + {dropdownItems} + + + ); +}; + +KebabDropdown.propTypes = { + dropdownItems: PropTypes.array.isRequired, + isDisabled: PropTypes.bool, + toggleButtonId: PropTypes.string, + position: PropTypes.oneOf(['right', 'left', 'center', 'start', 'end']), + isOpen: PropTypes.bool, + setIsOpen: PropTypes.func, +}; diff --git a/pkg/lib/cockpit-components-dynamic-list.jsx b/pkg/lib/cockpit-components-dynamic-list.jsx index 46b483b3148..4cad793d666 100644 --- a/pkg/lib/cockpit-components-dynamic-list.jsx +++ b/pkg/lib/cockpit-components-dynamic-list.jsx @@ -25,7 +25,7 @@ import './cockpit-components-dynamic-list.scss'; * [ * { name: "Name must not be empty }, // first row * { }, // second row - * { name: "Name cannot containt number", email: "Email must contain '@'" } // third row + * { name: "Name cannot contain a number", email: "Email must contain '@'" } // third row * ] */ export class DynamicListForm extends React.Component { @@ -43,7 +43,7 @@ export class DynamicListForm extends React.Component { removeItem(idx) { const validationFailedDelta = this.props.validationFailed ? [...this.props.validationFailed] : []; // We also need to remove any error messages which the item (row) may have contained - validationFailedDelta.splice(idx, 1); + delete validationFailedDelta[idx]; this.props.onValidationChange?.(validationFailedDelta); this.setState(state => { @@ -57,7 +57,7 @@ export class DynamicListForm extends React.Component { addItem() { this.setState(state => { - return { list: [...state.list, {key: this.keyCounter++, ...this.props.default}] }; + return { list: [...state.list, { key: this.keyCounter++, ...this.props.default }] }; }, () => this.props.onChange(this.state.list)); } diff --git a/pkg/lib/cockpit-components-empty-state.jsx b/pkg/lib/cockpit-components-empty-state.jsx index 0a78d0530f6..82d7b97eb9a 100644 --- a/pkg/lib/cockpit-components-empty-state.jsx +++ b/pkg/lib/cockpit-components-empty-state.jsx @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import React from "react"; @@ -24,7 +24,7 @@ import { EmptyStateActions, EmptyState, EmptyStateBody, EmptyStateFooter, EmptyS import { Spinner } from "@patternfly/react-core/dist/esm/components/Spinner/index.js"; import "./cockpit-components-empty-state.css"; -export const EmptyStatePanel = ({ title, paragraph, loading, icon, action, isActionInProgress, onAction, secondary, headingLevel, titleSize }) => { +export const EmptyStatePanel = ({ title, paragraph, loading = false, icon, action, isActionInProgress = false, onAction, actionVariant = "primary", secondary, headingLevel = "h1" }) => { const slimType = title || paragraph ? "" : "slim"; return ( @@ -34,7 +34,7 @@ export const EmptyStatePanel = ({ title, paragraph, loading, icon, action, isAct {(action || secondary) && { action && (typeof action == "string" - ? 
@@ -47,17 +47,12 @@ export const EmptyStatePanel = ({ title, paragraph, loading, icon, action, isAct EmptyStatePanel.propTypes = { loading: PropTypes.bool, - icon: PropTypes.oneOfType([PropTypes.string, PropTypes.func]), + icon: PropTypes.oneOfType([PropTypes.string, PropTypes.object, PropTypes.func]), title: PropTypes.string, paragraph: PropTypes.node, action: PropTypes.node, + actionVariant: PropTypes.string, isActionInProgress: PropTypes.bool, onAction: PropTypes.func, secondary: PropTypes.node, }; - -EmptyStatePanel.defaultProps = { - headingLevel: "h1", - titleSize: "lg", - isActionInProgress: false, -}; diff --git a/pkg/lib/cockpit-components-file-autocomplete.jsx b/pkg/lib/cockpit-components-file-autocomplete.jsx index afe4d15832d..33db69edc7b 100644 --- a/pkg/lib/cockpit-components-file-autocomplete.jsx +++ b/pkg/lib/cockpit-components-file-autocomplete.jsx @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import cockpit from "cockpit"; @@ -23,6 +23,8 @@ import { Select, SelectOption } from "@patternfly/react-core/dist/esm/deprecated import PropTypes from "prop-types"; import { debounce } from 'throttle-debounce'; +import { fsinfo } from "cockpit/fsinfo"; + const _ = cockpit.gettext; export class FileAutoComplete extends React.Component { @@ -58,7 +60,7 @@ export class FileAutoComplete extends React.Component { path = value.slice(0, value.length - 1); const match = this.state.displayFiles - .find(entry => (entry.type == 'directory' && entry.path == path + '/') || (entry.type == 'file' && entry.path == path)); + .find(entry => (entry.type == 'dir' && entry.path == path + '/') || (entry.type == 'reg' && entry.path == path)); if (match) { // If match file path is a prefix of another file, do not update current directory, @@ -67,7 +69,7 @@ export class FileAutoComplete extends React.Component { const isPrefix = this.state.displayFiles.filter(entry => entry.path.startsWith(value)).length > 1; // If the inserted string corresponds to a directory listed in the results // update the current directory and refetch results - if (match.type == 'directory' && !isPrefix) + if (match.type == 'dir' && !isPrefix) cb(match.path); else this.setState({ value: match.path }); @@ -91,7 +93,7 @@ export class FileAutoComplete extends React.Component { onCreateOption(newValue) { this.setState(prevState => ({ - displayFiles: [...prevState.displayFiles, { type: "file", path: newValue }] + displayFiles: [...prevState.displayFiles, { type: "reg", path: newValue }] })); } @@ -99,29 +101,17 @@ export class FileAutoComplete extends React.Component { if (this.state.directory == path) return; - const channel = cockpit.channel({ - payload: "fslist1", - path, - superuser: this.props.superuser, - watch: false, - }); - const results = []; - - channel.addEventListener("ready", () => { - this.finishUpdate(results, null, path); - }); - - channel.addEventListener("close", (ev, data) => { - this.finishUpdate(results, data.message, path); - }); - - channel.addEventListener("message", (ev, data) => { - const item = JSON.parse(data); - if (item && item.path && item.event == 'present') { - item.path = item.path + (item.type == 'directory' ? '/' : ''); - results.push(item); - } - }); + fsinfo(path, ['type', 'entries'], { superuser: this.props.superuser }) + .then(info => { + const results = []; + for (const name in info.entries ?? 
{}) { + const type = info.entries[name].type; + if (!this.props.onlyDirectories || type == 'dir') + results.push({ type, path: name + (type == 'dir' ? '/' : '') }); + } + this.finishUpdate(results, null, path); + }) + .catch(error => this.finishUpdate([], error.message, path)); } finishUpdate(results, error, directory) { @@ -136,7 +126,7 @@ export class FileAutoComplete extends React.Component { if (directory) { listItems.unshift({ - type: "directory", + type: "dir", path: directory }); } @@ -203,10 +193,12 @@ FileAutoComplete.propTypes = { placeholder: PropTypes.string, superuser: PropTypes.string, isOptionCreatable: PropTypes.bool, + onlyDirectories: PropTypes.bool, onChange: PropTypes.func, value: PropTypes.string, }; FileAutoComplete.defaultProps = { isOptionCreatable: false, + onlyDirectories: false, onChange: () => '', }; diff --git a/pkg/lib/cockpit-components-firewalld-request.jsx b/pkg/lib/cockpit-components-firewalld-request.jsx index 517b8bb9dbe..41530b55d23 100644 --- a/pkg/lib/cockpit-components-firewalld-request.jsx +++ b/pkg/lib/cockpit-components-firewalld-request.jsx @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import React, { useState } from 'react'; import { Alert, AlertActionCloseButton, AlertActionLink } from "@patternfly/react-core/dist/esm/components/Alert/index.js"; diff --git a/pkg/lib/cockpit-components-form-helper.jsx b/pkg/lib/cockpit-components-form-helper.tsx similarity index 78% rename from pkg/lib/cockpit-components-form-helper.jsx rename to pkg/lib/cockpit-components-form-helper.tsx index 86e7f118658..a5a286bb46a 100644 --- a/pkg/lib/cockpit-components-form-helper.jsx +++ b/pkg/lib/cockpit-components-form-helper.tsx @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import React from "react"; @@ -22,7 +22,15 @@ import React from "react"; import { FormHelperText } from "@patternfly/react-core/dist/esm/components/Form/index.js"; import { HelperText, HelperTextItem } from "@patternfly/react-core/dist/esm/components/HelperText"; -export const FormHelper = ({ helperText, helperTextInvalid, variant, icon, fieldId }) => { +export const FormHelper = ({ helperText, helperTextInvalid, variant, icon, fieldId } : + { + helperText?: string | null | undefined, + helperTextInvalid?: string | null | undefined, + variant?: "error" | "default" | "indeterminate" | "warning" | "success", + icon?: string, + fieldId?: string, + } +) => { const formHelperVariant = variant || (helperTextInvalid ? "error" : "default"); if (!(helperText || helperTextInvalid)) @@ -32,7 +40,7 @@ export const FormHelper = ({ helperText, helperTextInvalid, variant, icon, field {formHelperVariant === "error" ? helperTextInvalid : helperText} diff --git a/pkg/lib/cockpit-components-inline-notification.jsx b/pkg/lib/cockpit-components-inline-notification.jsx deleted file mode 100644 index 94985ce22ef..00000000000 --- a/pkg/lib/cockpit-components-inline-notification.jsx +++ /dev/null @@ -1,96 +0,0 @@ -/* - * This file is part of Cockpit. - * - * Copyright (C) 2016 Red Hat, Inc. 
- * - * Cockpit is free software; you can redistribute it and/or modify it - * under the terms of the GNU Lesser General Public License as published by - * the Free Software Foundation; either version 2.1 of the License, or - * (at your option) any later version. - * - * Cockpit is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . - */ -import React from 'react'; -import PropTypes from 'prop-types'; -import cockpit from 'cockpit'; - -import { Alert, AlertActionCloseButton } from "@patternfly/react-core/dist/esm/components/Alert/index.js"; -import { Button } from "@patternfly/react-core/dist/esm/components/Button/index.js"; -import './cockpit-components-inline-notification.css'; - -const _ = cockpit.gettext; - -function mouseClick(fun) { - return function (event) { - if (!event || event.button !== 0) - return; - event.preventDefault(); - return fun(event); - }; -} - -export class InlineNotification extends React.Component { - constructor(props) { - super(props); - this.state = { - isDetail: false, - }; - - this.toggleDetail = this.toggleDetail.bind(this); - } - - toggleDetail () { - this.setState({ - isDetail: !this.state.isDetail, - }); - } - - render () { - const { text, detail, type, onDismiss } = this.props; - - let detailButton = null; - if (detail) { - let detailButtonText = _("show more"); - if (this.state.isDetail) { - detailButtonText = _("show less"); - } - - detailButton = (); - } - const extraProps = {}; - if (onDismiss) - extraProps.actionClose = ; - - return ( - {text} {detailButton} } {...extraProps}> - {this.state.isDetail && (

{detail}

)} -
- ); - } -} - -InlineNotification.propTypes = { - onDismiss: PropTypes.func, - isInline: PropTypes.bool, - text: PropTypes.string.isRequired, // main information to render - detail: PropTypes.string, // optional, more detailed information. If empty, the more/less button is not rendered. - type: PropTypes.string, -}; - -export const ModalError = ({ dialogError, dialogErrorDetail, id, isExpandable }) => { - return ( - - { typeof dialogErrorDetail === 'string' ?

{dialogErrorDetail}

: dialogErrorDetail } -
- ); -}; diff --git a/pkg/lib/cockpit-components-inline-notification.tsx b/pkg/lib/cockpit-components-inline-notification.tsx new file mode 100644 index 00000000000..1503f52bdba --- /dev/null +++ b/pkg/lib/cockpit-components-inline-notification.tsx @@ -0,0 +1,83 @@ +/* + * This file is part of Cockpit. + * + * Copyright (C) 2016 Red Hat, Inc. + * + * Cockpit is free software; you can redistribute it and/or modify it + * under the terms of the GNU Lesser General Public License as published by + * the Free Software Foundation; either version 2.1 of the License, or + * (at your option) any later version. + * + * Cockpit is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with Cockpit; If not, see . + */ +import React, { useState } from 'react'; +import PropTypes from 'prop-types'; +import cockpit from 'cockpit'; + +import { Alert, AlertActionCloseButton, AlertProps } from "@patternfly/react-core/dist/esm/components/Alert/index.js"; +import { Button } from "@patternfly/react-core/dist/esm/components/Button/index.js"; +import './cockpit-components-inline-notification.css'; + +const _ = cockpit.gettext; + +export const InlineNotification = ({ text, detail, type = "danger", onDismiss, isInline = true, isLiveRegion = false }: { + text: string; + detail?: string; + type?: AlertProps["variant"]; + onDismiss?: (ev?: Event) => void; + isInline?: boolean; + isLiveRegion?: boolean; +}) => { + const [isDetail, setIsDetail] = useState(false); + + const detailButton = (detail && + + ); + + return ( + {text} {detailButton} } + { ...onDismiss && { actionClose: } }> + {isDetail && (

{detail}

)} +
+ ); +}; + +InlineNotification.propTypes = { + onDismiss: PropTypes.func, + isInline: PropTypes.bool, + text: PropTypes.string.isRequired, // main information to render + detail: PropTypes.string, // optional, more detailed information. If empty, the more/less button is not rendered. + type: PropTypes.string, + isLiveRegion: PropTypes.bool, +}; + +export const ModalError = ({ dialogError, dialogErrorDetail, id, isExpandable }: { + dialogError: string, + dialogErrorDetail?: string, + id?: string, + isExpandable?: boolean, +}) => { + return ( + + { typeof dialogErrorDetail === 'string' ?

{dialogErrorDetail}

: dialogErrorDetail } +
+ ); +}; diff --git a/pkg/lib/cockpit-components-install-dialog.jsx b/pkg/lib/cockpit-components-install-dialog.jsx index 0d933ae5f52..921247d44e4 100644 --- a/pkg/lib/cockpit-components-install-dialog.jsx +++ b/pkg/lib/cockpit-components-install-dialog.jsx @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import cockpit from "cockpit"; diff --git a/pkg/lib/cockpit-components-listing-panel.jsx b/pkg/lib/cockpit-components-listing-panel.jsx index 3f2b784b815..4e0a836884f 100644 --- a/pkg/lib/cockpit-components-listing-panel.jsx +++ b/pkg/lib/cockpit-components-listing-panel.jsx @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import PropTypes from 'prop-types'; diff --git a/pkg/lib/cockpit-components-logs-panel.jsx b/pkg/lib/cockpit-components-logs-panel.jsx index 9887252bd5d..4fe66ba50a1 100644 --- a/pkg/lib/cockpit-components-logs-panel.jsx +++ b/pkg/lib/cockpit-components-logs-panel.jsx @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import cockpit from "cockpit"; diff --git a/pkg/lib/cockpit-components-modifications.jsx b/pkg/lib/cockpit-components-modifications.jsx index 85b67c35a76..4d472fd13cb 100644 --- a/pkg/lib/cockpit-components-modifications.jsx +++ b/pkg/lib/cockpit-components-modifications.jsx @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import PropTypes from 'prop-types'; diff --git a/pkg/lib/cockpit-components-password.jsx b/pkg/lib/cockpit-components-password.jsx index d0c8cc722a1..65d41f5d6e4 100644 --- a/pkg/lib/cockpit-components-password.jsx +++ b/pkg/lib/cockpit-components-password.jsx @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import cockpit from 'cockpit'; import React, { useState } from 'react'; @@ -27,11 +27,11 @@ import { Popover } from "@patternfly/react-core/dist/esm/components/Popover/inde import { Progress, ProgressMeasureLocation, ProgressSize } from "@patternfly/react-core/dist/esm/components/Progress/index.js"; import { TextInput } from "@patternfly/react-core/dist/esm/components/TextInput/index.js"; import { EyeIcon, EyeSlashIcon, HelpIcon } from '@patternfly/react-icons'; +import { Flex, FlexItem } from "@patternfly/react-core/dist/esm/layouts/Flex/index.js"; import { FormHelper } from "cockpit-components-form-helper"; import './cockpit-components-password.scss'; -import { Flex, FlexItem } from '@patternfly/react-core'; const _ = cockpit.gettext; @@ -123,7 +123,6 @@ export const PasswordFormFields = ({ } - validated={error_password ? 
"warning" : "default"} id={idPrefix + "-pw1-group"} fieldId={idPrefix + "-pw1"}> diff --git a/pkg/lib/cockpit-components-plot.jsx b/pkg/lib/cockpit-components-plot.jsx index 527f513ec38..0849f737ae5 100644 --- a/pkg/lib/cockpit-components-plot.jsx +++ b/pkg/lib/cockpit-components-plot.jsx @@ -14,7 +14,7 @@ * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License - * along with Cockpit; If not, see . + * along with Cockpit; If not, see . */ import cockpit from "cockpit"; @@ -23,7 +23,9 @@ import React, { useState, useRef, useLayoutEffect } from 'react'; import { useEvent } from "hooks.js"; import { Button } from "@patternfly/react-core/dist/esm/components/Button/index.js"; -import { Dropdown, DropdownItem, DropdownSeparator, DropdownToggle } from '@patternfly/react-core/dist/esm/deprecated/components/Dropdown/index.js'; +import { Dropdown, DropdownItem, DropdownList } from '@patternfly/react-core/dist/esm/components/Dropdown/index.js'; +import { Divider } from '@patternfly/react-core/dist/esm/components/Divider/index.js'; +import { MenuToggle } from '@patternfly/react-core/dist/esm/components/MenuToggle/index.js'; import { AngleLeftIcon, AngleRightIcon, SearchMinusIcon } from '@patternfly/react-icons'; @@ -254,22 +256,32 @@ export const ZoomControls = ({ plot_state }) => { if (!zoom_state) return null; + const dropdownItems = [ + { zoom_state.goto_now(); setIsOpen(false) }}> + {_("Go to now")} + , + , + range_item(5 * 60, _("5 minutes")), + range_item(60 * 60, _("1 hour")), + range_item(6 * 60 * 60, _("6 hours")), + range_item(24 * 60 * 60, _("1 day")), + range_item(7 * 24 * 60 * 60, _("1 week")) + ]; + return ( -
+
setIsOpen(isOpen)}>{format_range(zoom_state.x_range)}} - dropdownItems={[ - { zoom_state.goto_now(); setIsOpen(false) }}> - {_("Go to now")} - , - , - range_item(5 * 60, _("5 minutes")), - range_item(60 * 60, _("1 hour")), - range_item(6 * 60 * 60, _("6 hours")), - range_item(24 * 60 * 60, _("1 day")), - range_item(7 * 24 * 60 * 60, _("1 week")) - ]} /> + toggle={(toggleRef) => ( + setIsOpen(!isOpen)} isExpanded={isOpen}> + {format_range(zoom_state.x_range)} + + )} + > + + {dropdownItems} + + { "\n" }
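Both the new cockpit-components-dropdown.tsx and the plot.jsx hunk above move to the non-deprecated PatternFly Dropdown driven by a MenuToggle render prop; KebabDropdown wraps that pattern once so pages only supply items. A hypothetical caller could look like the sketch below: the component name and handlers are invented for illustration, the KebabDropdown import specifier is assumed to follow the usual pkg/lib aliasing, and the DropdownItem and Divider import paths match the ones already used in this patch.

import React from "react";
import { DropdownItem } from "@patternfly/react-core/dist/esm/components/Dropdown/index.js";
import { Divider } from "@patternfly/react-core/dist/esm/components/Divider/index.js";

import { KebabDropdown } from "cockpit-components-dropdown"; // assumed pkg/lib import alias

// Kebab menu with two static actions separated by a Divider (not a deprecated DropdownSeparator).
export const ActionsKebab = ({ onRestart, onDelete }) => (
    <KebabDropdown
        toggleButtonId="actions-kebab"
        position="end"
        dropdownItems={[
            <DropdownItem key="restart" onClick={onRestart}>Restart</DropdownItem>,
            <Divider key="divider" />,
            <DropdownItem key="delete" onClick={onDelete}>Delete</DropdownItem>,
        ]} />
);

Because the items here are static, KebabDropdown's internal open/close state is sufficient; a kebab rendered inside a dynamic list would pass isOpen/setIsOpen down from the parent, as the component's own comment recommends.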