diff --git a/.aegir.js b/.aegir.js new file mode 100644 index 00000000..5e4c3224 --- /dev/null +++ b/.aegir.js @@ -0,0 +1,22 @@ +'use strict' + +const createServer = require('./src').createServer + +const server = createServer() +module.exports = { + karma: { + files: [{ + pattern: 'test/fixtures/**/*', + watched: false, + served: true, + included: false + }], + singleRun: true + }, + hooks: { + browser: { + pre: server.start.bind(server), + post: server.stop.bind(server) + } + } +} diff --git a/.appveyor.yml b/.appveyor.yml index b3d47701..a2b02e3e 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -1,23 +1,28 @@ +version: "{build}" + environment: matrix: - nodejs_version: "6" - nodejs_version: "8" -# cache: -# - node_modules - -platform: - - x64 +matrix: + fast_finish: true install: - - ps: Install-Product node $env:nodejs_version $env:platform + # Install Node.js + - ps: Install-Product node $env:nodejs_version + + # Upgrade npm + - npm install -g npm + + # Output our current versions for debugging - node --version - npm --version + + # Install our package dependencies - npm install test_script: - - npm test + - npm run test build: off - -version: "{build}" diff --git a/.gitignore b/.gitignore index 4cdb1c97..011edeae 100644 --- a/.gitignore +++ b/.gitignore @@ -36,3 +36,5 @@ node_modules dist docs + +.idea diff --git a/README.md b/README.md index d8859fa5..a1987b16 100644 --- a/README.md +++ b/README.md @@ -10,12 +10,33 @@ [![Appveyor CI](https://ci.appveyor.com/api/projects/status/4p9r12ch0jtthnha?svg=true)](https://ci.appveyor.com/project/wubalubadubdub/js-ipfsd-ctl-a9ywu) [![Dependency Status](https://david-dm.org/ipfs/js-ipfsd-ctl.svg?style=flat-square)](https://david-dm.org/ipfs/js-ipfsd-ctl) [![js-standard-style](https://img.shields.io/badge/code%20style-standard-brightgreen.svg?style=flat-square)](https://github.com/feross/standard) -> Control an ipfs node daemon using Node.js +> Control an IPFS daemon using JavaScript in Node.js or in the Browser. + +``` + +-----+ + | H | + | T | + +-----------------------------+ | T | + | Node.js | +-----------------------+ | P | +-----------------------------+ + | | | | | | | BROWSER | + | +-----------------------+ | | IPFS Daemon | | S | | | + | | Local Daemon Ctrl | | | | | E | | +----------------------+ | + | | +------- -------- R -----|---- Remote Daemon Ctrl | | + | +-----------------------+ | +-----|-----------|-----+ | V | | | | | + | | | | | E | | +----------------------+ | + | +-----------------------+ | | | | R | | | + | | IPFS API | | | | +-----+ | +----------------------+ | + | | -------------+ | | | IPFS API | | + | +-----------------------+ | +-----------------------|---- | | + | | | +----------------------+ | + +-----------------------------+ +-----------------------------+ +``` ## Table of Contents - [Install](#install) - [Usage](#usage) +- [API](#api) - [Contribute](#contribute) - [License](#license) @@ -30,35 +51,210 @@ npm install --save ipfsd-ctl IPFS daemons are already easy to start and stop, but this module is here to do it from JavaScript itself. +### Spawn an IPFS daemon from Node.js + ```js // Start a disposable node, and get access to the api -// print the node id, and kill the temporary daemon - -// IPFS_PATH will point to /tmp/ipfs_***** and will be -// cleaned up when the process exits. 
+// print the node id, and stop the temporary daemon
 
-var ipfsd = require('ipfsd-ctl')
+const DaemonFactory = require('ipfsd-ctl')
+const df = DaemonFactory.create()
 
-ipfsd.disposableApi(function (err, ipfs) {
-  ipfs.id(function (err, id) {
+df.spawn(function (err, ipfsd) {
+  if (err) { throw err }
+
+  ipfsd.api.id(function (err, id) {
+    if (err) { throw err }
+
     console.log(id)
-    process.exit()
+    ipfsd.stop()
   })
 })
 ```
 
-If you need want to use an existing ipfs installation you can set `$IPFS_EXEC=/path/to/ipfs` to ensure it uses that.
+### Spawn an IPFS daemon from the Browser using the provided remote endpoint
+
+```js
+// Start a remote disposable node, and get access to the api
+// print the node id, and stop the temporary daemon
+
+const DaemonFactory = require('ipfsd-ctl')
+
+const port = 9999
+const server = DaemonFactory.createServer(port)
+const df = DaemonFactory.create({ remote: true, port: port })
+
+server.start((err) => {
+  if (err) { throw err }
+
+  df.spawn((err, ipfsd) => {
+    if (err) { throw err }
+
+    ipfsd.api.id(function (err, id) {
+      if (err) { throw err }
+
+      console.log(id)
+      ipfsd.stop(server.stop)
+    })
+  })
+})
+```
+
+## Disposable vs non-disposable nodes
+
+`ipfsd-ctl` can create two types of node controllers: `disposable` and `non-disposable`. A disposable node is created in a temporary repo that is optionally initialized and started (the default) and is cleaned up on process exit. A non-disposable node, on the other hand, requires the user to initialize and start it, as well as stop and clean it up afterwards. Additionally, a non-disposable node allows you to pass a custom repo using the `repoPath` option; if `repoPath` is not defined, the default repo for the node type is used (`$HOME/.ipfs` or `$HOME/.jsipfs`). The `repoPath` parameter is ignored for disposable nodes, as there is a risk of deleting a live repo.
+
+## IPFS executables
+
+`ipfsd-ctl` no longer installs the go-ipfs or js-ipfs dependencies; instead, it expects them to be provided by the parent project. To be able to use both the go and js daemons, make sure your project includes these two npm packages as dependencies:
+
+- `ipfs` - the js-ipfs implementation
+- `go-ipfs-dep` - the packaged go-ipfs implementation
+
+## API
+
+### Daemon Factory Class
+
+#### `DaemonFactory` - `const df = DaemonFactory.create([options])`
+
+`DaemonFactory.create([options])` returns an object that exposes the `df.spawn` method.
+
+- `options` - an optional object with the following properties:
+  - `remote` bool - indicates whether the factory should spawn local or remote nodes. By default, local nodes are spawned in Node.js and remote nodes are spawned in browser environments.
+  - `port` number - the port to use for the remote factory. It should match the port on which the `DaemonFactory.createServer` endpoint was started. Defaults to 9999.
+  - `type` - the daemon type to create with this factory. See the section below for the supported types.
+  - `exec` - path to the desired IPFS executable to spawn; otherwise `ipfsd-ctl` will try to locate the correct one based on `type`. For the `proc` type, `exec` is required and expects an IPFS code reference.
+
+`ipfsd-ctl` allows spawning different IPFS implementations, such as:
+
+- **`go`** - calling `DaemonFactory.create({type: 'go'})` will spawn a `go-ipfs` daemon.
+- **`js`** - calling `DaemonFactory.create({type: 'js'})` will spawn a `js-ipfs` daemon.
+- **`proc`** - calling `DaemonFactory.create({type: 'proc', exec: require('ipfs') })` will spawn an in-process js-ipfs node using the provided code reference that implements the core IPFS API. Note that the `exec` option to `df.spawn()` is required if `type: 'proc'` is used.
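+
+For example, choosing a daemon type looks like this (a minimal sketch based on the options above; it assumes the `ipfs` and `go-ipfs-dep` packages are installed as described in the previous section):
+
+```js
+const DaemonFactory = require('ipfsd-ctl')
+
+// spawn a js-ipfs daemon; use { type: 'go' } for go-ipfs,
+// or { type: 'proc', exec: require('ipfs') } for an in-process node
+const df = DaemonFactory.create({ type: 'js' })
+
+df.spawn((err, ipfsd) => {
+  if (err) { throw err }
+
+  ipfsd.api.id((err, id) => {
+    if (err) { throw err }
+
+    console.log(id)
+    ipfsd.stop()
+  })
+})
+```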
+
+#### DaemonFactory endpoint for remote spawning - `const server = DaemonFactory.createServer([options])`
+
+`DaemonFactory.createServer` creates an instance of the bundled REST API server used by the remote controller.
+
+- exposes `start` and `stop` methods to start and stop the HTTP server endpoint.
+
+#### Spawn a new daemon with `df.spawn`
+
+Spawn either a js-ipfs or go-ipfs daemon.
+
+`df.spawn([options], callback)`
+
+`options` is an optional object with the following properties:
+  - `init` bool (default true) - should the node be initialized
+  - `start` bool (default true) - should the node be started
+  - `repoPath` string - the repository path to use for this node, ignored if the node is disposable
+  - `disposable` bool (default true) - a new repo is created and initialized for each invocation, and cleaned up automatically once the process exits
+  - `args` - array of command-line arguments to be passed to the ipfs daemon
+  - `config` - ipfs configuration options
+
+`callback` is a function with the signature `function (err, ipfsd)`, where:
+  - `err` - the error, set if spawning the node was unsuccessful
+  - `ipfsd` - the daemon controller instance:
+    - `api` - a property of `ipfsd`, an instance of [ipfs-api](https://github.com/ipfs/js-ipfs-api) attached to the newly created ipfs node
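+
+For example, a node can be spawned without being started and then started explicitly later (a minimal sketch using only the options above; error handling is reduced to `throw` for brevity):
+
+```js
+const DaemonFactory = require('ipfsd-ctl')
+const df = DaemonFactory.create({ type: 'go' })
+
+// spawn a disposable node, but do not start the daemon yet
+df.spawn({ start: false }, (err, ipfsd) => {
+  if (err) { throw err }
+
+  // start it later, passing extra daemon flags if needed
+  ipfsd.start([], (err, api) => {
+    if (err) { throw err }
+
+    api.id((err, id) => {
+      if (err) { throw err }
+
+      console.log(id)
+      ipfsd.stop()
+    })
+  })
+})
+```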
+
+### IPFS Daemon Controller (`ipfsd`)
+
+The IPFS daemon controller (`ipfsd`) allows you to interact with the spawned IPFS daemon.
+
+#### `ipfsd.apiAddr` (getter)
+
+Get the address (multiaddr) of the connected IPFS API. Returns a multiaddr.
+
+#### `ipfsd.gatewayAddr` (getter)
-For more details see https://ipfs.github.io/js-ipfsd-ctl/.
+Get the address (multiaddr) of the connected IPFS HTTP Gateway. Returns a multiaddr.
+
+#### `ipfsd.repoPath` (getter)
+
+Get the current repo path. Returns a string.
+
+#### `ipfsd.started` (getter)
+
+Whether the node is started. Returns a boolean.
+
+#### `ipfsd.init([initOpts], callback)`
+
+Initialize a repo.
+
+`initOpts` (optional) is an object with the following properties:
+  - `keysize` (default 2048) - The bit size of the identity key.
+  - `directory` (default IPFS_PATH if defined, or ~/.ipfs for go-ipfs and ~/.jsipfs for js-ipfs) - The location of the repo.
+
+`callback` is a function with the signature `function (err, ipfsd)`, where `err` is an `Error` in case something goes wrong and `ipfsd` is the daemon controller instance.
+
+#### `ipfsd.cleanup(callback)`
+
+Delete the repo that was being used. If the node was marked as `disposable`, this will be called automatically when the process exits.
+
+`callback` is a function with the signature `function(err)`.
+
+#### `ipfsd.start(flags, callback)`
+
+Start the daemon.
+
+`flags` - array of flags to be passed to the `ipfs daemon` command.
+
+`callback` is a function with the signature `function(err, ipfsApi)` that receives an instance of `ipfs-api` on success or an instance of `Error` on failure.
+
+#### `ipfsd.stop([callback])`
+
+Stop the daemon.
+
+`callback` is a function with the signature `function(err)` that receives an instance of `Error` on failure.
+
+#### `ipfsd.killProcess([callback])`
+
+Kill the `ipfs daemon` process.
+
+First a `SIGTERM` is sent; if the process hasn't exited after 10.5 seconds, a `SIGKILL` is sent.
+
+`callback` is a function with the signature `function()`, called once the process is killed.
+
+#### `ipfsd.pid(callback)`
+
+Get the pid of the `ipfs daemon` process. The `callback` receives the pid number.
+
+#### `ipfsd.getConfig([key], callback)`
+
+Returns the output of an `ipfs config` command. If no `key` is passed, the whole config is returned as an object.
+
+`key` (optional) - A specific config value to retrieve.
+
+`callback` is a function with the signature `function(err, (Object|string))` that receives an object or string on success or an `Error` instance on failure.
+
+#### `ipfsd.setConfig(key, value, callback)`
+
+Set a config value.
+
+`key` - the key of the config entry to change/set
+
+`value` - the config value to change/set
+
+`callback` is a function with the signature `function(err)` that receives an `Error` instance on failure.
+
+#### `ipfsd.version(callback)`
+
+Get the version of ipfs.
+
+`callback` is a function with the signature `function(err, version)`.
+
+### IPFS Client (`ipfsd.api`)
+
+An instance of [ipfs-api](https://github.com/ipfs/js-ipfs-api#api) that is used to interact with the daemon.
+
+This instance is returned for each successfully started IPFS daemon, either when `df.spawn({start: true})` (the default) is called or when `ipfsd.start()` is invoked for nodes that were spawned with `df.spawn({start: false})`.
+
 ### Packaging
 
-`ipfsd-ctl` can be packaged in Electron applications, but the ipfs binary
-has to be excluded from asar (Electron Archives),
+`ipfsd-ctl` can be packaged in Electron applications, but the ipfs binary has to be excluded from asar (Electron Archives).
 [read more about unpack files from asar](https://electron.atom.io/docs/tutorial/application-packaging/#adding-unpacked-files-in-asar-archive).
-`ipfsd-ctl` will try to detect if used from within an `app.asar` archive
-and tries to resolve ipfs from `app.asar.unpacked`. The ipfs binary is part of
-the `go-ipfs-dep` module.
+
+`ipfsd-ctl` will try to detect whether it is being used from within an `app.asar` archive and will resolve ipfs from `app.asar.unpacked`. The ipfs binary is part of the `go-ipfs-dep` module.
 
 ```bash
 electron-packager ./ --asar.unpackDir=node_modules/go-ipfs-dep
diff --git a/circle.yml b/circle.yml
index 00096937..58355193 100644
--- a/circle.yml
+++ b/circle.yml
@@ -2,6 +2,10 @@ machine:
   node:
     version: stable
 
+
+test:
+  post:
+    - npm run coverage -- --upload --providers coveralls
 
 dependencies:
   pre:
diff --git a/examples/disposableApi.js b/examples/disposableApi.js
deleted file mode 100644
index 0889d698..00000000
--- a/examples/disposableApi.js
+++ /dev/null
@@ -1,18 +0,0 @@
-/* eslint no-console: 0 */
-'use strict'
-
-// Start a disposable node, and get access to the api
-// print the node id
-
-// IPFS_PATH will point to /tmp/ipfs_***** and will be
-// cleaned up when the process exits.
- -const ipfsd = require('../') - -ipfsd.disposableApi((err, ipfs) => { - if (err) throw err - ipfs.id((err, id) => { - if (err) throw err - console.log(id) - }) -}) diff --git a/examples/electron-asar/app.js b/examples/electron-asar/app.js index 61fbb99c..3f2a58b8 100644 --- a/examples/electron-asar/app.js +++ b/examples/electron-asar/app.js @@ -1,8 +1,13 @@ /* eslint no-console: 0 */ 'use strict' -const { app, ipcMain, BrowserWindow } = require('electron') -const ipfsd = require('ipfsd-ctl') +const electron = require('electron') +const app = electron.app +const ipcMain = electron.ipcMain +const BrowserWindow = electron.BrowserWindow + +const DaemonFactory = require('ipfsd-ctl') +const df = DaemonFactory.create() app.on('ready', () => { const win = new BrowserWindow({ @@ -15,20 +20,22 @@ ipcMain.on('start', ({ sender }) => { console.log('starting disposable IPFS') sender.send('message', 'starting disposable IPFS') - ipfsd.disposableApi((err, ipfs) => { + df.spawn((err, ipfsd) => { if (err) { sender.send('error', err) throw err } + console.log('get id') sender.send('message', 'get id') - ipfs.id(function (err, id) { + ipfsd.api.id((err, id) => { if (err) { sender.send('error', err) throw err } console.log('got id', id) sender.send('id', JSON.stringify(id)) + ipfsd.stop() }) }) }) diff --git a/examples/electron-asar/package.json b/examples/electron-asar/package.json index 5be4c215..1d014f1b 100644 --- a/examples/electron-asar/package.json +++ b/examples/electron-asar/package.json @@ -3,7 +3,9 @@ "private": true, "main": "./app.js", "dependencies": { - "ipfsd-ctl": "*" + "ipfsd-ctl": "file:../..", + "go-ipfs-dep": "0.4.13", + "ipfs-repo": "^0.18.5" }, "devDependencies": { "electron": "^1.7.6", diff --git a/examples/id.js b/examples/id.js deleted file mode 100644 index 68c61b59..00000000 --- a/examples/id.js +++ /dev/null @@ -1,22 +0,0 @@ -/* eslint no-console: 0 */ -'use strict' - -var ipfsd = require('../') - -ipfsd.disposableApi(function (err, ipfs) { - if (err) throw err - ipfs.id(function (err, id) { - if (err) throw err - console.log('alice') - console.log(id) - }) -}) - -ipfsd.disposableApi(function (err, ipfs) { - if (err) throw err - ipfs.id(function (err, id) { - if (err) throw err - console.log('bob') - console.log(id) - }) -}) diff --git a/examples/id/id.js b/examples/id/id.js new file mode 100644 index 00000000..c3a89ba7 --- /dev/null +++ b/examples/id/id.js @@ -0,0 +1,57 @@ +/* eslint no-console: 0 */ +'use strict' + +const IPFS = require('ipfs') + +const DaemonFactory = require('ipfsd-ctl') + +DaemonFactory + .create({ type: 'go' }) + .spawn((err, ipfsd) => { + if (err) { + throw err + } + + ipfsd.api.id((err, id) => { + if (err) { + throw err + } + console.log('alice') + console.log(id) + ipfsd.stop() + }) + }) + +DaemonFactory + .create({ type: 'js' }) + .spawn((err, ipfsd) => { + if (err) { + throw err + } + + ipfsd.api.id((err, id) => { + if (err) { + throw err + } + console.log('bob') + console.log(id) + ipfsd.stop() + }) + }) + +DaemonFactory + .create({ type: 'proc' }) + .spawn({ exec: IPFS }, (err, ipfsd) => { + if (err) { + throw err + } + + ipfsd.api.id((err, id) => { + if (err) { + throw err + } + console.log('bob') + console.log(id) + ipfsd.stop(() => process.exit(0)) + }) + }) diff --git a/examples/id/package.json b/examples/id/package.json new file mode 100644 index 00000000..3294355e --- /dev/null +++ b/examples/id/package.json @@ -0,0 +1,14 @@ +{ + "name": "id", + "version": "1.0.0", + "description": "", + "main": "id.js", + "scripts": { + "test": "echo \"Error: 
no test specified\" && exit 1" + }, + "dependencies": { + "ipfsd-ctl": "file:../.." + }, + "author": "", + "license": "MIT" +} diff --git a/examples/local-disposable/local-disposable.js b/examples/local-disposable/local-disposable.js new file mode 100644 index 00000000..6950e0c4 --- /dev/null +++ b/examples/local-disposable/local-disposable.js @@ -0,0 +1,65 @@ +/* eslint no-console: 0 */ +'use strict' + +// Start a disposable node, and get access to the api +// print the node id + +const IPFS = require('ipfs') + +const DaemonFactory = require('ipfsd-ctl') + +// start a go daemon +DaemonFactory + .create({ type: 'go' }) + .spawn((err, ipfsd) => { + if (err) { + throw err + } + + ipfsd.api.id((err, id) => { + if (err) { + throw err + } + + console.log('go-ipfs') + console.log(id) + ipfsd.stop() + }) + }) + +// start a js daemon +DaemonFactory + .create({ type: 'js' }) + .spawn((err, ipfsd) => { + if (err) { + throw err + } + + ipfsd.api.id((err, id) => { + if (err) { + throw err + } + + console.log('js-ipfs') + console.log(id) + ipfsd.stop() + }) + }) + +DaemonFactory + .create({ type: 'proc' }) + .spawn({ exec: IPFS }, (err, ipfsd) => { + if (err) { + throw err + } + + ipfsd.api.id((err, id) => { + if (err) { + throw err + } + + console.log('js-ipfs') + console.log(id) + ipfsd.stop(() => process.exit(0)) + }) + }) diff --git a/examples/local-disposable/package.json b/examples/local-disposable/package.json new file mode 100644 index 00000000..4f992a61 --- /dev/null +++ b/examples/local-disposable/package.json @@ -0,0 +1,14 @@ +{ + "name": "local-disposable", + "version": "1.0.0", + "description": "", + "main": "local-disposable.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "dependencies": { + "ipfsd-ctl": "file:../.." + }, + "author": "", + "license": "MIT" +} diff --git a/examples/local.js b/examples/local.js deleted file mode 100644 index 79cdfe97..00000000 --- a/examples/local.js +++ /dev/null @@ -1,12 +0,0 @@ -/* eslint no-console: 0 */ -'use strict' - -var ipfsd = require('../') - -// opens an api connection to local running ipfs node - -ipfsd.local(function (err, ipfs) { - if (err) throw err - - console.log(ipfs) -}) diff --git a/examples/local/local.js b/examples/local/local.js new file mode 100644 index 00000000..a27833e1 --- /dev/null +++ b/examples/local/local.js @@ -0,0 +1,44 @@ +/* eslint no-console: 0 */ +'use strict' + +const IPFS = require('ipfs') + +const DaemonFactory = require('ipfsd-ctl') + +// opens an api connection to local running go-ipfs node +DaemonFactory + .create({ type: 'go' }) + .spawn({ disposable: true }, (err, ipfsd) => { + if (err) { + throw err + } + + ipfsd.api.id(function (err, id) { + if (err) { + throw err + } + + console.log('js-ipfs') + console.log(id) + ipfsd.stop() + }) + }) + +// creates an in-process running js-ipfs node +DaemonFactory + .create({ type: 'proc' }) + .spawn({ disposable: true, exec: IPFS }, (err, ipfsd) => { + if (err) { + throw err + } + + ipfsd.api.id(function (err, id) { + if (err) { + throw err + } + + console.log('in-proc-ipfs') + console.log(id) + ipfsd.stop(() => process.exit(0)) + }) + }) diff --git a/examples/local/package.json b/examples/local/package.json new file mode 100644 index 00000000..d4bd3e00 --- /dev/null +++ b/examples/local/package.json @@ -0,0 +1,14 @@ +{ + "name": "local", + "version": "1.0.0", + "description": "", + "main": "local.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "dependencies": { + "ipfsd-ctl": "file:../.." 
+ }, + "author": "", + "license": "MIT" +} diff --git a/examples/remote-disposable/package.json b/examples/remote-disposable/package.json new file mode 100644 index 00000000..61885524 --- /dev/null +++ b/examples/remote-disposable/package.json @@ -0,0 +1,14 @@ +{ + "name": "remote-disposable", + "version": "1.0.0", + "description": "", + "main": "remote-disposable.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "dependencies": { + "ipfsd-ctl": "file:../.." + }, + "author": "", + "license": "MIT" +} diff --git a/examples/remote-disposable/remote-disposable.js b/examples/remote-disposable/remote-disposable.js new file mode 100644 index 00000000..ccd7ec21 --- /dev/null +++ b/examples/remote-disposable/remote-disposable.js @@ -0,0 +1,30 @@ +/* eslint no-console: 0 */ +'use strict' + +// Start a remote disposable node, and get access to the api +// print the node id, and stop the temporary daemon + +const DaemonFactory = require('ipfsd-ctl') +const df = DaemonFactory.create({ remote: true }) +const server = DaemonFactory.createServer() + +server.start((err) => { + if (err) { + throw err + } + + df.spawn((err, ipfsd) => { + if (err) { + throw err + } + + ipfsd.api.id((err, id) => { + if (err) { + throw err + } + + console.log(id) + ipfsd.stop(() => server.stop()) + }) + }) +}) diff --git a/package.json b/package.json index bd1f783e..f533bb00 100644 --- a/package.json +++ b/package.json @@ -5,14 +5,28 @@ "main": "src/index.js", "scripts": { "lint": "aegir lint", - "coverage": "aegir coverage", - "test": "aegir test -t node", + "coverage": "COVERAGE=true aegir coverage --timeout 50000", + "test": "aegir test -t node -t browser --no-cors", + "test:node": "aegir test -t node", + "test:browser": "aegir test -t browser --no-cors", "docs": "aegir docs", "release": "aegir release -t node", "release-minor": "aegir release --type minor -t node", "release-major": "aegir release --type major -t node", "coverage-publish": "aegir coverage -u" }, + "browser": { + "./src/utils/create-repo-nodejs.js": "./src/utils/create-repo-browser.js", + "./src/daemon-node.js": false, + "./src/remote-node/routes.js": false, + "./src/exec.js": false, + "hapi": false, + "glob": false, + "fs": false, + "joi": false, + "stream": "readable-stream", + "http": "stream-http" + }, "engines": { "node": ">=6.0.0", "npm": ">=3.0.0" @@ -51,27 +65,40 @@ "license": "MIT", "dependencies": { "async": "^2.6.0", + "boom": "^7.1.1", "detect-node": "^2.0.3", - "eslint-config-standard-jsx": "^4.0.2", - "go-ipfs-dep": "0.4.13", - "ipfs-api": "^17.1.3", + "hapi": "^16.6.2", + "hat": "0.0.3", + "ipfs-api": "^17.2.5", + "ipfs-repo": "^0.18.5", + "joi": "^13.0.2", + "lodash.clone": "^4.5.0", + "lodash.defaultsdeep": "^4.6.0", "multiaddr": "^3.0.1", "once": "^1.4.0", + "readable-stream": "^2.3.3", "rimraf": "^2.6.2", + "safe-json-parse": "^4.0.0", "shutdown": "^0.3.0", + "stream-http": "^2.7.2", "subcomandante": "^1.0.5", + "superagent": "^3.8.2", "truthy": "0.0.1" }, "devDependencies": { "aegir": "^12.2.0", "chai": "^4.1.2", + "detect-port": "^1.2.2", "dirty-chai": "^2.0.1", - "ipfs": "^0.26.0", + "go-ipfs-dep": "0.4.13", + "ipfs": "^0.27.5", "is-running": "1.0.5", "mkdirp": "^0.5.1", - "multihashes": "~0.4.12", "pre-commit": "^1.2.2", - "safe-buffer": "^5.1.1" + "proxyquire": "^1.8.0", + "sinon": "^4.1.3", + "superagent-mocker": "^0.5.2", + "supertest": "^3.0.0" }, "repository": { "type": "git", diff --git a/src/daemon-ctrl.js b/src/daemon-ctrl.js new file mode 100644 index 00000000..0ee0b8ad --- /dev/null +++ 
b/src/daemon-ctrl.js @@ -0,0 +1,140 @@ +'use strict' + +const defaults = require('lodash.defaultsdeep') +const clone = require('lodash.clone') +const waterfall = require('async/waterfall') +const join = require('path').join + +const Node = require('./daemon-node') +const ProcNode = require('./in-proc-node') + +const defaultOptions = { + type: 'go', + disposable: true, + start: true, + init: true +} + +const defaultConfig = { + API: { + HTTPHeaders: { + 'Access-Control-Allow-Origin': ['*'], + 'Access-Control-Allow-Methods': [ + 'PUT', + 'POST', + 'GET' + ] + } + }, + Addresses: { + Swarm: [`/ip4/127.0.0.1/tcp/0`], + API: `/ip4/127.0.0.1/tcp/0`, + Gateway: `/ip4/127.0.0.1/tcp/0` + } +} + +/** + * Control go-ipfs nodes directly from JavaScript. + * + * @namespace DaemonController + */ +class DaemonController { + /** + * Create a DaemonController instance + * + * @param {Object} opts + * - `type` string - one of 'go', 'js' or 'proc', + * the type of the daemon to spawn + * - `exec` string (optional) - the path of the daemon + * executable or IPFS class in the case of `proc` + * + * @return {*} + */ + constructor (opts) { + opts = opts || {} + this.type = opts.type || 'go' + this.exec = opts.exec + } + + /** + * Get the version of the currently used go-ipfs binary. + * + * @memberof IpfsDaemonController + * @param {Object} [opts={}] + * @param {function(Error, string)} callback + * @returns {undefined} + */ + version (opts, callback) { + opts = opts || {} + opts.type = this.type + const node = new Node(opts) + node.version(callback) + } + + /** + * Spawn an IPFS node, either js-ipfs or go-ipfs + * + * Options are: + * - `init` bool - should the node be initialized + * - `start` bool - should the node be started + * - `repoPath` string - the repository path to use for this node, ignored if node is disposable + * - `disposable` bool - a new repo is created and initialized for each invocation + * - `config` - ipfs configuration options + * - `args` - array of cmd line arguments to be passed to ipfs daemon + * - `exec` string (optional) - path to the desired IPFS executable to spawn, + * this will override the `exec` set when creating the daemon controller factory instance + * + * @param {Object} [opts={}] - various config options and ipfs config parameters + * @param {Function} callback(err, [`ipfs-api instance`, `Node (ctrl) instance`]) - a callback that receives an array with an `ipfs-instance` attached to the node and a `Node` + * @return {undefined} + */ + spawn (opts, callback) { + if (typeof opts === 'function') { + callback = opts + opts = defaultOptions + } + + const options = defaults({}, opts, defaultOptions) + options.init = (typeof options.init !== 'undefined' ? options.init : true) + if (!options.disposable) { + const nonDisposableConfig = clone(defaultConfig) + delete nonDisposableConfig.Addresses + + options.init = false + options.start = false + + const defaultRepo = join(process.env.HOME || process.env.USERPROFILE, + options.isJs ? 
'.jsipfs' : '.ipfs') + options.repoPath = options.repoPath || (process.env.IPFS_PATH || defaultRepo) + options.config = defaults({}, options.config, nonDisposableConfig) + } else { + options.config = defaults({}, options.config, defaultConfig) + } + + let node + options.type = this.type + options.exec = options.exec || this.exec + if (this.type === 'proc') { + if (typeof options.exec !== 'function') { + return callback(new Error(`'type' proc requires 'exec' to be a coderef`)) + } + + node = new ProcNode(options) + } else { + node = new Node(options) + } + + waterfall([ + (cb) => options.init ? node.init(cb) : cb(null, node), + (node, cb) => options.start ? node.start(options.args, cb) : cb(null, null) + ], (err) => { + if (err) { + return callback(err) + } + + callback(null, node) + }) + } +} + +module.exports = DaemonController diff --git a/src/daemon.js b/src/daemon-node.js similarity index 52% rename from src/daemon.js rename to src/daemon-node.js index e5cf42ab..7909cd1f 100644 --- a/src/daemon.js +++ b/src/daemon-node.js @@ -7,101 +7,50 @@ const multiaddr = require('multiaddr') const rimraf = require('rimraf') const shutdown = require('shutdown') const path = require('path') -const join = path.join const once = require('once') -const os = require('os') -const isWindows = os.platform() === 'win32' +const truthy = require('truthy') +const utils = require('./utils') +const flatten = require('./utils').flatten -const exec = require('./exec') +const tryJsonParse = utils.tryJsonParse +const parseConfig = utils.parseConfig +const tempDir = utils.tempDir +const findIpfsExecutable = utils.findIpfsExecutable +const setConfigValue = utils.setConfigValue +const configureNode = utils.configureNode +const run = utils.run -const ipfsDefaultPath = findIpfsExecutable() - -const GRACE_PERIOD = 7500 // amount of ms to wait before sigkill - -function findIpfsExecutable () { - const rootPath = process.env.testpath ? process.env.testpath : __dirname - - let appRoot = path.join(rootPath, '..') - // If inside .asar try to load from .asar.unpacked - // this only works if asar was built with - // asar --unpack-dir=node_modules/go-ipfs-dep/* (not tested) - // or - // electron-packager ./ --asar.unpackDir=node_modules/go-ipfs-dep - if (appRoot.includes(`.asar${path.sep}`)) { - appRoot = appRoot.replace(`.asar${path.sep}`, `.asar.unpacked${path.sep}`) - } - const appName = isWindows ? 
'ipfs.exe' : 'ipfs' - const depPath = path.join('go-ipfs-dep', 'go-ipfs', appName) - const npm3Path = path.join(appRoot, '../', depPath) - const npm2Path = path.join(appRoot, 'node_modules', depPath) - - if (fs.existsSync(npm3Path)) { - return npm3Path - } - if (fs.existsSync(npm2Path)) { - return npm2Path - } - - throw new Error('Cannot find the IPFS executable') -} - -function setConfigValue (node, key, value, callback) { - exec( - node.exec, - ['config', key, value, '--json'], - { env: node.env }, - callback - ) -} - -function configureNode (node, conf, callback) { - async.eachOfSeries(conf, (value, key, cb) => { - setConfigValue(node, key, JSON.stringify(value), cb) - }, callback) -} - -function tryJsonParse (input, callback) { - let res - try { - res = JSON.parse(input) - } catch (err) { - return callback(err) - } - callback(null, res) -} - -// Consistent error handling -function parseConfig (path, callback) { - async.waterfall([ - (cb) => fs.readFile(join(path, 'config'), cb), - (file, cb) => tryJsonParse(file.toString(), cb) - ], callback) -} +const GRACE_PERIOD = 10500 // amount of ms to wait before sigkill /** - * Controll a go-ipfs node. + * Controll a go-ipfs or js-ipfs node. */ class Node { /** * Create a new node. * - * @param {string} path * @param {Object} [opts] * @param {Object} [opts.env={}] - Additional environment settings, passed to executing shell. - * @param {boolean} [disposable=false] - Should this be a temporary node. * @returns {Node} */ - constructor (path, opts, disposable) { - this.path = path - this.opts = opts || {} - this.exec = process.env.IPFS_EXEC || ipfsDefaultPath + constructor (opts) { + const rootPath = process.env.testpath ? process.env.testpath : __dirname + const type = truthy(process.env.IPFS_TYPE) + + this.opts = opts || { type: type || 'go' } + this.opts.config = flatten(this.opts.config) + + const tmpDir = tempDir(opts.type === 'js') + this.path = this.opts.disposable ? tmpDir : (this.opts.repoPath || tmpDir) + this.disposable = this.opts.disposable + this.exec = this.opts.exec || process.env.IPFS_EXEC || findIpfsExecutable(this.opts.type, rootPath) this.subprocess = null this.initialized = fs.existsSync(path) this.clean = true - this.env = Object.assign({}, process.env, { IPFS_PATH: path }) - this.disposable = disposable this._apiAddr = null this._gatewayAddr = null + this._started = false + this.api = null if (this.opts.env) { Object.assign(this.env, this.opts.env) @@ -126,8 +75,31 @@ class Node { return this._gatewayAddr } - _run (args, opts, callback) { - return exec(this.exec, args, opts, callback) + /** + * Get the current repo path + * + * @return {string} + */ + get repoPath () { + return this.path + } + + /** + * Is the node started + * + * @return {boolean} + */ + get started () { + return this._started + } + + /** + * Is the environment + * + * @return {object} + */ + get env () { + return this.path ? 
Object.assign({}, process.env, { IPFS_PATH: this.path }) : process.env } /** @@ -149,15 +121,14 @@ class Node { if (initOpts.directory && initOpts.directory !== this.path) { this.path = initOpts.directory - this.env.IPFS_PATH = this.path } - this._run(['init', '-b', keySize], { env: this.env }, (err, result) => { + run(this, ['init', '-b', keySize], { env: this.env }, (err, result) => { if (err) { return callback(err) } - configureNode(this, this.opts, (err) => { + configureNode(this, this.opts.config, (err) => { if (err) { return callback(err) } @@ -169,7 +140,7 @@ class Node { }) if (this.disposable) { - shutdown.addHandler('disposable', 1, this.shutdown.bind(this)) + shutdown.addHandler('disposable', 1, this.cleanup.bind(this)) } } @@ -181,8 +152,8 @@ class Node { * @param {function(Error)} callback * @returns {undefined} */ - shutdown (callback) { - if (this.clean || !this.disposable) { + cleanup (callback) { + if (this.clean) { return callback() } @@ -196,7 +167,7 @@ class Node { * @param {function(Error, IpfsApi)} callback * @returns {undefined} */ - startDaemon (flags, callback) { + start (flags, callback) { if (typeof flags === 'function') { callback = flags flags = [] @@ -205,31 +176,17 @@ class Node { flags = [] } - const args = ['daemon'].concat(flags) + const args = ['daemon'].concat(flags || []) callback = once(callback) - // Check if there were explicit options to want or not want. Otherwise, - // assume values will be in the local daemon config - // TODO: This should check the local daemon config - const want = { - gateway: typeof this.opts['Addresses.Gateway'] === 'string' - ? this.opts['Addresses.Gateway'].length > 0 - : true, - api: typeof this.opts['Addresses.API'] === 'string' - ? this.opts['Addresses.API'].length > 0 - : true - } - parseConfig(this.path, (err, conf) => { if (err) { return callback(err) } let output = '' - let returned = false - - this.subprocess = this._run(args, { env: this.env }, { + this.subprocess = run(this, args, { env: this.env }, { error: (err) => { // Only look at the last error const input = String(err) @@ -238,7 +195,7 @@ class Node { .filter(Boolean) .slice(-1)[0] || '' - if (input.match('daemon is running')) { + if (input.match(/(?:daemon is running|Daemon is ready)/)) { // we're good return callback(null, this.api) } @@ -250,31 +207,26 @@ class Node { data: (data) => { output += String(data) - const apiMatch = want.api - ? output.trim().match(/API server listening on (.*)/) - : true + const apiMatch = output.trim().match(/API (?:server|is) listening on[:]? (.*)/) + const gwMatch = output.trim().match(/Gateway (?:.*) listening on[:]?(.*)/) - const gwMatch = want.gateway - ? 
output.trim().match(/Gateway (.*) listening on (.*)/) - : true - - if (apiMatch && gwMatch && !returned) { - returned = true - - if (want.api) { - this._apiAddr = multiaddr(apiMatch[1]) - this.api = ipfs(apiMatch[1]) - this.api.apiHost = this.apiAddr.nodeAddress().address - this.api.apiPort = this.apiAddr.nodeAddress().port - } + if (apiMatch && apiMatch.length > 0) { + this._apiAddr = multiaddr(apiMatch[1]) + this.api = ipfs(apiMatch[1]) + this.api.apiHost = this.apiAddr.nodeAddress().address + this.api.apiPort = this.apiAddr.nodeAddress().port + } - if (want.gateway) { - this._gatewayAddr = multiaddr(gwMatch[2]) - this.api.gatewayHost = this.gatewayAddr.nodeAddress().address - this.api.gatewayPort = this.gatewayAddr.nodeAddress().port - } + if (gwMatch && gwMatch.length > 0) { + this._gatewayAddr = multiaddr(gwMatch[1]) + this.api.gatewayHost = this.gatewayAddr.nodeAddress().address + this.api.gatewayPort = this.gatewayAddr.nodeAddress().port + } - callback(null, this.api) + if (output.match(/(?:daemon is running|Daemon is ready)/)) { + // we're good + this._started = true + return callback(null, this.api) } } }) @@ -287,7 +239,7 @@ class Node { * @param {function(Error)} callback * @returns {undefined} */ - stopDaemon (callback) { + stop (callback) { callback = callback || function noop () {} if (!this.subprocess) { @@ -300,7 +252,7 @@ class Node { /** * Kill the `ipfs daemon` process. * - * First `SIGTERM` is sent, after 7.5 seconds `SIGKILL` is sent + * First `SIGTERM` is sent, after 10.5 seconds `SIGKILL` is sent * if the process hasn't exited yet. * * @param {function()} callback - Called when the process was killed. @@ -317,6 +269,7 @@ class Node { subprocess.once('close', () => { clearTimeout(timeout) this.subprocess = null + this._started = false callback() }) @@ -327,10 +280,11 @@ class Node { /** * Get the pid of the `ipfs daemon` process. * - * @returns {number} + * @param {function()} callback - receives the pid + * @returns {undefined} */ - daemonPid () { - return this.subprocess && this.subprocess.pid + pid (callback) { + callback(this.subprocess && this.subprocess.pid) } /** @@ -347,9 +301,13 @@ class Node { callback = key key = 'show' } + if (!key) { + key = 'show' + } async.waterfall([ - (cb) => this._run( + (cb) => run( + this, ['config', key], { env: this.env }, cb @@ -372,26 +330,7 @@ class Node { * @returns {undefined} */ setConfig (key, value, callback) { - this._run( - ['config', key, value, '--json'], - { env: this.env }, - callback - ) - } - - /** - * Replace the configuration with a given file - * - * @param {string} file - path to the new config file - * @param {function(Error)} callback - * @returns {undefined} - */ - replaceConf (file, callback) { - this._run( - ['config', 'replace', file], - { env: this.env }, - callback - ) + setConfigValue(this, key, value, callback) } /** @@ -401,7 +340,7 @@ class Node { * @returns {undefined} */ version (callback) { - this._run(['version'], { env: this.env }, callback) + run(this, ['version'], { env: this.env }, callback) } } diff --git a/src/in-proc-node.js b/src/in-proc-node.js new file mode 100644 index 00000000..8ba6aa48 --- /dev/null +++ b/src/in-proc-node.js @@ -0,0 +1,280 @@ +'use strict' + +const createRepo = require('./utils').createRepo +const multiaddr = require('multiaddr') +const flatten = require('./utils').flatten +const async = require('async') +const defaults = require('lodash.defaultsdeep') + +/** + * Controll a go-ipfs or js-ipfs node. + */ +class Node { + /** + * Create a new node. 
+ * + * @param {Object} [opts] + * @param {Object} [opts.env={}] - Additional environment settings, passed to executing shell. + * @returns {Node} + */ + constructor (opts) { + this.opts = opts || {} + + const IPFS = this.opts.exec + + this.opts.args = this.opts.args || [] + this.path = this.opts.repoPath + this.repo = createRepo(this.path) + this.disposable = this.opts.disposable + this.clean = true + this._apiAddr = null + this._gatewayAddr = null + this._started = false + this.initialized = false + this.api = null + + this.opts.EXPERIMENTAL = defaults({}, opts.EXPERIMENTAL, { + pubsub: false, + sharding: false, + relay: { + enabled: false, + hop: { + enabled: false + } + } + }) + + this.opts.EXPERIMENTAL.pubsub = (this.opts.args.indexOf('--enable-pubsub-experiment') > -1) + this.opts.EXPERIMENTAL.sharding = (this.opts.args.indexOf('--enable-sharding-experiment') > -1) + this.exec = new IPFS({ + repo: this.repo, + init: false, + start: false, + EXPERIMENTAL: this.opts.EXPERIMENTAL, + libp2p: this.opts.libp2p + }) + } + + /** + * Get the address of connected IPFS API. + * + * @returns {Multiaddr} + */ + get apiAddr () { + return this._apiAddr + } + + /** + * Get the address of connected IPFS HTTP Gateway. + * + * @returns {Multiaddr} + */ + get gatewayAddr () { + return this._gatewayAddr + } + + /** + * Get the current repo path + * + * @return {string} + */ + get repoPath () { + return this.path + } + + /** + * Is the node started + * + * @return {boolean} + */ + get started () { + return this._started + } + + /** + * Is the environment + * + * @return {object} + */ + get env () { + throw new Error('Not implemented!') + } + + /** + * Initialize a repo. + * + * @param {Object} [initOpts={}] + * @param {number} [initOpts.keysize=2048] - The bit size of the identiy key. + * @param {string} [initOpts.directory=IPFS_PATH] - The location of the repo. + * @param {function (Error, Node)} callback + * @returns {undefined} + */ + init (initOpts, callback) { + if (!callback) { + callback = initOpts + initOpts = {} + } + + initOpts.bits = initOpts.keysize || 2048 + this.exec.init(initOpts, (err) => { + if (err) { + return callback(err) + } + + const conf = flatten(this.opts.config) + async.eachOf(conf, (val, key, cb) => { + this.setConfig(key, val, cb) + }, (err) => { + if (err) { + return callback(err) + } + + this.initialized = true + callback(null, this) + }) + }) + } + + /** + * Delete the repo that was being used. + * If the node was marked as `disposable` this will be called + * automatically when the process is exited. + * + * @param {function(Error)} callback + * @returns {undefined} + */ + cleanup (callback) { + if (this.clean) { + return callback() + } + + this.repo.teardown(callback) + } + + /** + * Start the daemon. + * + * @param {Array} [flags=[]] - Flags to be passed to the `ipfs daemon` command. + * @param {function(Error, IpfsApi)} callback + * @returns {undefined} + */ + start (flags, callback) { + if (typeof flags === 'function') { + callback = flags + flags = undefined // not used + } + + this.exec.start((err) => { + if (err) { + return callback(err) + } + + this._started = true + this.api = this.exec + this.exec.config.get((err, conf) => { + if (err) { + return callback(err) + } + + this._apiAddr = conf.Addresses.API + this._gatewayAddr = conf.Addresses.Gateway + + this.api.apiHost = multiaddr(conf.Addresses.API).nodeAddress().host + this.api.apiPort = multiaddr(conf.Addresses.API).nodeAddress().port + + callback(null, this.api) + }) + }) + } + + /** + * Stop the daemon. 
+ * + * @param {function(Error)} [callback] + * @returns {undefined} + */ + stop (callback) { + callback = callback || function noop () {} + + if (!this.exec) { + return callback() + } + + this.exec.stop((err) => { + if (err) { + return callback(err) + } + + this._started = false + if (this.disposable) { + return this.cleanup(callback) + } + + return callback() + }) + } + + /** + * Kill the `ipfs daemon` process. + * + * First `SIGTERM` is sent, after 10.5 seconds `SIGKILL` is sent + * if the process hasn't exited yet. + * + * @param {function()} callback - Called when the process was killed. + * @returns {undefined} + */ + killProcess (callback) { + this.stop(callback) + } + + /** + * Get the pid of the `ipfs daemon` process. + * + * @returns {number} + */ + pid () { + throw new Error('not implemented') + } + + /** + * Call `ipfs config` + * + * If no `key` is passed, the whole config is returned as an object. + * + * @param {string} [key] - A specific config to retrieve. + * @param {function(Error, (Object|string))} callback + * @returns {undefined} + */ + getConfig (key, callback) { + if (typeof key === 'function') { + callback = key + key = undefined + } + + this.exec.config.get(key, callback) + } + + /** + * Set a config value. + * + * @param {string} key + * @param {string} value + * @param {function(Error)} callback + * @returns {undefined} + */ + setConfig (key, value, callback) { + this.exec.config.set(key, value, callback) + } + + /** + * Get the version of ipfs + * + * @param {function(Error, string)} callback + * @returns {undefined} + */ + version (callback) { + this.exec.version(callback) + } +} + +module.exports = Node diff --git a/src/index.js b/src/index.js index 38dc0bbe..e5065509 100644 --- a/src/index.js +++ b/src/index.js @@ -1,120 +1,28 @@ 'use strict' -const os = require('os') -const join = require('path').join +const LocalController = require('./daemon-ctrl') +const remote = require('./remote-node') +const isNode = require('detect-node') +const defaults = require('lodash.defaultsdeep') -const Node = require('./daemon') +class DaemonFactory { + static create (opts) { + const options = defaults({}, opts, { remote: !isNode }) -// Note how defaultOptions are Addresses.Swarm and not Addresses: { Swarm : <> } -const defaultOptions = { - 'Addresses.Swarm': ['/ip4/0.0.0.0/tcp/0'], - 'Addresses.Gateway': '', - 'Addresses.API': '/ip4/127.0.0.1/tcp/0', - disposable: true, - init: true -} - -function tempDir () { - return join(os.tmpdir(), `ipfs_${String(Math.random()).substr(2)}`) -} - -/** - * Control go-ipfs nodes directly from JavaScript. - * - * @namespace IpfsDaemonController - */ -const IpfsDaemonController = { - /** - * Get the version of the currently used go-ipfs binary. - * - * @memberof IpfsDaemonController - * @param {function(Error, string)} callback - * @returns {undefined} - */ - version (callback) { - (new Node()).version(callback) - }, - - /** - * Create a new local node. - * - * @memberof IpfsDaemonController - * @param {string} [path] - Location of the repo. Defaults to `$IPFS_PATH`, or `$HOME/.ipfs`, or `$USER_PROFILE/.ipfs`. 
- * @param {Object} [opts={}] - * @param {function(Error, Node)} callback - * @returns {undefined} - */ - local (path, opts, callback) { - if (typeof opts === 'function') { - callback = opts - opts = {} - } - - if (!callback) { - callback = path - path = process.env.IPFS_PATH || - join(process.env.HOME || - process.env.USERPROFILE, '.ipfs') - } - - process.nextTick(() => callback(null, new Node(path, opts))) - }, - - /** - * Create a new disposable node. - * This means the repo is created in a temporary location and cleaned up on process exit. - * - * @memberof IpfsDaemonController - * @param {Object} [opts={}] - * @param {function(Error, Node)} callback - * @returns {undefined} - */ - disposable (opts, callback) { - if (typeof opts === 'function') { - callback = opts - opts = defaultOptions - } - - let options = {} - Object.assign(options, defaultOptions, opts || {}) - - const repoPath = options.repoPath || tempDir() - const disposable = options.disposable - delete options.disposable - delete options.repoPath - - const node = new Node(repoPath, options, disposable) - - if (typeof options.init === 'boolean' && - options.init === false) { - process.nextTick(() => callback(null, node)) - } else { - node.init((err) => callback(err, node)) + if (options.type === 'proc') { + options.remote = false } - }, - /** - * Create a new disposable node and already started the daemon. - * - * @memberof IpfsDaemonController - * @param {Object} [opts={}] - * @param {function(Error, Node)} callback - * @returns {undefined} - */ - disposableApi (opts, callback) { - if (typeof opts === 'function') { - callback = opts - opts = defaultOptions + if (options.remote) { + return new remote.RemoteController(options) } - this.disposable(opts, (err, node) => { - if (err) { - return callback(err) - } + return new LocalController(options) + } - node.startDaemon(callback) - }) + static createServer (port) { + return new remote.Server(port) } } -module.exports = IpfsDaemonController +module.exports = DaemonFactory diff --git a/src/remote-node/client.js b/src/remote-node/client.js new file mode 100644 index 00000000..fb56b260 --- /dev/null +++ b/src/remote-node/client.js @@ -0,0 +1,319 @@ +'use strict' + +const request = require('superagent') +const IpfsApi = require('ipfs-api') +const multiaddr = require('multiaddr') + +function createApi (apiAddr, gwAddr) { + let api + if (apiAddr) { + api = IpfsApi(apiAddr) + api.apiHost = multiaddr(apiAddr).nodeAddress().address + api.apiPort = multiaddr(apiAddr).nodeAddress().port + } + + if (api && gwAddr) { + api.gatewayHost = multiaddr(gwAddr).nodeAddress().address + api.gatewayPort = multiaddr(gwAddr).nodeAddress().port + } + + return api +} + +class Node { + constructor (baseUrl, id, apiAddr, gwAddrs) { + this.baseUrl = baseUrl + this._id = id + this._apiAddr = multiaddr(apiAddr) + this._gwAddr = multiaddr(gwAddrs) + this.initialized = false + this.started = false + this.api = createApi(apiAddr, gwAddrs) + } + + /** + * Get the address of connected IPFS API. + * + * @returns {Multiaddr} + */ + get apiAddr () { + return this._apiAddr + } + + /** + * Set the address of connected IPFS API. + * + * @param {Multiaddr} addr + * @returns {void} + */ + set apiAddr (addr) { + this._apiAddr = addr + } + + /** + * Get the address of connected IPFS HTTP Gateway. + * + * @returns {Multiaddr} + */ + get gatewayAddr () { + return this._gwAddr + } + + /** + * Set the address of connected IPFS Gateway. 
+ * + * @param {Multiaddr} addr + * @returns {void} + */ + set gatewayAddr (addr) { + this._gwAddr = addr + } + + /** + * Initialize a repo. + * + * @param {Object} [initOpts={}] + * @param {number} [initOpts.keysize=2048] - The bit size of the identiy key. + * @param {string} [initOpts.directory=IPFS_PATH] - The location of the repo. + * @param {function (Error, Node)} cb + * @returns {undefined} + */ + init (initOpts, cb) { + if (typeof initOpts === 'function') { + cb = initOpts + initOpts = {} + } + + request + .post(`${this.baseUrl}/init`) + .query({ id: this._id }) + .send({ initOpts }) + .end((err, res) => { + if (err) { + return cb(new Error(err.response ? err.response.body.message : err)) + } + + this.initialized = res.body.initialized + cb(null, res.body) + }) + } + + /** + * Delete the repo that was being used. + * If the node was marked as `disposable` this will be called + * automatically when the process is exited. + * + * @param {function(Error)} cb + * @returns {undefined} + */ + cleanup (cb) { + request + .post(`${this.baseUrl}/cleanup`) + .query({ id: this._id }) + .end((err) => { cb(err) }) + } + + /** + * Start the daemon. + * + * @param {Array} [flags=[]] - Flags to be passed to the `ipfs daemon` command. + * @param {function(Error, IpfsApi)} cb + * @returns {undefined} + */ + start (flags, cb) { + if (typeof flags === 'function') { + cb = flags + flags = [] + } + + request + .post(`${this.baseUrl}/start`) + .query({ id: this._id }) + .send({ flags }) + .end((err, res) => { + if (err) { + return cb(new Error(err.response ? err.response.body.message : err)) + } + + this.started = true + + const apiAddr = res.body.api ? res.body.api.apiAddr : '' + const gatewayAddr = res.body.api ? res.body.api.gatewayAddr : '' + + this.api = createApi(apiAddr, gatewayAddr) + return cb(null, this.api) + }) + } + + /** + * Stop the daemon. + * + * @param {function(Error)} [cb] + * @returns {undefined} + */ + stop (cb) { + cb = cb || (() => {}) + request + .post(`${this.baseUrl}/stop`) + .query({ id: this._id }) + .end((err) => { + if (err) { + return cb(new Error(err.response.body.message)) + } + + this.started = false + cb(null) + }) + } + + /** + * Kill the `ipfs daemon` process. + * + * First `SIGTERM` is sent, after 7.5 seconds `SIGKILL` is sent + * if the process hasn't exited yet. + * + * @param {function()} [cb] - Called when the process was killed. + * @returns {undefined} + */ + killProcess (cb) { + cb = cb || (() => {}) + request + .post(`${this.baseUrl}/kill`) + .query({ id: this._id }) + .end((err) => { + if (err) { + return cb(new Error(err.response.body.message)) + } + + this.started = false + cb(null) + }) + } + + /** + * Get the pid of the `ipfs daemon` process. + * + * @param {Function} cb + * @returns {number} + */ + pid (cb) { + request + .get(`${this.baseUrl}/pid`) + .query({ id: this._id }) + .end((err, res) => { + if (err) { + return cb(new Error(err.response ? err.response.body.message : err)) + } + + cb(null, res.body.pid) + }) + } + + /** + * Call `ipfs config` + * + * If no `key` is passed, the whole config is returned as an object. + * + * @param {string} [key] - A specific config to retrieve. + * @param {function(Error, (Object|string))} cb + * @returns {undefined} + */ + getConfig (key, cb) { + if (typeof key === 'function') { + cb = key + key = undefined + } + + const qr = { id: this._id } + qr.key = key + request + .get(`${this.baseUrl}/config`) + .query(qr) + .end((err, res) => { + if (err) { + return cb(new Error(err.response ? 
err.response.body.message : err)) + } + + cb(null, res.body.config) + }) + } + + /** + * Set a config value. + * + * @param {string} key + * @param {string} value + * @param {function(Error)} cb + * @returns {undefined} + */ + setConfig (key, value, cb) { + request.put(`${this.baseUrl}/config`) + .send({ key, value }) + .query({ id: this._id }) + .end((err) => { + if (err) { + return cb(new Error(err.response ? err.response.body.message : err)) + } + + cb(null) + }) + } +} + +class RemoteFactory { + constructor (opts) { + opts = opts || {} + if (!opts.host) { + opts.host = 'localhost' + } + + if (!opts.port) { + opts.port = 9999 + } + + if (typeof opts.host === 'number') { + opts.port = opts.host + opts.host = 'localhost' + } + + this.port = opts.port + this.host = opts.host + this.type = opts.type || 'go' + + if (this.type === 'proc') { + throw new Error(`'proc' is not allowed in remote mode`) + } + + this.baseUrl = `${opts.secure ? 'https://' : 'http://'}${this.host}:${this.port}` + } + + spawn (opts, cb) { + if (typeof opts === 'function') { + cb = opts + opts = {} + } + + opts = opts || {} + request + .post(`${this.baseUrl}/spawn`) + .send({ opts, type: this.type }) + .end((err, res) => { + if (err) { + return cb(new Error(err.response ? err.response.body.message : err)) + } + + const apiAddr = res.body.api ? res.body.api.apiAddr : '' + const gatewayAddr = res.body.api ? res.body.api.gatewayAddr : '' + + const node = new Node( + this.baseUrl, + res.body.id, + apiAddr, + gatewayAddr) + + cb(null, node) + }) + } +} + +module.exports = RemoteFactory diff --git a/src/remote-node/index.js b/src/remote-node/index.js new file mode 100644 index 00000000..65705cec --- /dev/null +++ b/src/remote-node/index.js @@ -0,0 +1,9 @@ +'use strict' + +const Server = require('./server') +const RemoteController = require('./client') + +module.exports = { + Server, + RemoteController +} diff --git a/src/remote-node/routes.js b/src/remote-node/routes.js new file mode 100644 index 00000000..2ca40467 --- /dev/null +++ b/src/remote-node/routes.js @@ -0,0 +1,255 @@ +'use strict' + +const CtrlFactory = require('../daemon-ctrl') +const hat = require('hat') +const boom = require('boom') +const Joi = require('joi') +const defaults = require('lodash.defaultsdeep') + +const config = { + validate: { + query: { + id: Joi.string().alphanum().required() + } + } +} + +let nodes = {} +module.exports = (server) => { + /** + * Spawn an IPFS node + * The repo is created in a temporary location and cleaned up on process exit. + **/ + server.route({ + method: 'POST', + path: '/spawn', + handler: (request, reply) => { + const payload = request.payload || {} + const ctrl = new CtrlFactory(payload.type) + ctrl.spawn(payload.opts, (err, ipfsd) => { + if (err) { + return reply(boom.badRequest(err)) + } + + const id = hat() + nodes[id] = ipfsd + + let api = null + if (nodes[id].started) { + api = { + apiAddr: nodes[id].apiAddr ? nodes[id].apiAddr.toString() : '', + gatewayAddr: nodes[id].gatewayAddr ? nodes[id].gatewayAddr.toString() : '' + } + } + reply({ id, api }) + }) + } + }) + + /** + * Get the address of connected IPFS API. + */ + server.route({ + method: 'GET', + path: '/api-addr', + handler: (request, reply) => { + const id = request.query.id + reply({ apiAddr: nodes[id].apiAddr.toString() }) + }, + config + }) + + /** + * Get the address of connected IPFS HTTP Gateway. 
+ */ + server.route({ + method: 'GET', + path: '/getaway-addr', + handler: (request, reply) => { + const id = request.query.id + reply({ getawayAddr: nodes[id].gatewayAddr.toString() }) + }, + config + }) + + /** + * Initialize a repo. + **/ + server.route({ + method: 'POST', + path: '/init', + handler: (request, reply) => { + const id = request.query.id + const payload = request.payload || {} + nodes[id].init(payload.initOpts, (err, node) => { + if (err) { + return reply(boom.badRequest(err)) + } + + reply({ initialized: node.initialized }) + }) + }, + config + }) + + /** + * Delete the repo that was being used. + * If the node was marked as `disposable` this will be called + * automatically when the process is exited. + **/ + server.route({ + method: 'POST', + path: '/cleanup', + handler: (request, reply) => { + const id = request.query.id + nodes[id].cleanup((err) => { + if (err) { + return reply(boom.badRequest(err)) + } + + reply().code(200) + }) + }, + config + }) + + /** + * Start the daemon. + **/ + server.route({ + method: 'POST', + path: '/start', + handler: (request, reply) => { + const id = request.query.id + const payload = request.payload || {} + const flags = payload.flags || [] + nodes[id].start(flags, (err) => { + if (err) { + return reply(boom.badRequest(err)) + } + + reply({ + api: { + apiAddr: nodes[id].apiAddr.toString(), + gatewayAddr: nodes[id].gatewayAddr.toString() + } + }) + }) + }, + config + }) + + /** + * Stop the daemon. + */ + server.route({ + method: 'POST', + path: '/stop', + handler: (request, reply) => { + const id = request.query.id + nodes[id].stop((err) => { + if (err) { + return reply(boom.badRequest(err)) + } + + reply().code(200) + }) + }, + config + }) + + /** + * Kill the `ipfs daemon` process. + * + * First `SIGTERM` is sent, after 7.5 seconds `SIGKILL` is sent + * if the process hasn't exited yet. + */ + server.route({ + method: 'POST', + path: '/kill', + handler: (request, reply) => { + const id = request.query.id + nodes[id].killProcess((err) => { + if (err) { + return reply(boom.badRequest(err)) + } + + reply().code(200) + }) + }, + config + }) + + /** + * Get the pid of the `ipfs daemon` process. + * + * @returns {number} + */ + server.route({ + method: 'GET', + path: '/pid', + handler: (request, reply) => { + const id = request.query.id + reply({ pid: nodes[id].pid }) + }, + config + }) + + /** + * Call `ipfs config` + * + * If no `key` is passed, the whole config is returned as an object. + */ + server.route({ + method: 'GET', + path: '/config', + handler: (request, reply) => { + const id = request.query.id + const key = request.query.key + nodes[id].getConfig(key, (err, config) => { + if (err) { + return reply(boom.badRequest(err)) + } + + reply({ config }) + }) + }, + config: defaults({}, { + validate: { + query: { + key: Joi.string().optional() + } + } + }, config) + }) + + /** + * Set a config value. 
+ */ + server.route({ + method: 'PUT', + path: '/config', + handler: (request, reply) => { + const id = request.query.id + const key = request.payload.key + const val = request.payload.value + + nodes[id].setConfig(key, val, (err) => { + if (err) { + return reply(boom.badRequest(err)) + } + + reply().code(200) + }) + }, + config: defaults({}, { + validate: { + payload: { + key: Joi.string(), + value: Joi.any() + } + } + }, config) + + }) +} diff --git a/src/remote-node/server.js b/src/remote-node/server.js new file mode 100644 index 00000000..6703fcf9 --- /dev/null +++ b/src/remote-node/server.js @@ -0,0 +1,34 @@ +'use strict' + +const Hapi = require('hapi') +const routes = require('./routes') + +class Server { + constructor (port) { + this.server = null + this.port = typeof port === 'undefined' ? 9999 : port + } + + start (cb) { + cb = cb || (() => {}) + + this.server = new Hapi.Server() + this.server.connection({ port: this.port, host: 'localhost', routes: { cors: true } }) + + routes(this.server) + this.server.start((err) => { + if (err) { + return cb(err) + } + + cb(null, this.server) + }) + } + + stop (cb) { + cb = cb || (() => {}) + this.server.stop(cb) + } +} + +module.exports = Server diff --git a/src/utils/create-repo-browser.js b/src/utils/create-repo-browser.js new file mode 100644 index 00000000..18b220b7 --- /dev/null +++ b/src/utils/create-repo-browser.js @@ -0,0 +1,28 @@ +/* global self */ +'use strict' + +const IPFSRepo = require('ipfs-repo') +const hat = require('hat') + +const idb = self.indexedDB || + self.mozIndexedDB || + self.webkitIndexedDB || + self.msIndexedDB + +function createTempRepo (repoPath) { + repoPath = repoPath || '/ipfs-' + hat() + + const repo = new IPFSRepo(repoPath) + + repo.teardown = (done) => { + repo.close(() => { + idb.deleteDatabase(repoPath) + idb.deleteDatabase(repoPath + '/blocks') + done() + }) + } + + return repo +} + +module.exports = createTempRepo diff --git a/src/utils/create-repo-nodejs.js b/src/utils/create-repo-nodejs.js new file mode 100644 index 00000000..79d91fe3 --- /dev/null +++ b/src/utils/create-repo-nodejs.js @@ -0,0 +1,41 @@ +'use strict' + +const IPFSRepo = require('ipfs-repo') +const os = require('os') +const path = require('path') +const hat = require('hat') +const series = require('async/series') +const rimraf = require('rimraf') +const fs = require('fs') + +const clean = (dir) => { + try { + fs.accessSync(dir) + } catch (err) { + // Does not exist so all good + return + } + + rimraf.sync(dir) +} + +function createTempRepo (repoPath) { + repoPath = repoPath || path.join(os.tmpdir(), '/ipfs-test-' + hat()) + + const repo = new IPFSRepo(repoPath) + + repo.teardown = (done) => { + series([ + // ignore err, might have been closed already + (cb) => repo.close(() => cb()), + (cb) => { + clean(repoPath) + cb() + } + ], done) + } + + return repo +} + +module.exports = createTempRepo diff --git a/src/utils/index.js b/src/utils/index.js new file mode 100644 index 00000000..63a462c2 --- /dev/null +++ b/src/utils/index.js @@ -0,0 +1,118 @@ +'use strict' + +const async = require('async') +const fs = require('fs') +const hat = require('hat') +const os = require('os') +const path = require('path') +const exec = require('../exec') +const safeParse = require('safe-json-parse/callback') +const createRepo = require('./create-repo-nodejs') + +const join = path.join +const isWindows = os.platform() === 'win32' + +exports.createRepo = createRepo + +// taken from https://github.com/hughsk/flat +exports.flatten = (target) => { + const output = 
{} + const step = (object, prev) => { + object = object || {} + Object.keys(object).forEach(function (key) { + const value = object[key] + const isarray = Array.isArray(value) + const type = Object.prototype.toString.call(value) + const isbuffer = Buffer.isBuffer(value) + const isobject = ( + type === '[object Object]' || + type === '[object Array]' + ) + + const newKey = prev + ? prev + '.' + key + : key + + if (!isarray && !isbuffer && isobject && Object.keys(value).length) { + return step(value, newKey) + } + + output[newKey] = value + }) + } + + step(target) + + return output +} + +// Consistent error handling +exports.parseConfig = (path, callback) => { + async.waterfall([ + (cb) => fs.readFile(join(path, 'config'), cb), + (file, cb) => safeParse(file.toString(), cb) + ], callback) +} + +exports.tempDir = (isJs) => { + return join(os.tmpdir(), `${isJs ? 'jsipfs' : 'ipfs'}_${hat()}`) +} + +exports.findIpfsExecutable = (type, rootPath) => { + const execPath = { + go: path.join('go-ipfs-dep', 'go-ipfs', isWindows ? 'ipfs.exe' : 'ipfs'), + js: path.join('ipfs', 'src', 'cli', 'bin.js') + } + + let appRoot = rootPath ? path.join(rootPath, '..') : process.cwd() + // If inside .asar try to load from .asar.unpacked + // this only works if asar was built with + // asar --unpack-dir=node_modules/go-ipfs-dep/* (not tested) + // or + // electron-packager ./ --asar.unpackDir=node_modules/go-ipfs-dep + if (appRoot.includes(`.asar${path.sep}`)) { + appRoot = appRoot.replace(`.asar${path.sep}`, `.asar.unpacked${path.sep}`) + } + const depPath = execPath[type] + const npm3Path = path.join(appRoot, '../', depPath) + const npm2Path = path.join(appRoot, 'node_modules', depPath) + + if (fs.existsSync(npm3Path)) { + return npm3Path + } + if (fs.existsSync(npm2Path)) { + return npm2Path + } + + throw new Error('Cannot find the IPFS executable') +} + +function run (node, args, opts, callback) { + let executable = node.exec + if (isWindows && node.opts.type !== 'go') { + args = args || [] + args.unshift(node.exec) + executable = process.execPath + } + + return exec(executable, args, opts, callback) +} + +exports.run = run + +function setConfigValue (node, key, value, callback) { + run( + node, + ['config', key, value, '--json'], + { env: node.env }, + callback + ) +} + +exports.setConfigValue = setConfigValue + +exports.configureNode = (node, conf, callback) => { + async.eachOfSeries(conf, (value, key, cb) => { + setConfigValue(node, key, JSON.stringify(value), cb) + }, callback) +} diff --git a/test/add-retrive.js b/test/add-retrive.js new file mode 100644 index 00000000..36f1c84a --- /dev/null +++ b/test/add-retrive.js @@ -0,0 +1,46 @@ +/* eslint-env mocha */ +'use strict' + +const async = require('async') +const chai = require('chai') +const dirtyChai = require('dirty-chai') +const expect = chai.expect +chai.use(dirtyChai) + +module.exports = () => { + describe('should add and retrieve content', function () { + const blorb = Buffer.from('blorb') + let store + let retrieve + + before(function (done) { + this.timeout(30 * 1000) + async.waterfall([ + (cb) => this.ipfsd.api.block.put(blorb, cb), + (block, cb) => { + store = block.cid.toBaseEncodedString() + this.ipfsd.api.block.get(store, cb) + }, + (_block, cb) => { + retrieve = _block.data + cb() + } + ], done) + }) + + it('should be able to store objects', () => { + expect(store) + .to.eql('QmPv52ekjS75L4JmHpXVeuJ5uX2ecSfSZo88NSyxwA3rAQ') + }) + + it('should be able to retrieve objects', () => { + expect(retrieve.toString()).to.be.eql('blorb') + }) + + 
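+      // `this.ipfsd` here refers to the daemon spawned by the including suite
+      // (it is set on the shared mocha context by e.g. test/spawning.js)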
it('should have started the daemon and returned an api with host/port', function () { + expect(this.ipfsd.api).to.have.property('id') + expect(this.ipfsd.api).to.have.property('apiHost') + expect(this.ipfsd.api).to.have.property('apiPort') + }) + }) +} diff --git a/test/api.js b/test/api.js new file mode 100644 index 00000000..2f58318b --- /dev/null +++ b/test/api.js @@ -0,0 +1,141 @@ +/* eslint-env mocha */ +/* eslint max-nested-callbacks: ["error", 8] */ +'use strict' + +const chai = require('chai') +const dirtyChai = require('dirty-chai') +const expect = chai.expect +chai.use(dirtyChai) + +const multiaddr = require('multiaddr') +const os = require('os') +const path = require('path') + +const isNode = require('detect-node') +const isWindows = os.platform() === 'win32' + +module.exports = (df, type) => { + return () => { + const API_PORT = type === 'js' ? '5002' : '5001' + const GW_PORT = type === 'js' ? '9090' : '8080' + + const config = { + Addresses: { + API: `/ip4/127.0.0.1/tcp/${API_PORT}`, + Gateway: `/ip4/127.0.0.1/tcp/${GW_PORT}` + } + } + + describe('ipfs-api version', () => { + let ipfsd + let api + + before(function (done) { + this.timeout(50 * 1000) + df.spawn({ start: false, config }, (err, daemon) => { + expect(err).to.not.exist() + ipfsd = daemon + ipfsd.start((err, res) => { + expect(err).to.not.exist() + api = res + done() + }) + }) + }) + + after((done) => ipfsd.stop(done)) + + // skip on windows for now + // https://github.com/ipfs/js-ipfsd-ctl/pull/155#issuecomment-326970190 + // fails on windows see https://github.com/ipfs/js-ipfs-api/issues/408 + if (isWindows || !isNode) { + return it.skip('uses the correct ipfs-api') + } + + it('uses the correct ipfs-api', (done) => { + api.util.addFromFs(path.join(__dirname, 'fixtures/'), { + recursive: true + }, (err, res) => { + expect(err).to.not.exist() + + const added = res[res.length - 1] + + // Temporary: Need to see what is going on on windows + expect(res).to.deep.equal([ + { + path: 'fixtures/test.txt', + hash: 'Qmf412jQZiuVUtdgnB36FXFX7xg5V6KEbSJ4dpQuhkLyfD', + size: 19 + }, + { + path: 'fixtures', + hash: 'QmXkiTdnfRJjiQREtF5dWf2X4V9awNHQSn9YGofwVY4qUU', + size: 73 + } + ]) + + expect(res.length).to.equal(2) + expect(added).to.have.property('path', 'fixtures') + expect(added).to.have.property( + 'hash', + 'QmXkiTdnfRJjiQREtF5dWf2X4V9awNHQSn9YGofwVY4qUU' + ) + expect(res[0]).to.have.property('path', 'fixtures/test.txt') + expect(res[0]).to.have.property( + 'hash', + 'Qmf412jQZiuVUtdgnB36FXFX7xg5V6KEbSJ4dpQuhkLyfD' + ) + done() + }) + }) + }) + + describe('validate api', () => { + it('starts the daemon and returns valid API and gateway addresses', function (done) { + this.timeout(50 * 1000) + df.spawn({ config }, (err, res) => { + expect(err).to.not.exist() + const ipfsd = res + + // Check for props in daemon + expect(ipfsd).to.have.property('apiAddr') + expect(ipfsd).to.have.property('gatewayAddr') + expect(ipfsd.apiAddr).to.not.be.null() + expect(multiaddr.isMultiaddr(ipfsd.apiAddr)).to.equal(true) + expect(ipfsd.gatewayAddr).to.not.be.null() + expect(multiaddr.isMultiaddr(ipfsd.gatewayAddr)).to.equal(true) + + // Check for props in ipfs-api instance + expect(ipfsd.api).to.have.property('apiHost') + expect(ipfsd.api).to.have.property('apiPort') + expect(ipfsd.api).to.have.property('gatewayHost') + expect(ipfsd.api).to.have.property('gatewayPort') + expect(ipfsd.api.apiHost).to.equal('127.0.0.1') + expect(ipfsd.api.apiPort).to.equal(API_PORT) + expect(ipfsd.api.gatewayHost).to.equal('127.0.0.1') + 
expect(ipfsd.api.gatewayPort).to.equal(GW_PORT) + + ipfsd.stop(done) + }) + }) + + it('allows passing flags', function (done) { + // skip in js, since js-ipfs doesn't fail on unrecognized args, it prints the help instead + if (type) { + this.skip() + } else { + df.spawn({ start: false }, (err, ipfsd) => { + expect(err).to.not.exist() + ipfsd.start(['--should-not-exist'], (err) => { + expect(err).to.exist() + expect(err.message) + .to.match(/Unrecognized option 'should-not-exist'/) + + done() + }) + }) + } + }) + }) + } +} diff --git a/test/browser.js b/test/browser.js new file mode 100644 index 00000000..960fc61e --- /dev/null +++ b/test/browser.js @@ -0,0 +1,4 @@ +/* eslint-env mocha */ +'use strict' + +require('./daemon') diff --git a/test/daemon.js b/test/daemon.js new file mode 100644 index 00000000..bfc79301 --- /dev/null +++ b/test/daemon.js @@ -0,0 +1,27 @@ +/* eslint-env mocha */ +'use strict' + +const daemon = require('./spawning') +const api = require('./api') +const DaemonFactory = require('../src') +const IPFS = require('ipfs') + +describe('ipfsd-ctl', () => { + // clean up IPFS env + afterEach(() => Object.keys(process.env) + .forEach((key) => { + if (key.includes('IPFS')) { + delete process.env[key] + } + })) + + describe('Go daemon', () => { + const df = DaemonFactory.create({ type: 'go' }) + daemon(df, 'go')() + api(df, 'go')() + }) + + describe('In-process daemon', () => { + daemon(DaemonFactory.create({ remote: false, type: 'proc', exec: IPFS }), 'proc')() + }) +}) diff --git a/test/exec.spec.js b/test/exec.js similarity index 86% rename from test/exec.spec.js rename to test/exec.js index be6fc3c5..c07274fb 100644 --- a/test/exec.spec.js +++ b/test/exec.js @@ -8,6 +8,10 @@ const cp = require('child_process') const path = require('path') const exec = require('../src/exec') +const os = require('os') + +const isWindows = os.platform() === 'win32' + const survivor = path.join(__dirname, 'survivor') function token () { @@ -28,7 +32,7 @@ function psExpect (pid, expect, grace, callback) { function isRunningGrep (pattern, callback) { const cmd = 'ps aux' - cp.exec(cmd, (err, stdout, stderr) => { + cp.exec(cmd, { maxBuffer: 1024 * 500 }, (err, stdout, stderr) => { if (err) { return callback(err) } @@ -57,7 +61,15 @@ function makeCheck (n, done) { // exiting as it once was when the test was designed // - [ ] Need test vector or figure out why tail changed // Ref: https://github.com/ipfs/js-ipfsd-ctl/pull/160#issuecomment-325669206 -describe.skip('exec', () => { +// UPDATE: 12/06/2017 - `tail` seems to work fine on all ci systems. +// I'm leaving it enabled for now. This does need a different approach for windows though. 
+describe('exec', () => { + // TODO: skip on windows for now + // TODO: running under coverage messes up the process hierarchies + if (isWindows || process.env['COVERAGE']) { + return + } + it('SIGTERM kills hang', (done) => { const tok = token() diff --git a/test/node.js b/test/node.js new file mode 100644 index 00000000..af53ce47 --- /dev/null +++ b/test/node.js @@ -0,0 +1,25 @@ +/* eslint-env mocha */ + +'use strict' + +require('./daemon') +require('./exec') +require('./utils') +require('./remote/routes') +require('./remote/client') +require('./remote/server') + +const startStop = require('./start-stop') +const install = require('./npm-installs') + +describe('node', () => { + describe('cleanup', () => { + startStop('go')() + startStop('js')() + }) + + describe('install', () => { + install('go')() + install('js')() + }) +}) diff --git a/test/npm-installs.js b/test/npm-installs.js new file mode 100644 index 00000000..c3bcbd67 --- /dev/null +++ b/test/npm-installs.js @@ -0,0 +1,71 @@ +/* eslint-env mocha */ +/* eslint max-nested-callbacks: ["error", 8] */ +'use strict' + +const chai = require('chai') +const dirtyChai = require('dirty-chai') +const expect = chai.expect +chai.use(dirtyChai) +const fs = require('fs') +const rimraf = require('rimraf') +const mkdirp = require('mkdirp') +const path = require('path') +const os = require('os') +const isWindows = os.platform() === 'win32' + +module.exports = (type) => { + return () => { + describe('ipfs executable path', () => { + const tmp = os.tmpdir() + const appName = type === 'js' + ? 'bin.js' + : isWindows ? 'ipfs.exe' : 'ipfs' + + const oldPath = process.env.testpath + before(() => { process.env.testpath = path.join(tmp, 'ipfsd-ctl-test/node_modules/ipfsd-ctl/lib') }) // fake __dirname + after(() => { process.env.testpath = oldPath }) + + it('has the correct path when installed with npm3', (done) => { + let execPath = type === 'js' + ? 'ipfsd-ctl-test/node_modules/ipfs/src/cli' + : 'ipfsd-ctl-test/node_modules/go-ipfs-dep/go-ipfs' + + let npm3Path = path.join(tmp, execPath) + + mkdirp(npm3Path, (err) => { + expect(err).to.not.exist() + + fs.writeFileSync(path.join(npm3Path, appName)) + delete require.cache[require.resolve('../src/daemon-node.js')] + const Daemon = require('../src/daemon-node.js') + + const node = new Daemon({ type }) + expect(node.exec) + .to.eql(path.join(tmp, `${execPath}/${appName}`)) + rimraf(path.join(tmp, 'ipfsd-ctl-test'), done) + }) + }) + + it('has the correct path when installed with npm2', (done) => { + let execPath = type === 'js' + ? 
'ipfsd-ctl-test/node_modules/ipfsd-ctl/node_modules/ipfs/src/cli' + : 'ipfsd-ctl-test/node_modules/ipfsd-ctl/node_modules/go-ipfs-dep/go-ipfs' + + let npm2Path = path.join(tmp, execPath) + + mkdirp(npm2Path, (err) => { + expect(err).to.not.exist() + + fs.writeFileSync(path.join(npm2Path, appName)) + delete require.cache[require.resolve('../src/daemon-node.js')] + const Daemon = require('../src/daemon-node.js') + + const node = new Daemon({ type }) + expect(node.exec) + .to.eql(path.join(tmp, `${execPath}/${appName}`)) + rimraf(path.join(tmp, 'ipfsd-ctl-test'), done) + }) + }) + }) + } +} diff --git a/test/npm-installs.spec.js b/test/npm-installs.spec.js deleted file mode 100644 index c7c2b183..00000000 --- a/test/npm-installs.spec.js +++ /dev/null @@ -1,57 +0,0 @@ -/* eslint-env mocha */ -/* eslint max-nested-callbacks: ["error", 8] */ -'use strict' - -const chai = require('chai') -const dirtyChai = require('dirty-chai') -const expect = chai.expect -chai.use(dirtyChai) -const fs = require('fs') -const rimraf = require('rimraf') -const mkdirp = require('mkdirp') -const path = require('path') -const os = require('os') -const isWindows = os.platform() === 'win32' - -describe('ipfs executable path', () => { - const tmp = os.tmpdir() - const appName = isWindows ? 'ipfs.exe' : 'ipfs' - - it('has the correct path when installed with npm3', (done) => { - process.env.testpath = path.join(tmp, 'ipfsd-ctl-test/node_modules/ipfsd-ctl/lib') // fake __dirname - let npm3Path = path.join(tmp, 'ipfsd-ctl-test/node_modules/go-ipfs-dep/go-ipfs') - - mkdirp(npm3Path, (err) => { - expect(err).to.not.exist() - - fs.writeFileSync(path.join(npm3Path, appName)) - delete require.cache[require.resolve('../src/daemon.js')] - const Daemon = require('../src/daemon.js') - - const node = new Daemon() - expect(node.exec) - .to.eql(path.join(tmp, `ipfsd-ctl-test/node_modules/go-ipfs-dep/go-ipfs/${appName}`)) - rimraf(path.join(tmp, 'ipfsd-ctl-test'), done) - }) - }) - - it('has the correct path when installed with npm2', (done) => { - process.env.testpath = path.join(tmp, 'ipfsd-ctl-test/node_modules/ipfsd-ctl/lib') // fake __dirname - - let npm2Path = path.join(tmp, 'ipfsd-ctl-test/node_modules/ipfsd-ctl/node_modules/go-ipfs-dep/go-ipfs') - - mkdirp(npm2Path, (err) => { - expect(err).to.not.exist() - - fs.writeFileSync(path.join(npm2Path, appName)) - delete require.cache[require.resolve('../src/daemon.js')] - const Daemon = require('../src/daemon.js') - - const node = new Daemon() - - expect(node.exec) - .to.eql(path.join(tmp, `ipfsd-ctl-test/node_modules/ipfsd-ctl/node_modules/go-ipfs-dep/go-ipfs/${appName}`)) - rimraf(path.join(tmp, 'ipfsd-ctl-test'), done) - }) - }) -}) diff --git a/test/remote/client.js b/test/remote/client.js new file mode 100644 index 00000000..8cea5a7d --- /dev/null +++ b/test/remote/client.js @@ -0,0 +1,455 @@ +/* eslint max-nested-callbacks: ["error", 6] */ +/* eslint-env mocha */ +'use strict' + +const chai = require('chai') +const dirtyChai = require('dirty-chai') +const expect = chai.expect +chai.use(dirtyChai) + +const hat = require('hat') + +const boom = require('boom') +const proxyquire = require('proxyquire') +const superagent = require('superagent') +const mock = require('superagent-mocker')(superagent) + +const ClientFactory = proxyquire('../../src/remote-node/client', { + superagent: () => { + return superagent + } +}) + +describe('client', () => { + const client = new ClientFactory() + + let node = null + describe('.spawn', () => { + describe('handle valid', () => { + after(() => { + 
mock.clearRoutes() + }) + + it('should handle valid request', (done) => { + mock.post('http://localhost:9999/spawn', (req) => { + expect(req.body.opts.opt1).to.equal('hello!') + return { + body: { + id: hat(), + api: { + apiAddr: '/ip4/127.0.0.1/tcp/5001', + gatewayAddr: '/ip4/127.0.0.1/tcp/8080' + } + } + } + }) + + client.spawn({ opt1: 'hello!' }, (err, ipfsd) => { + expect(err).to.not.exist() + expect(ipfsd).to.exist() + expect(ipfsd.apiAddr.toString()).to.equal('/ip4/127.0.0.1/tcp/5001') + expect(ipfsd.gatewayAddr.toString()).to.equal('/ip4/127.0.0.1/tcp/8080') + node = ipfsd + done() + }) + }) + }) + + describe('handle invalid', () => { + after(() => { + mock.clearRoutes() + }) + + it('should handle valid request', (done) => { + mock.post('http://localhost:9999/spawn', () => { + const badReq = boom.badRequest() + return { + status: badReq.output.statusCode, + body: { + message: badReq.message + } + } + }) + + client.spawn((err, ipfsd) => { + expect(err).to.exist() + expect(ipfsd).to.not.exist() + done() + }) + }) + }) + }) + + describe('.init', () => { + describe('handle valid', () => { + after(() => { + mock.clearRoutes() + }) + + it('should handle valid request', (done) => { + mock.post('http://localhost:9999/init', (req) => { + expect(req.query.id).to.exist() + expect(req.body.initOpts.initOpt1).to.equal('hello!') + + return { + body: { + initialized: true + } + } + }) + + node.init({ initOpt1: 'hello!' }, (err, res) => { + expect(err).to.not.exist() + expect(res.initialized).to.be.ok() + done() + }) + }) + }) + + describe('handle invalid', () => { + after(() => { + mock.clearRoutes() + }) + + it('should handle valid request', (done) => { + mock.post('http://localhost:9999/init', () => { + const badReq = boom.badRequest() + return { + status: badReq.output.statusCode, + body: { + message: badReq.message + } + } + }) + + node.init((err) => { + expect(err).to.exist() + done() + }) + }) + }) + }) + + describe('.cleanup', () => { + describe('handle valid', () => { + after(() => { + mock.clearRoutes() + }) + + it('should handle valid request', (done) => { + mock.post('http://localhost:9999/cleanup', (req) => { + expect(req.query.id).to.exist() + }) + + node.cleanup((err) => { + expect(err).to.not.exist() + done() + }) + }) + }) + + describe('handle invalid', () => { + after(() => { + mock.clearRoutes() + }) + + it('should handle invalid request', (done) => { + mock.post('http://localhost:9999/cleanup', () => { + const badReq = boom.badRequest() + return { + status: badReq.output.statusCode, + body: { + message: badReq.message + } + } + }) + + node.init((err) => { + expect(err).to.exist() + done() + }) + }) + }) + }) + + describe('.start', () => { + describe('handle valid', () => { + after(() => { + mock.clearRoutes() + }) + + it('should handle valid request', (done) => { + mock.post('http://localhost:9999/start', (req) => { + expect(req.query.id).to.exist() + expect(req.body.flags).to.exist() + expect(req.body.flags[0]).to.equal('--enable-pubsub-experiment') + + return { + body: { + api: { + apiAddr: '/ip4/127.0.0.1/tcp/5001', + gatewayAddr: '/ip4/127.0.0.1/tcp/8080' + } + } + } + }) + + node.start(['--enable-pubsub-experiment'], (err, api) => { + expect(err).to.not.exist() + expect(api).to.exist() + done() + }) + }) + }) + + describe('handle invalid', () => { + after(() => { + mock.clearRoutes() + }) + + it('should handle invalid request', (done) => { + mock.post('http://localhost:9999/start', () => { + const badReq = boom.badRequest() + return { + status: badReq.output.statusCode, + 
body: { + message: badReq.message + } + } + }) + + node.start((err) => { + expect(err).to.exist() + done() + }) + }) + }) + }) + + describe('.stop', () => { + describe('handle valid', () => { + after(() => { + mock.clearRoutes() + }) + + it('should handle valid request', (done) => { + mock.post('http://localhost:9999/stop', (req) => { + expect(req.query.id).to.exist() + }) + + node.stop((err) => { + expect(err).to.not.exist() + done() + }) + }) + }) + + describe('handle invalid', () => { + after(() => { + mock.clearRoutes() + }) + + it('should handle invalid request', (done) => { + mock.post('http://localhost:9999/stop', () => { + const badReq = boom.badRequest() + return { + status: badReq.output.statusCode, + body: { + message: badReq.message + } + } + }) + + node.stop((err) => { + expect(err).to.exist() + done() + }) + }) + }) + }) + + describe('.killProcess', () => { + describe('handle valid', () => { + after(() => { + mock.clearRoutes() + }) + + it('should handle valid request', (done) => { + mock.post('http://localhost:9999/kill', (req) => { + expect(req.query.id).to.exist() + }) + + node.killProcess((err) => { + expect(err).to.not.exist() + done() + }) + }) + }) + + describe('handle invalid', () => { + after(() => { + mock.clearRoutes() + }) + + it('should handle invalid request', (done) => { + mock.post('http://localhost:9999/kill', () => { + const badReq = boom.badRequest() + return { + status: badReq.output.statusCode, + body: { + message: badReq.message + } + } + }) + + node.killProcess((err) => { + expect(err).to.exist() + done() + }) + }) + }) + }) + + describe('.pid', () => { + describe('handle valid', () => { + after(() => { + mock.clearRoutes() + }) + + it('should handle valid request', (done) => { + mock.get('http://localhost:9999/pid', (req) => { + expect(req.query.id).to.exist() + return { + body: { + pid: 1 + } + } + }) + + node.pid((err, res) => { + expect(err).to.not.exist() + expect(res).to.equal(1) + done() + }) + }) + }) + + describe('handle invalid', () => { + after(() => { + mock.clearRoutes() + }) + + it('should handle invalid request', (done) => { + mock.get('http://localhost:9999/pid', () => { + const badReq = boom.badRequest() + return { + status: badReq.output.statusCode, + body: { + message: badReq.message + } + } + }) + + node.pid((err) => { + expect(err).to.exist() + done() + }) + }) + }) + }) + + describe('.getConfig', () => { + describe('handle valid', () => { + after(() => { + mock.clearRoutes() + }) + + it('should handle valid request', (done) => { + mock.get('http://localhost:9999/config', (req) => { + expect(req.query.id).to.exist() + expect(req.query.key).to.equal('foo') + return { + body: { + config: { + foo: 'bar' + } + } + } + }) + + node.getConfig('foo', (err, res) => { + expect(err).to.not.exist() + expect(res.foo).to.equal('bar') + done() + }) + }) + }) + + describe('handle invalid', () => { + after(() => { + mock.clearRoutes() + }) + + it('should handle invalid request', (done) => { + mock.get('http://localhost:9999/config', () => { + const badReq = boom.badRequest() + return { + status: badReq.output.statusCode, + body: { + message: badReq.message + } + } + }) + + node.getConfig((err) => { + expect(err).to.exist() + done() + }) + }) + }) + }) + + describe('.setConfig', () => { + describe('handle valid', () => { + after(() => { + mock.clearRoutes() + }) + + it('should handle valid request', (done) => { + mock.put('http://localhost:9999/config', (req) => { + expect(req.query.id).to.exist() + expect(req.body.key).to.equal('foo') + 
expect(req.body.value).to.equal('bar') + }) + + node.setConfig('foo', 'bar', (err) => { + expect(err).to.not.exist() + done() + }) + }) + }) + + describe('handle invalid', () => { + after(() => { + mock.clearRoutes() + }) + + it('should handle invalid request', (done) => { + mock.put('http://localhost:9999/config', () => { + const badReq = boom.badRequest() + return { + status: badReq.output.statusCode, + body: { + message: badReq.message + } + } + }) + + node.setConfig('foo', 'bar', (err) => { + expect(err).to.exist() + done() + }) + }) + }) + }) +}) diff --git a/test/remote/routes.js b/test/remote/routes.js new file mode 100644 index 00000000..322a1672 --- /dev/null +++ b/test/remote/routes.js @@ -0,0 +1,350 @@ +/* eslint-env mocha */ +'use strict' + +const chai = require('chai') +const dirtyChai = require('dirty-chai') +const expect = chai.expect +chai.use(dirtyChai) + +const proxyquire = require('proxyquire') +const multiaddr = require('multiaddr') + +const Hapi = require('hapi') +const routes = proxyquire('../../src/remote-node/routes', { + '../daemon-ctrl': class { + spawn (ops, cb) { + const node = {} + node.apiAddr = multiaddr('/ip4/127.0.0.1/tcp/5001') + node.gatewayAddr = multiaddr('/ip4/127.0.0.1/tcp/8080') + node.started = false + + node.init = (opts, cb) => { + cb(null, node) + } + + node.cleanup = (cb) => { + cb() + } + + node.start = (_, cb) => { + node.started = true + + const api = {} + api.apiHost = node.apiAddr.nodeAddress().address + api.apiPort = node.apiAddr.nodeAddress().port + + api.gatewayHost = node.gatewayAddr.nodeAddress().address + api.gatewayPort = node.gatewayAddr.nodeAddress().port + + node.api = api + cb(null, api) + } + + node.stop = (cb) => { + node.killProcess(cb) + } + + node.killProcess = (cb) => { + node.started = false + cb() + } + + node.pid = (cb) => { + cb(null, 1) + } + + node.getConfig = (key, cb) => { + cb(null, { foo: 'bar' }) + } + + node.setConfig = (key, val, cb) => { + cb() + } + + node.start(null, () => { + cb(null, node) + }) + } + } +}) + +describe('routes', () => { + let id + const server = new Hapi.Server() + before(() => { + server.connection() + routes(server) + }) + + after((done) => server.stop(done)) + + describe('POST /spawn', () => { + it('should return 200', (done) => { + server.inject({ + method: 'POST', + url: '/spawn', + headers: { 'content-type': 'application/json' } + }, (res) => { + expect(res.statusCode).to.equal(200) + expect(res.result.id).to.exist() + expect(res.result.api.apiAddr).to.exist() + expect(res.result.api.gatewayAddr).to.exist() + + id = res.result.id + done() + }) + }) + }) + + describe('GET /api-addr', () => { + it('should return 200', (done) => { + server.inject({ + method: 'GET', + url: `/api-addr?id=${id}`, + headers: { 'content-type': 'application/json' }, + payload: { id } + }, (res) => { + expect(res.statusCode).to.equal(200) + expect(res.result.apiAddr).to.exist() + done() + }) + }) + + it('should return 400', (done) => { + server.inject({ + method: 'GET', + url: '/api-addr', + headers: { 'content-type': 'application/json' } + }, (res) => { + expect(res.statusCode).to.equal(400) + done() + }) + }) + }) + + describe('GET /getaway-addr', () => { + it('should return 200', (done) => { + server.inject({ + method: 'GET', + url: `/getaway-addr?id=${id}`, + headers: { 'content-type': 'application/json' }, + payload: { id } + }, (res) => { + expect(res.statusCode).to.equal(200) + expect(res.result.getawayAddr).to.exist() + done() + }) + }) + + it('should return 400', (done) => { + server.inject({ + method: 
'GET', + url: '/getaway-addr', + headers: { 'content-type': 'application/json' } + }, (res) => { + expect(res.statusCode).to.equal(400) + done() + }) + }) + }) + + describe('POST /init', () => { + it('should return 200', (done) => { + server.inject({ + method: 'POST', + url: `/init?id=${id}`, + headers: { 'content-type': 'application/json' }, + payload: { id } + }, (res) => { + expect(res.statusCode).to.equal(200) + done() + }) + }) + + it('should return 400', (done) => { + server.inject({ + method: 'POST', + url: '/init', + headers: { 'content-type': 'application/json' } + }, (res) => { + expect(res.statusCode).to.equal(400) + done() + }) + }) + }) + + describe('POST /cleanup', () => { + it('should return 200', (done) => { + server.inject({ + method: 'POST', + url: `/cleanup?id=${id}`, + headers: { 'content-type': 'application/json' }, + payload: { id } + }, (res) => { + expect(res.statusCode).to.equal(200) + done() + }) + }) + + it('should return 400', (done) => { + server.inject({ + method: 'POST', + url: '/cleanup', + headers: { 'content-type': 'application/json' } + }, (res) => { + expect(res.statusCode).to.equal(400) + done() + }) + }) + }) + + describe('POST /start', () => { + it('should return 200', (done) => { + server.inject({ + method: 'POST', + url: `/start?id=${id}`, + headers: { 'content-type': 'application/json' }, + payload: { id } + }, (res) => { + expect(res.statusCode).to.equal(200) + done() + }) + }) + + it('should return 400', (done) => { + server.inject({ + method: 'POST', + url: '/start', + headers: { 'content-type': 'application/json' } + }, (res) => { + expect(res.statusCode).to.equal(400) + done() + }) + }) + }) + + describe('POST /stop', () => { + it('should return 200', (done) => { + server.inject({ + method: 'POST', + url: `/stop?id=${id}`, + headers: { 'content-type': 'application/json' }, + payload: { id } + }, (res) => { + expect(res.statusCode).to.equal(200) + done() + }) + }) + + it('should return 400', (done) => { + server.inject({ + method: 'POST', + url: '/stop', + headers: { 'content-type': 'application/json' } + }, (res) => { + expect(res.statusCode).to.equal(400) + done() + }) + }) + }) + + describe('POST /kill', () => { + it('should return 200', (done) => { + server.inject({ + method: 'POST', + url: `/kill?id=${id}`, + headers: { 'content-type': 'application/json' }, + payload: { id } + }, (res) => { + expect(res.statusCode).to.equal(200) + done() + }) + }) + + it('should return 400', (done) => { + server.inject({ + method: 'POST', + url: '/kill', + headers: { 'content-type': 'application/json' } + }, (res) => { + expect(res.statusCode).to.equal(400) + done() + }) + }) + }) + + describe('GET /pid', () => { + it('should return 200', (done) => { + server.inject({ + method: 'GET', + url: `/pid?id=${id}`, + headers: { 'content-type': 'application/json' }, + payload: { id } + }, (res) => { + expect(res.statusCode).to.equal(200) + done() + }) + }) + + it('should return 400', (done) => { + server.inject({ + method: 'GET', + url: '/pid', + headers: { 'content-type': 'application/json' } + }, (res) => { + expect(res.statusCode).to.equal(400) + done() + }) + }) + }) + + describe('GET /config', () => { + it('should return 200', (done) => { + server.inject({ + method: 'GET', + url: `/config?id=${id}`, + headers: { 'content-type': 'application/json' }, + payload: { id } + }, (res) => { + expect(res.statusCode).to.equal(200) + done() + }) + }) + + it('should return 400', (done) => { + server.inject({ + method: 'GET', + url: '/config', + headers: { 'content-type': 
'application/json' } + }, (res) => { + expect(res.statusCode).to.equal(400) + done() + }) + }) + }) + + describe('PUT /config', () => { + it('should return 200', (done) => { + server.inject({ + method: 'PUT', + url: `/config?id=${id}`, + headers: { 'content-type': 'application/json' }, + payload: { key: 'foo', value: 'bar' } + }, (res) => { + expect(res.statusCode).to.equal(200) + done() + }) + }) + + it('should return 400', (done) => { + server.inject({ + method: 'PUT', + url: '/config', + headers: { 'content-type': 'application/json' } + }, (res) => { + expect(res.statusCode).to.equal(400) + done() + }) + }) + }) +}) diff --git a/test/remote/server.js b/test/remote/server.js new file mode 100644 index 00000000..db98b904 --- /dev/null +++ b/test/remote/server.js @@ -0,0 +1,30 @@ +/* eslint-env mocha */ +'use strict' + +const chai = require('chai') +const dirtyChai = require('dirty-chai') +const expect = chai.expect +chai.use(dirtyChai) + +const Server = require('../../src/remote-node/server') +const portUsed = require('detect-port') + +describe('server', () => { + let server + before((done) => { + server = new Server() + server.start(done) + }) + + it('should start', (done) => { + portUsed(9999, (err, port) => { + expect(err).to.not.exist() + expect(port !== 9999).to.be.ok() + done() + }) + }) + + it('should stop', (done) => { + server.stop(done) + }) +}) diff --git a/test/spawning-daemons.spec.js b/test/spawning-daemons.spec.js deleted file mode 100644 index 3d8e8d8f..00000000 --- a/test/spawning-daemons.spec.js +++ /dev/null @@ -1,455 +0,0 @@ -/* eslint-env mocha */ -/* eslint max-nested-callbacks: ["error", 8] */ -'use strict' - -const async = require('async') -const chai = require('chai') -const dirtyChai = require('dirty-chai') -const expect = chai.expect -chai.use(dirtyChai) -const ipfsApi = require('ipfs-api') -const multiaddr = require('multiaddr') -const fs = require('fs') -const rimraf = require('rimraf') -const path = require('path') -const once = require('once') -const os = require('os') - -const exec = require('../src/exec') -const ipfsd = require('../src') - -const isWindows = os.platform() === 'win32' -describe('daemon spawning', function () { - this.timeout(60 * 1000) - - describe('local daemon', () => { - const repoPath = path.join(os.tmpdir(), 'ipfsd-ctl-test') - const addr = '/ip4/127.0.0.1/tcp/5678' - const config = { - Addresses: { - API: addr - } - } - - it('allows passing flags to init', (done) => { - async.waterfall([ - (cb) => ipfsd.local(repoPath, config, cb), - (node, cb) => { - async.series([ - (cb) => node.init(cb), - (cb) => node.getConfig('Addresses.API', cb) - ], (err, res) => { - expect(err).to.not.exist() - expect(res[1]).to.be.eql(addr) - rimraf(repoPath, cb) - }) - } - ], done) - }) - }) - - describe('disposable daemon', () => { - const blorb = Buffer.from('blorb') - let ipfs - let store - let retrieve - - beforeEach((done) => { - async.waterfall([ - (cb) => ipfs.block.put(blorb, cb), - (block, cb) => { - store = block.cid.toBaseEncodedString() - ipfs.block.get(store, cb) - }, - (_block, cb) => { - retrieve = _block.data - cb() - } - ], done) - }) - - describe('without api instance (.disposable)', () => { - before((done) => { - async.waterfall([ - (cb) => ipfsd.disposable(cb), - (node, cb) => { - node.startDaemon((err) => { - expect(err).to.not.exist() - ipfs = ipfsApi(node.apiAddr) - cb() - }) - } - ], done) - }) - - it('should have started the daemon and returned an api', () => { - expect(ipfs).to.exist() - expect(ipfs.id).to.exist() - }) - - 
it('should be able to store objects', () => { - expect(store) - .to.eql('QmPv52ekjS75L4JmHpXVeuJ5uX2ecSfSZo88NSyxwA3rAQ') - }) - - it('should be able to retrieve objects', () => { - expect(retrieve.toString()).to.be.eql('blorb') - }) - }) - - describe('with api instance (.disposableApi)', () => { - before((done) => { - ipfsd.disposableApi((err, api) => { - expect(err).to.not.exist() - - ipfs = api - done() - }) - }) - - it('should have started the daemon and returned an api with host/port', () => { - expect(ipfs).to.have.property('id') - expect(ipfs).to.have.property('apiHost') - expect(ipfs).to.have.property('apiPort') - }) - - it('should be able to store objects', () => { - expect(store) - .to.equal('QmPv52ekjS75L4JmHpXVeuJ5uX2ecSfSZo88NSyxwA3rAQ') - }) - - it('should be able to retrieve objects', () => { - expect(retrieve.toString()).to.equal('blorb') - }) - }) - }) - - describe('starting and stopping', () => { - let node - - describe('init', () => { - before((done) => { - ipfsd.disposable((err, res) => { - if (err) { - done(err) - } - node = res - done() - }) - }) - - it('should returned a node', () => { - expect(node).to.exist() - }) - - it('daemon should not be running', () => { - expect(node.daemonPid()).to.not.exist() - }) - }) - - let pid - - describe('starting', () => { - let ipfs - - before((done) => { - node.startDaemon((err, res) => { - expect(err).to.not.exist() - - pid = node.daemonPid() - ipfs = res - - // actually running? - done = once(done) - exec('kill', ['-0', pid], { cleanup: true }, () => done()) - }) - }) - - it('should be running', () => { - expect(ipfs.id).to.exist() - }) - }) - - describe('stopping', () => { - let stopped = false - - before((done) => { - node.stopDaemon((err) => { - expect(err).to.not.exist() - stopped = true - }) - - // make sure it's not still running - const poll = setInterval(() => { - exec('kill', ['-0', pid], { cleanup: true }, { - error () { - clearInterval(poll) - done() - // so it does not get called again - done = () => {} - } - }) - }, 100) - }) - - it('should be stopped', () => { - expect(node.daemonPid()).to.not.exist() - expect(stopped).to.equal(true) - }) - }) - }) - - describe('setting up and init a local node', () => { - const testpath1 = path.join(os.tmpdir(), 'ipfstestpath1') - - describe('cleanup', () => { - before((done) => { - rimraf(testpath1, done) - }) - - it('should not have a directory', () => { - expect(fs.existsSync(testpath1)).to.be.eql(false) - }) - }) - - describe('setup', () => { - let node - before((done) => { - ipfsd.local(testpath1, (err, res) => { - if (err) { - return done(err) - } - node = res - done() - }) - }) - - it('should have returned a node', () => { - expect(node).to.exist() - }) - - it('should not be initialized', () => { - expect(node.initialized).to.be.eql(false) - }) - - describe('initialize', () => { - before((done) => { - node.init(done) - }) - - it('should have made a directory', () => { - expect(fs.existsSync(testpath1)).to.be.eql(true) - }) - - it('should be initialized', () => { - expect(node.initialized).to.be.eql(true) - }) - - it('should be initialized', () => { - expect(node.initialized).to.be.eql(true) - }) - }) - }) - }) - - describe('change config of a disposable node', () => { - let ipfsNode - - before((done) => { - ipfsd.disposable((err, node) => { - if (err) { - return done(err) - } - ipfsNode = node - done() - }) - }) - - it('Should return a config value', (done) => { - ipfsNode.getConfig('Bootstrap', (err, config) => { - expect(err).to.not.exist() - expect(config).to.exist() - 
done() - }) - }) - - it('Should return the whole config', (done) => { - ipfsNode.getConfig((err, config) => { - expect(err).to.not.exist() - expect(config).to.exist() - done() - }) - }) - - it('Should set a config value', (done) => { - async.series([ - (cb) => ipfsNode.setConfig('Bootstrap', 'null', cb), - (cb) => ipfsNode.getConfig('Bootstrap', cb) - ], (err, res) => { - expect(err).to.not.exist() - expect(res[1]).to.be.eql('null') - done() - }) - }) - - it('should give an error if setting an invalid config value', (done) => { - ipfsNode.setConfig('Bootstrap', 'true', (err) => { - expect(err.message).to.match(/failed to set config value/) - done() - }) - }) - }) - - it('allows passing via $IPFS_EXEC', (done) => { - process.env.IPFS_EXEC = '/some/path' - ipfsd.local((err, node) => { - expect(err).to.not.exist() - expect(node.exec).to.be.eql('/some/path') - - process.env.IPFS_EXEC = '' - done() - }) - }) - - it('prints the version', (done) => { - ipfsd.version((err, version) => { - expect(err).to.not.exist() - expect(version).to.equal('ipfs version 0.4.13') - done() - }) - }) - - describe('ipfs-api version', () => { - let ipfs - - before((done) => { - ipfsd.disposable((err, node) => { - expect(err).to.not.exist() - node.startDaemon((err, ignore) => { - expect(err).to.not.exist() - ipfs = ipfsApi(node.apiAddr) - done() - }) - }) - }) - - it('uses the correct ipfs-api', (done) => { - ipfs.util.addFromFs(path.join(__dirname, 'fixtures/'), { - recursive: true - }, (err, res) => { - expect(err).to.not.exist() - - const added = res[res.length - 1] - - // Temporary: Need to see what is going on on windows - expect(res).to.deep.equal([ - { - path: 'fixtures/test.txt', - hash: 'Qmf412jQZiuVUtdgnB36FXFX7xg5V6KEbSJ4dpQuhkLyfD', - size: 19 - }, - { - path: 'fixtures', - hash: 'QmXkiTdnfRJjiQREtF5dWf2X4V9awNHQSn9YGofwVY4qUU', - size: 73 - } - ]) - - expect(res.length).to.equal(2) - expect(added).to.have.property('path', 'fixtures') - expect(added).to.have.property( - 'hash', - 'QmXkiTdnfRJjiQREtF5dWf2X4V9awNHQSn9YGofwVY4qUU' - ) - expect(res[0]).to.have.property('path', 'fixtures/test.txt') - expect(res[0]).to.have.property( - 'hash', - 'Qmf412jQZiuVUtdgnB36FXFX7xg5V6KEbSJ4dpQuhkLyfD' - ) - done() - }) - }) - }) - - describe('startDaemon', () => { - it('start and stop', (done) => { - const dir = `${os.tmpdir()}/tmp-${Date.now() + '-' + Math.random().toString(36)}` - - const check = (cb) => { - // skip on windows - // https://github.com/ipfs/js-ipfsd-ctl/pull/155#issuecomment-326983530 - if (!isWindows) { - if (fs.existsSync(path.join(dir, 'repo.lock'))) { - cb(new Error('repo.lock not removed')) - } - if (fs.existsSync(path.join(dir, 'api'))) { - cb(new Error('api file not removed')) - } - } - cb() - } - - async.waterfall([ - (cb) => ipfsd.local(dir, cb), - (node, cb) => node.init((err) => cb(err, node)), - (node, cb) => node.startDaemon((err) => cb(err, node)), - (node, cb) => node.stopDaemon(cb), - check, - (cb) => ipfsd.local(dir, cb), - (node, cb) => node.startDaemon((err) => cb(err, node)), - (node, cb) => node.stopDaemon(cb), - check, - (cb) => ipfsd.local(dir, cb), - (node, cb) => node.startDaemon((err) => cb(err, node)), - (node, cb) => node.stopDaemon(cb), - check - ], done) - }) - - it('starts the daemon and returns valid API and gateway addresses', (done) => { - const dir = `${os.tmpdir()}/tmp-${Date.now() + '-' + Math.random().toString(36)}` - - async.waterfall([ - (cb) => ipfsd.local(dir, cb), - (daemon, cb) => daemon.init((err) => cb(err, daemon)), - (daemon, cb) => 
daemon.startDaemon((err, api) => cb(err, daemon, api)) - ], (err, daemon, api) => { - expect(err).to.not.exist() - - // Check for props in daemon - expect(daemon).to.have.property('apiAddr') - expect(daemon).to.have.property('gatewayAddr') - expect(daemon.apiAddr).to.not.equal(null) - expect(multiaddr.isMultiaddr(daemon.apiAddr)).to.equal(true) - expect(daemon.gatewayAddr).to.not.equal(null) - expect(multiaddr.isMultiaddr(daemon.gatewayAddr)).to.equal(true) - - // Check for props in ipfs-api instance - expect(api).to.have.property('apiHost') - expect(api).to.have.property('apiPort') - expect(api).to.have.property('gatewayHost') - expect(api).to.have.property('gatewayPort') - expect(api.apiHost).to.equal('127.0.0.1') - expect(api.apiPort).to.equal('5001') - expect(api.gatewayHost).to.equal('127.0.0.1') - expect(api.gatewayPort).to.equal('8080') - - daemon.stopDaemon(done) - }) - }) - - it('allows passing flags', (done) => { - ipfsd.disposable((err, node) => { - expect(err).to.not.exist() - - node.startDaemon(['--should-not-exist'], (err) => { - expect(err).to.exist() - expect(err.message) - .to.match(/Unrecognized option 'should-not-exist'/) - - done() - }) - }) - }) - }) -}) diff --git a/test/spawning.js b/test/spawning.js new file mode 100644 index 00000000..483b6367 --- /dev/null +++ b/test/spawning.js @@ -0,0 +1,315 @@ +/* eslint-env mocha */ +/* eslint max-nested-callbacks: ["error", 8] */ +'use strict' + +const async = require('async') +const chai = require('chai') +const dirtyChai = require('dirty-chai') +const expect = chai.expect +chai.use(dirtyChai) + +const fs = require('fs') +const tempDir = require('../src/utils').tempDir +const isNode = require('detect-node') +const hat = require('hat') + +const addRetrieveTests = require('./add-retrive') + +module.exports = (df, type, exec) => { + return () => { + const VERSION_STRING = type === 'js' + ? 
`js-ipfs version: ${require('ipfs/package.json').version}` + : 'ipfs version 0.4.13' + + describe('daemon spawning', () => { + it('prints the version', function (done) { + if (!isNode || type === 'proc') { + this.skip() + } + df.version({ exec }, (err, version) => { + expect(err).to.not.exist() + expect(version).to.be.eql(VERSION_STRING) + done() + }) + }) + + describe('spawn a bare node', function () { + this.ipfsd = null + + after(function (done) { + this.timeout(50 * 1000) + this.ipfsd.stop(done) + }) + + it('create node', function (done) { + df.spawn({ exec, init: false, start: false, disposable: true }, (err, ipfsd) => { + expect(err).to.not.exist() + expect(ipfsd).to.exist() + expect(ipfsd.api).to.not.exist() + this.ipfsd = ipfsd + done() + }) + }) + + it('init node', function (done) { + this.timeout(50 * 1000) + this.ipfsd.init((err) => { + expect(err).to.not.exist() + expect(this.ipfsd.initialized).to.be.ok() + done() + }) + }) + + it('start node', function (done) { + this.timeout(50 * 1000) + this.ipfsd.start((err, api) => { + expect(err).to.not.exist() + expect(api).to.exist() + expect(api.id).to.exist() + done() + }) + }) + + addRetrieveTests() + }) + + describe('spawn an initialized node', function () { + this.ipfsd = null + + after(function (done) { + this.timeout(50 * 1000) + this.ipfsd.stop(done) + }) + + it('create node and init', function (done) { + this.timeout(50 * 1000) + df.spawn({ exec, start: false, disposable: true }, (err, ipfsd) => { + expect(err).to.not.exist() + expect(ipfsd).to.exist() + expect(ipfsd.api).to.not.exist() + this.ipfsd = ipfsd + done() + }) + }) + + it('start node', function (done) { + this.timeout(50 * 1000) + this.ipfsd.start((err, api) => { + expect(err).to.not.exist() + expect(api).to.exist() + expect(api.id).to.exist() + done() + }) + }) + + addRetrieveTests() + }) + + describe('spawn a node and attach api', () => { + this.ipfsd = null + + after(function (done) { + this.timeout(50 * 1000) + this.ipfsd.stop(done) + }) + + it('create init and start node', function (done) { + this.timeout(50 * 1000) + df.spawn({ exec }, (err, ipfsd) => { + expect(err).to.not.exist() + expect(ipfsd).to.exist() + expect(ipfsd.api).to.exist() + expect(ipfsd.api.id).to.exist() + this.ipfsd = ipfsd + done() + }) + }) + + addRetrieveTests() + }) + + describe('spawn a node and pass init options', () => { + const addr = '/ip4/127.0.0.1/tcp/5678' + const swarmAddr1 = '/ip4/127.0.0.1/tcp/35666' + const config = { + Addresses: { + Swarm: [ + swarmAddr1 + ], + API: addr + } + } + + it('allows passing ipfs config options to spawn', function (done) { + this.timeout(60 * 1000) + const options = { + config: config, + exec: exec + } + + let ipfsd + async.waterfall([ + (cb) => df.spawn(options, cb), + (res, cb) => { + ipfsd = res + ipfsd.getConfig('Addresses.API', (err, res) => { + expect(err).to.not.exist() + expect(res).to.be.eql(addr) + cb() + }) + }, + (cb) => { + ipfsd.getConfig('Addresses.Swarm', (err, res) => { + expect(err).to.not.exist() + if (typeof res === 'string') { + res = JSON.parse(res) + } + expect(res).to.deep.eql([swarmAddr1]) + cb() + }) + } + ], (err) => { + expect(err).to.not.exist() + ipfsd.stop(done) + }) + }) + }) + + describe('spawn a node on custom repo path', function () { + if (!isNode) { + return + } + + this.ipfsd = null + it('allows passing custom repo path to spawn', function (done) { + this.timeout(50 * 1000) + + const repoPath = tempDir(type) + + const config = { + Addresses: { + Swarm: [ + '/ip4/127.0.0.1/tcp/0/ws', + '/ip4/127.0.0.1/tcp/0' + 
], + API: '/ip4/127.0.0.1/tcp/0' + } + } + + async.series([ + (cb) => df.spawn({ exec, repoPath, disposable: false, config }, (err, node) => { + expect(err).to.not.exist() + this.ipfsd = node + cb() + }), + (cb) => this.ipfsd.init(cb), + (cb) => this.ipfsd.start(cb) + ], (err) => { + expect(err).to.not.exist() + expect(fs.existsSync(repoPath)).to.be.ok() + done() + }) + }) + + addRetrieveTests() + + after(function (done) { + this.ipfsd.stop(() => { + this.ipfsd.cleanup(done) + }) + }) + }) + + describe('spawn a node with custom arguments', function () { + if (!isNode && type !== 'proc') { + return + } + + this.ipfsd = null + this.timeout(50 * 1000) + const topic = `test-topic-${hat()}` + + before(function (done) { + df.spawn({ exec, args: ['--enable-pubsub-experiment'] }, (err, node) => { + expect(err).to.not.exist() + this.ipfsd = node + done() + }) + }) + + after(function (done) { this.ipfsd.stop(done) }) + + it('should start with pubsub enabled', function (done) { + const handler = (msg) => { + expect(msg.data.toString()).to.equal('hi') + expect(msg).to.have.property('seqno') + expect(Buffer.isBuffer(msg.seqno)).to.eql(true) + expect(msg).to.have.property('topicIDs').eql([topic]) + done() + } + + this.ipfsd.api.pubsub.subscribe(topic, handler, (err) => { + expect(err).to.not.exist() + this.ipfsd.api.pubsub.publish(topic, Buffer.from('hi')) + }) + }) + }) + + describe('change config of a disposable node', () => { + let ipfsd + + before(function (done) { + this.timeout(50 * 1000) + df.spawn({ exec }, (err, res) => { + if (err) { + return done(err) + } + ipfsd = res + done() + }) + }) + + after((done) => ipfsd.stop(done)) + + it('Should return a config value', (done) => { + ipfsd.getConfig('Bootstrap', (err, config) => { + expect(err).to.not.exist() + expect(config).to.exist() + done() + }) + }) + + it('Should return the whole config', (done) => { + ipfsd.getConfig((err, config) => { + expect(err).to.not.exist() + expect(config).to.exist() + done() + }) + }) + + it('Should set a config value', function (done) { + this.timeout(30 * 1000) + async.series([ + (cb) => ipfsd.setConfig('Bootstrap', 'null', cb), + (cb) => ipfsd.getConfig('Bootstrap', cb) + ], (err, res) => { + expect(err).to.not.exist() + expect(res[1]).to.be.eql('null') + done() + }) + }) + + it('should give an error if setting an invalid config value', function (done) { + if (type !== 'go') { + this.skip() // js doesn't fail on invalid config + } else { + ipfsd.setConfig('Bootstrap', 'true', (err) => { + expect(err.message).to.match(/(?:Error: )?failed to set config value/mgi) + done() + }) + } + }) + }) + }) + } +} diff --git a/test/start-stop.js b/test/start-stop.js new file mode 100644 index 00000000..f716d44f --- /dev/null +++ b/test/start-stop.js @@ -0,0 +1,232 @@ +/* eslint-env mocha */ +/* eslint max-nested-callbacks: ["error", 8] */ +'use strict' + +const chai = require('chai') +const dirtyChai = require('dirty-chai') +const expect = chai.expect +chai.use(dirtyChai) + +const async = require('async') +const fs = require('fs') +const path = require('path') +const os = require('os') +const isrunning = require('is-running') + +const isWindows = os.platform() === 'win32' +const findIpfsExecutable = require('../src/utils').findIpfsExecutable +const tempDir = require('../src/utils').tempDir + +const DaemonFactory = require('../src') + +module.exports = (type) => { + return () => { + const df = DaemonFactory.create({ type }) + describe('starting and stopping', () => { + if (isWindows) { + return + } + + let ipfsd + + 
describe(`create and init a node (ipfsd)`, function () { + this.timeout(50 * 1000) + before((done) => { + df.spawn({ init: true, start: false, disposable: true }, (err, daemon) => { + expect(err).to.not.exist() + expect(daemon).to.exist() + + ipfsd = daemon + done() + }) + }) + + it('should return a node', () => { + expect(ipfsd).to.exist() + }) + + it('daemon should not be running', (done) => { + ipfsd.pid((pid) => { + expect(pid).to.not.exist() + done() + }) + }) + }) + + let pid + describe('starting', () => { + let api + + before(function (done) { + this.timeout(50 * 1000) + ipfsd.start((err, ipfs) => { + expect(err).to.not.exist() + + ipfsd.pid((_pid) => { + pid = _pid + api = ipfs + + // actually running? + expect(isrunning(pid)).to.be.ok() + done() + }) + }) + }) + + it('should be running', () => { + expect(api.id).to.exist() + }) + }) + + describe('stopping', () => { + let stopped = false + + before(function (done) { + this.timeout(20 * 1000) + ipfsd.stop((err) => { + expect(err).to.not.exist() + let tries = 5 + const interval = setInterval(() => { + const running = isrunning(pid) + if (!running || tries-- <= 0) { + clearInterval(interval) + expect(running).to.not.be.ok() + stopped = true + done() + } + }, 200) + }) + }) + + it('should be stopped', function (done) { + this.timeout(30 * 1000) // shutdown grace period is already 10500 + ipfsd.pid((pid) => { + expect(pid).to.not.exist() + expect(stopped).to.equal(true) + expect(fs.existsSync(path.join(ipfsd.path, 'repo.lock'))).to.not.be.ok() + expect(fs.existsSync(path.join(ipfsd.path, 'api'))).to.not.be.ok() + done() + }) + }) + }) + }) + + describe('starting and stopping on custom exec path', () => { + let ipfsd + + describe(`create and init a node (ipfsd) on custom exec path`, function () { + this.timeout(50 * 1000) + const exec = findIpfsExecutable(type) + before((done) => { + df.spawn({ exec }, (err, daemon) => { + expect(err).to.not.exist() + expect(daemon).to.exist() + + ipfsd = daemon + done() + }) + }) + + after((done) => ipfsd.stop(done)) + + it('should return a node', () => { + expect(ipfsd).to.exist() + }) + + it('ipfsd.exec should match exec', () => { + expect(ipfsd.exec).to.equal(exec) + }) + }) + + describe(`should fail on invalid exec path`, function () { + this.timeout(20 * 1000) + const exec = path.join('invalid', 'exec', 'ipfs') + before((done) => { + df.spawn({ + init: false, + start: false, + exec + }, (err, daemon) => { + expect(err).to.not.exist() + expect(daemon).to.exist() + + ipfsd = daemon + done() + }) + }) + + it('should fail on init', (done) => { + ipfsd.init((err, node) => { + expect(err).to.exist() + expect(node).to.not.exist() + done() + }) + }) + }) + }) + + describe('starting and stopping multiple times', () => { + let ipfsd + + describe(`create and init a node (ipfsd)`, function () { + this.timeout(50 * 1000) + before((done) => { + async.series([ + (cb) => df.spawn({ + init: false, + start: false, + disposable: false, + repoPath: tempDir(type) + }, (err, daemon) => { + expect(err).to.not.exist() + expect(daemon).to.exist() + + ipfsd = daemon + cb() + }), + (cb) => ipfsd.init(cb), + (cb) => ipfsd.start(cb) + ], done) + }) + + it('should return a node', () => { + expect(ipfsd).to.exist() + }) + + it('daemon should not be running', (done) => { + ipfsd.pid((pid) => { + expect(pid).to.exist() + done() + }) + }) + + it('should stop', (done) => { + ipfsd.stop((err) => { + expect(err).to.not.exist() + ipfsd.pid((pid) => { + expect(pid).to.not.exist() + done() + }) + }) + }) + + it('should start', (done) => 
{ + ipfsd.start((err) => { + expect(err).to.not.exist() + ipfsd.pid((pid) => { + expect(pid).to.exist() + done() + }) + }) + }) + + it('should stop and cleanup', (done) => { + ipfsd.stop((err) => { + expect(err).to.not.exist() + ipfsd.cleanup(done) + }) + }) + }) + }) + } +} diff --git a/test/utils.js b/test/utils.js new file mode 100644 index 00000000..f19c89b6 --- /dev/null +++ b/test/utils.js @@ -0,0 +1,83 @@ +/* eslint-env mocha */ +/* eslint max-nested-callbacks: ["error", 8] */ +'use strict' + +const chai = require('chai') +const dirtyChai = require('dirty-chai') +const expect = chai.expect +chai.use(dirtyChai) + +const fs = require('fs') +const path = require('path') +const utils = require('../src/utils') +const flatten = utils.flatten +const tempDir = utils.tempDir +const findIpfsExecutable = utils.findIpfsExecutable +const createRepo = utils.createRepo + +const IPFSRepo = require('ipfs-repo') + +describe('utils', () => { + describe('.flatten', () => { + it('should flatten', () => { + expect(flatten({ a: { b: { c: [1, 2, 3] } } })).to.deep.equal({ 'a.b.c': [1, 2, 3] }) + }) + + it('should handle nulls', () => { + expect(flatten(null)).to.deep.equal({}) + }) + + it('should handle undefined', () => { + expect(flatten(undefined)).to.deep.equal({}) + }) + }) + + describe('.tempDir', () => { + it('should create tmp directory path for go-ipfs', () => { + const tmpDir = tempDir() + expect(tmpDir).to.exist() + expect(tmpDir).to.include('ipfs_') + }) + + it('should create tmp directory path for js-ipfs', () => { + const tmpDir = tempDir(true) + expect(tmpDir).to.exist() + expect(tmpDir).to.include('jsipfs_') + }) + }) + + describe('.findIpfsExecutable', () => { + it('should find go executable', () => { + const execPath = findIpfsExecutable('go', __dirname) + expect(execPath).to.exist() + expect(execPath).to.include(path.join('go-ipfs-dep', 'go-ipfs', 'ipfs')) + expect(fs.existsSync(execPath)).to.be.ok() + }) + + it('should find go executable', () => { + const execPath = findIpfsExecutable('js', __dirname) + expect(execPath).to.exist() + expect(execPath).to.include(path.join('ipfs', 'src', 'cli', 'bin.js')) + expect(fs.existsSync(execPath)).to.be.ok() + }) + }) + + describe('.createRepo', () => { + let repo = null + let repoPath = tempDir() + it('should create repo', () => { + repo = createRepo(repoPath) + expect(repo).to.exist() + expect(repo).to.be.instanceOf(IPFSRepo) + expect(fs.existsSync(repoPath)).to.be.ok() + }) + + it('should cleanup repo', (done) => { + repo.teardown((err) => { + expect(err).to.not.exist() + expect(!fs.existsSync(repoPath)).to.be.ok() + done() + }) + }) + }) +})
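
For reference, the new remote-control pieces introduced in this diff (the Hapi `Server` in `src/remote-node/server.js`, the routes in `src/remote-node/routes.js`, and the `RemoteController`/`RemoteFactory` client in `src/remote-node/client.js`) compose roughly as follows. This is a minimal sketch and not part of the patch: it assumes it is run from the repository root (hence the `./src/remote-node` require path) and that a daemon executable is available via the `go-ipfs-dep` dependency described in the README.

```js
'use strict'

// Sketch only: wire the remote-control HTTP server to its client.
const { Server, RemoteController } = require('./src/remote-node')

const port = 9999
const server = new Server(port)                     // Hapi server exposing /spawn, /start, /stop, ...
const remote = new RemoteController({ port, type: 'go' })

server.start((err) => {
  if (err) { throw err }

  // Spawns a disposable daemon server-side and returns a proxy node
  remote.spawn((err, node) => {
    if (err) { throw err }

    console.log('daemon API at', node.apiAddr.toString())

    // Stop the remote daemon, then shut the control server down
    node.stop(() => server.stop())
  })
})
```

The same flow is what the mocked client tests and the `test/remote/routes.js` injections exercise, just without a real daemon behind the routes.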