Skip to content

Commit

Permalink
more work on bitswap and other code cleanup
Browse files Browse the repository at this point in the history
  • Loading branch information
whyrusleeping committed Aug 25, 2014
1 parent 91e4675 commit 678db4f
Show file tree
Hide file tree
Showing 7 changed files with 121 additions and 30 deletions.
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,4 @@
*.swp
.ipfsconfig
*.out
*.test
9 changes: 9 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,15 @@ Guidelines:
- if you'd like to work on ipfs part-time (20+ hrs/wk) or full-time (40+ hrs/wk), contact [@jbenet](https://github.com/jbenet)
- have fun!

## Todo

IPFS is still under heavy development; there is a lot to be done!

- [ ] Finish Bitswap
- [ ] Connect fuse interface to Blockservice
- [ ] Write tests for bitswap
- [ ] Come up with more TODO items

## Development Dependencies

If you make changes to the protocol buffers, you will need to install the [protoc compiler](https://code.google.com/p/protobuf/downloads/list).
Expand Down
53 changes: 29 additions & 24 deletions bitswap/bitswap.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import (
blocks "github.com/jbenet/go-ipfs/blocks"
peer "github.com/jbenet/go-ipfs/peer"
routing "github.com/jbenet/go-ipfs/routing"
dht "github.com/jbenet/go-ipfs/routing/dht"
swarm "github.com/jbenet/go-ipfs/swarm"
u "github.com/jbenet/go-ipfs/util"

Expand Down Expand Up @@ -36,7 +37,7 @@ type BitSwap struct {
datastore ds.Datastore

// routing interface for communication
routing routing.IpfsRouting
routing *dht.IpfsDHT

listener *swarm.MesListener

Expand All @@ -63,7 +64,7 @@ func NewBitSwap(p *peer.Peer, net swarm.Network, d ds.Datastore, r routing.IpfsR
datastore: d,
partners: LedgerMap{},
wantList: KeySet{},
routing: r,
routing: r.(*dht.IpfsDHT),
meschan: net.GetChannel(swarm.PBWrapper_BITSWAP),
haltChan: make(chan struct{}),
}
Expand All @@ -76,32 +77,32 @@ func NewBitSwap(p *peer.Peer, net swarm.Network, d ds.Datastore, r routing.IpfsR
func (bs *BitSwap) GetBlock(k u.Key, timeout time.Duration) (
*blocks.Block, error) {
begin := time.Now()
provs, err := bs.routing.FindProviders(k, timeout)
if err != nil {
u.PErr("GetBlock error: %s\n", err)
return nil, err
}
tleft := timeout - time.Now().Sub(begin)
provs_ch := bs.routing.FindProvidersAsync(k, 20, timeout)

valchan := make(chan []byte)
after := time.After(tleft)
for _, p := range provs {
go func(pr *peer.Peer) {
ledger := bs.GetLedger(pr.Key())
blk, err := bs.getBlock(k, pr, tleft)
if err != nil {
u.PErr("%v\n", err)
return
}
// NOTE: this credits everyone who sends us a block,
// even if we dont use it
ledger.ReceivedBytes(uint64(len(blk)))
select {
case valchan <- blk:
default:
}
}(p)
}

// TODO: when the data is received, shut down this for loop
go func() {
for p := range provs_ch {
go func(pr *peer.Peer) {
ledger := bs.GetLedger(pr.Key())
blk, err := bs.getBlock(k, pr, tleft)
if err != nil {
u.PErr("%v\n", err)
return
}
// NOTE: this credits everyone who sends us a block,
				// even if we don't use it
ledger.ReceivedBytes(uint64(len(blk)))
select {
case valchan <- blk:
default:
}
}(p)
}
}()

select {
case blkdata := <-valchan:
Expand Down Expand Up @@ -213,3 +214,7 @@ func (bs *BitSwap) GetLedger(k u.Key) *Ledger {
bs.partners[k] = l
return l
}

// Halt signals the BitSwap run loop to stop by sending on haltChan.
// NOTE(review): this is a blocking send on an unbuffered channel — it will
// hang forever if the run loop has already exited; confirm exactly one
// receiver is always draining haltChan, or consider close() semantics.
func (bs *BitSwap) Halt() {
	bs.haltChan <- struct{}{}
}
8 changes: 5 additions & 3 deletions blocks/blocks_test.go → blockservice/blocks_test.go
Original file line number Diff line number Diff line change
@@ -1,11 +1,13 @@
package blocks
package blockservice

import (
"bytes"
"fmt"
"testing"

ds "github.com/jbenet/datastore.go"
blocks "github.com/jbenet/go-ipfs/blocks"
u "github.com/jbenet/go-ipfs/util"
"testing"
)

func TestBlocks(t *testing.T) {
Expand All @@ -17,7 +19,7 @@ func TestBlocks(t *testing.T) {
return
}

b, err := NewBlock([]byte("beep boop"))
b, err := blocks.NewBlock([]byte("beep boop"))
if err != nil {
t.Error("failed to construct block", err)
return
Expand Down
9 changes: 7 additions & 2 deletions importer/importer.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,11 @@ package importer

import (
"fmt"
dag "github.com/jbenet/go-ipfs/merkledag"
"io"
"io/ioutil"
"os"

dag "github.com/jbenet/go-ipfs/merkledag"
)

// BlockSizeLimit specifies the maximum size an imported block can have.
Expand All @@ -23,12 +24,16 @@ func NewDagFromReader(r io.Reader, size int64) (*dag.Node, error) {
// todo: block-splitting based on rabin fingerprinting
// todo: block-splitting with user-defined function
// todo: block-splitting at all. :P
	// todo: write more todos

// totally just trusts the reported size. fix later.
if size > BlockSizeLimit { // 1 MB limit for now.
return nil, ErrSizeLimitExceeded
}

	// Ensure that we don't get stuck reading way too much data
r = io.LimitReader(r, BlockSizeLimit)

// we're doing it live!
buf, err := ioutil.ReadAll(r)
if err != nil {
Expand All @@ -52,7 +57,7 @@ func NewDagFromFile(fpath string) (*dag.Node, error) {
}

if stat.IsDir() {
return nil, fmt.Errorf("`fpath` is a directory")
return nil, fmt.Errorf("`%s` is a directory", fpath)
}

f, err := os.Open(fpath)
Expand Down
56 changes: 55 additions & 1 deletion routing/dht/routing.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ import (

// PutValue adds value corresponding to given Key.
// This is the top level "Store" operation of the DHT
func (dht *IpfsDHT) PutValue(key u.Key, value []byte) {
func (dht *IpfsDHT) PutValue(key u.Key, value []byte) error {
complete := make(chan struct{})
count := 0
for _, route := range dht.routingTables {
Expand All @@ -45,6 +45,7 @@ func (dht *IpfsDHT) PutValue(key u.Key, value []byte) {
for i := 0; i < count; i++ {
<-complete
}
return nil
}

// GetValue searches for the value corresponding to given Key.
Expand Down Expand Up @@ -183,6 +184,59 @@ func (dht *IpfsDHT) Provide(key u.Key) error {
return nil
}

// FindProvidersAsync searches for up to count peers that can provide the
// value for key, delivering them on the returned channel as they are found.
// Locally-known providers are sent first; if more are needed, the Alpha
// nearest peers in the routing table are queried concurrently.
// The channel is buffered to count so senders do not block.
// NOTE(review): the channel is never closed, so callers ranging over it must
// enforce their own timeout — confirm this is the intended contract.
func (dht *IpfsDHT) FindProvidersAsync(key u.Key, count int, timeout time.Duration) chan *peer.Peer {
	peerOut := make(chan *peer.Peer, count)
	go func() {
		ps := newPeerSet()
		// Serve providers we already know about before going to the network.
		provs := dht.providers.GetProviders(key)
		for _, p := range provs {
			count--
			// NOTE: assuming that the list of peers is unique
			ps.Add(p)
			peerOut <- p
			if count <= 0 {
				return
			}
		}

		peers := dht.routingTables[0].NearestPeers(kb.ConvertKey(key), AlphaValue)
		for _, pp := range peers {
			// BUG FIX: the peer must be passed as an argument. The original
			// closure captured the shared loop variable, so every goroutine
			// queried the same (last) peer instead of one peer each.
			go func(p *peer.Peer) {
				pmes, err := dht.findProvidersSingle(p, key, 0, timeout)
				if err != nil {
					u.PErr("%v\n", err)
					return
				}
				dht.addPeerListAsync(key, pmes.GetPeers(), ps, count, peerOut)
			}(pp)
		}

	}()
	return peerOut
}

// addPeerListAsync connects to each peer in a PBDHTMessage peer list, records
// it as a provider for k, and forwards newly-seen peers on out until the set
// ps has grown to count entries. Dial or multiaddr errors are logged and the
// offending entry is skipped.
// TODO: this function could also be done asynchronously.
func (dht *IpfsDHT) addPeerListAsync(k u.Key, peers []*PBDHTMessage_PBPeer, ps *peerSet, count int, out chan *peer.Peer) {
	for _, entry := range peers {
		addr, err := ma.NewMultiaddr(entry.GetAddr())
		if err != nil {
			u.PErr("%v\n", err)
			continue
		}

		prov, err := dht.network.GetConnection(peer.ID(entry.GetId()), addr)
		if err != nil {
			u.PErr("%v\n", err)
			continue
		}

		dht.providers.AddProvider(k, prov)
		if ps.AddIfSmallerThan(prov, count) {
			out <- prov
			continue
		}
		if ps.Size() >= count {
			return
		}
	}
}

// FindProviders searches for peers who can provide the value for given key.
func (dht *IpfsDHT) FindProviders(key u.Key, timeout time.Duration) ([]*peer.Peer, error) {
ll := startNewRPC("FindProviders")
Expand Down
12 changes: 12 additions & 0 deletions routing/dht/util.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ func (c *counter) Size() (s int) {
return
}

// peerSet is a threadsafe set of peers
type peerSet struct {
ps map[string]bool
lk sync.RWMutex
Expand Down Expand Up @@ -69,3 +70,14 @@ func (ps *peerSet) Size() int {
defer ps.lk.RUnlock()
return len(ps.ps)
}

// AddIfSmallerThan inserts p into the set only if p is not already present
// and the set currently holds fewer than maxsize peers. It reports whether
// the insertion actually happened.
func (ps *peerSet) AddIfSmallerThan(p *peer.Peer, maxsize int) bool {
	key := string(p.ID)

	ps.lk.Lock()
	defer ps.lk.Unlock()

	if _, present := ps.ps[key]; present || len(ps.ps) >= maxsize {
		return false
	}
	ps.ps[key] = true
	return true
}

0 comments on commit 678db4f

Please sign in to comment.